Diffstat (limited to 'src/3rdparty/v8/src/heap.cc')
-rw-r--r--  src/3rdparty/v8/src/heap.cc  1163
1 file changed, 755 insertions, 408 deletions
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
index f678517..ebf3ccd 100644
--- a/src/3rdparty/v8/src/heap.cc
+++ b/src/3rdparty/v8/src/heap.cc
@@ -48,6 +48,7 @@
 #include "snapshot.h"
 #include "store-buffer.h"
 #include "v8threads.h"
+#include "v8utils.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #include "regexp-macro-assembler.h"
@@ -66,21 +67,26 @@ Heap::Heap()
     : isolate_(NULL),
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
-#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
-      code_range_size_(0),
-#elif defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64)
 #define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
 #define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+#if defined(ANDROID)
+      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Page::kPageSize),
+      max_old_generation_size_(192*MB),
+      max_executable_size_(max_old_generation_size_),
+#else
       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
       max_executable_size_(256l * LUMP_OF_MEMORY),
+#endif
 
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -92,6 +98,7 @@ Heap::Heap()
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
       global_ic_age_(0),
+      flush_monomorphic_ics_(false),
       scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
@@ -134,6 +141,7 @@ Heap::Heap()
       previous_survival_rate_trend_(Heap::STABLE),
       survival_rate_trend_(Heap::STABLE),
       max_gc_pause_(0),
+      total_gc_time_ms_(0),
       max_alive_after_gc_(0),
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
@@ -150,7 +158,8 @@ Heap::Heap()
       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
       promotion_queue_(this),
       configured_(false),
-      chunks_queued_for_free_(NULL) {
+      chunks_queued_for_free_(NULL),
+      relocation_mutex_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -168,12 +177,14 @@ Heap::Heap()
   }
 
   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
-  global_contexts_list_ = NULL;
+  native_contexts_list_ = NULL;
   mark_compact_collector_.heap_ = this;
   external_string_table_.heap_ = this;
   // Put a dummy entry in the remembered pages so we can find the list the
   // minidump even if there are no real unmapped pages.
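An aside before the constructor body continues below: the hunk above stops special-casing Android via a tiny 128 KB LUMP_OF_MEMORY and instead halves the semispace multiplier (4x instead of 8x) and caps the old generation at 192 MB. A standalone sketch of that arithmetic, not part of the patch; the 1 MB value for Page::kPageSize is an assumption taken from this V8 snapshot:

#include <algorithm>
#include <cstdio>

int main() {
  const long long KB = 1024, MB = KB * KB;
  const long long kPageSize = 1 * MB;  // assumed Page::kPageSize
  struct Config { const char* name; long long lump; int factor; };
  const Config configs[] = {
    { "x64",            2 * MB, 8 },  // LUMP_OF_MEMORY = 2 * MB
    { "default (ia32)", 1 * MB, 8 },  // LUMP_OF_MEMORY = MB
    { "Android",        1 * MB, 4 },  // new branch: half the semispaces
  };
  for (const Config& c : configs) {
    // Mirrors factor * Max(LUMP_OF_MEMORY, Page::kPageSize) above.
    long long semispace = c.factor * std::max(c.lump, kPageSize);
    std::printf("%-15s max semispace: %2lld MB\n", c.name, semispace / MB);
  }
  return 0;
}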
   RememberUnmappedPage(NULL, false);
+
+  ClearObjectStats(true);
 }
 
 
@@ -201,6 +212,20 @@ intptr_t Heap::CommittedMemory() {
       lo_space_->Size();
 }
 
+
+size_t Heap::CommittedPhysicalMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.CommittedPhysicalMemory() +
+      old_pointer_space_->CommittedPhysicalMemory() +
+      old_data_space_->CommittedPhysicalMemory() +
+      code_space_->CommittedPhysicalMemory() +
+      map_space_->CommittedPhysicalMemory() +
+      cell_space_->CommittedPhysicalMemory() +
+      lo_space_->CommittedPhysicalMemory();
+}
+
+
 intptr_t Heap::CommittedMemoryExecutable() {
   if (!HasBeenSetUp()) return 0;
 
@@ -315,48 +340,59 @@ void Heap::ReportStatisticsBeforeGC() {
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d\n",
-         isolate_->memory_allocator()->Size(),
-         isolate_->memory_allocator()->Available());
-  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d\n",
-         Heap::new_space_.Size(),
-         new_space_.Available());
-  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d"
-         ", waste: %8" V8_PTR_PREFIX "d\n",
-         old_pointer_space_->Size(),
-         old_pointer_space_->Available(),
-         old_pointer_space_->Waste());
-  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d"
-         ", waste: %8" V8_PTR_PREFIX "d\n",
-         old_data_space_->Size(),
-         old_data_space_->Available(),
-         old_data_space_->Waste());
-  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d"
-         ", waste: %8" V8_PTR_PREFIX "d\n",
-         code_space_->Size(),
-         code_space_->Available(),
-         code_space_->Waste());
-  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d"
-         ", waste: %8" V8_PTR_PREFIX "d\n",
-         map_space_->Size(),
-         map_space_->Available(),
-         map_space_->Waste());
-  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d"
-         ", waste: %8" V8_PTR_PREFIX "d\n",
-         cell_space_->Size(),
-         cell_space_->Available(),
-         cell_space_->Waste());
-  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
-         ", available: %8" V8_PTR_PREFIX "d\n",
-         lo_space_->Size(),
-         lo_space_->Available());
+  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB\n",
+           isolate_->memory_allocator()->Size() / KB,
+           isolate_->memory_allocator()->Available() / KB);
+  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           new_space_.Size() / KB,
+           new_space_.Available() / KB,
+           new_space_.CommittedMemory() / KB);
+  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_pointer_space_->SizeOfObjects() / KB,
+           old_pointer_space_->Available() / KB,
+           old_pointer_space_->CommittedMemory() / KB);
+  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_data_space_->SizeOfObjects() / KB,
+           old_data_space_->Available() / KB,
+           old_data_space_->CommittedMemory() / KB);
+  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           code_space_->SizeOfObjects() / KB,
+           code_space_->Available() / KB,
+           code_space_->CommittedMemory() / KB);
+  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           map_space_->SizeOfObjects() / KB,
+           map_space_->Available() / KB,
+           map_space_->CommittedMemory() / KB);
+  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           cell_space_->SizeOfObjects() / KB,
+           cell_space_->Available() / KB,
+           cell_space_->CommittedMemory() / KB);
+  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           lo_space_->SizeOfObjects() / KB,
+           lo_space_->Available() / KB,
+           lo_space_->CommittedMemory() / KB);
+  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           this->SizeOfObjects() / KB,
+           this->Available() / KB,
+           this->CommittedMemory() / KB);
+  PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
 }
 
 
@@ -383,18 +419,23 @@ void Heap::GarbageCollectionPrologue() {
   ClearJSFunctionResultCaches();
   gc_count_++;
   unflattened_strings_length_ = 0;
-#ifdef DEBUG
-  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-  allow_allocation(false);
 
+  if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+    mark_compact_collector()->EnableCodeFlushing(true);
+  }
+
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
+#endif
+
+#ifdef DEBUG
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  allow_allocation(false);
 
   if (FLAG_gc_verbose) Print();
-#endif  // DEBUG
 
-#if defined(DEBUG)
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
@@ -402,6 +443,7 @@ void Heap::GarbageCollectionPrologue() {
   store_buffer()->GCPrologue();
 }
 
+
 intptr_t Heap::SizeOfObjects() {
   intptr_t total = 0;
   AllSpaces spaces;
@@ -411,17 +453,34 @@ intptr_t Heap::SizeOfObjects() {
   return total;
 }
 
+
+void Heap::RepairFreeListsAfterBoot() {
+  PagedSpaces spaces;
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->RepairFreeListsAfterBoot();
+  }
+}
+
+
 void Heap::GarbageCollectionEpilogue() {
   store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
-#ifdef DEBUG
-  allow_allocation(true);
-  ZapFromSpace();
+
+  // In release mode, we only zap the from space under heap verification.
+  if (Heap::ShouldZapGarbage()) {
+    ZapFromSpace();
+  }
+
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
+#endif
 
+#ifdef DEBUG
+  allow_allocation(true);
   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
   if (FLAG_print_handles) PrintHandles();
   if (FLAG_gc_verbose) Print();
@@ -435,6 +494,56 @@ void Heap::GarbageCollectionEpilogue() {
       symbol_table()->Capacity());
   isolate_->counters()->number_of_symbols()->Set(
       symbol_table()->NumberOfElements());
+
+  if (CommittedMemory() > 0) {
+    isolate_->counters()->external_fragmentation_total()->AddSample(
+        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_fraction_map_space()->AddSample(
+        static_cast<int>(
+            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_cell_space()->AddSample(
+        static_cast<int>(
+            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_sample_total_committed()->AddSample(
+        static_cast<int>(CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_total_used()->AddSample(
+        static_cast<int>(SizeOfObjects() / KB));
+    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+        static_cast<int>(map_space()->CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
+        static_cast<int>(cell_space()->CommittedMemory() / KB));
+  }
+
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+  isolate_->counters()->space##_bytes_available()->Set( \
+      static_cast<int>(space()->Available())); \
+  isolate_->counters()->space##_bytes_committed()->Set( \
+      static_cast<int>(space()->CommittedMemory())); \
+  isolate_->counters()->space##_bytes_used()->Set( \
+      static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+  if (space()->CommittedMemory() > 0) { \
+    isolate_->counters()->external_fragmentation_##space()->AddSample( \
+        static_cast<int>(100 - \
+            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
+  }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+  UPDATE_COUNTERS_FOR_SPACE(space) \
+  UPDATE_FRAGMENTATION_FOR_SPACE(space)
+
+  UPDATE_COUNTERS_FOR_SPACE(new_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+
 #if defined(DEBUG)
   ReportStatisticsAfterGC();
 #endif  // DEBUG
@@ -542,10 +651,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
     PerformGarbageCollection(collector, &tracer);
     rate->Stop();
 
+    ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+
+    // This can do debug callbacks and restart incremental marking.
     GarbageCollectionEpilogue();
   }
 
-  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
   if (incremental_marking()->IsStopped()) {
     if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
       incremental_marking()->Start();
@@ -566,7 +677,7 @@ void Heap::PerformScavenge() {
 }
 
 
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 // Helper class for verifying the symbol table.
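The symbol-table verifier itself follows below. Before it, the counter math in the epilogue hunk above deserves a gloss: external fragmentation is sampled as the share of committed memory not occupied by live objects, guarded against a zero denominator. A minimal standalone version of the same expression:

#include <cstdio>

static int FragmentationPercent(long long size_of_objects,
                                long long committed) {
  // The hunk above guards each sample with CommittedMemory() > 0.
  if (committed <= 0) return 0;
  return static_cast<int>(100 - (size_of_objects * 100.0) / committed);
}

int main() {
  // e.g. 3 MB of live objects in 4 MB committed -> a 25% fragmentation sample
  std::printf("%d%%\n", FragmentationPercent(3 << 20, 4 << 20));
  return 0;
}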
 class SymbolTableVerifier : public ObjectVisitor {
  public:
@@ -575,20 +686,18 @@ class SymbolTableVerifier : public ObjectVisitor {
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
         // Check that the symbol is actually a symbol.
-        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
       }
     }
   }
 };
-#endif  // DEBUG
 
 
 static void VerifySymbolTable() {
-#ifdef DEBUG
   SymbolTableVerifier verifier;
   HEAP->symbol_table()->IterateElements(&verifier);
-#endif  // DEBUG
 }
+#endif  // VERIFY_HEAP
 
 
 static bool AbortIncrementalMarkingAndCollectGarbage(
@@ -603,67 +712,42 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
 
 
 void Heap::ReserveSpace(
-    int new_space_size,
-    int pointer_space_size,
-    int data_space_size,
-    int code_space_size,
-    int map_space_size,
-    int cell_space_size,
-    int large_object_size) {
-  NewSpace* new_space = Heap::new_space();
-  PagedSpace* old_pointer_space = Heap::old_pointer_space();
-  PagedSpace* old_data_space = Heap::old_data_space();
-  PagedSpace* code_space = Heap::code_space();
-  PagedSpace* map_space = Heap::map_space();
-  PagedSpace* cell_space = Heap::cell_space();
-  LargeObjectSpace* lo_space = Heap::lo_space();
+    int *sizes,
+    Address *locations_out) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
-    if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE,
-                           "failed to reserve space in the new space");
-      gc_performed = true;
-    }
-    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
-          "failed to reserve space in the old pointer space");
-      gc_performed = true;
-    }
-    if (!(old_data_space->ReserveSpace(data_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
-          "failed to reserve space in the old data space");
-      gc_performed = true;
-    }
-    if (!(code_space->ReserveSpace(code_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
-          "failed to reserve space in the code space");
-      gc_performed = true;
-    }
-    if (!(map_space->ReserveSpace(map_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
-          "failed to reserve space in the map space");
-      gc_performed = true;
-    }
-    if (!(cell_space->ReserveSpace(cell_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
-          "failed to reserve space in the cell space");
-      gc_performed = true;
-    }
-    // We add a slack-factor of 2 in order to have space for a series of
-    // large-object allocations that are only just larger than the page size.
-    large_object_size *= 2;
-    // The ReserveSpace method on the large object space checks how much
-    // we can expand the old generation.  This includes expansion caused by
-    // allocation in the other spaces.
-    large_object_size += cell_space_size + map_space_size + code_space_size +
-        data_space_size + pointer_space_size;
-    if (!(lo_space->ReserveSpace(large_object_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
-          "failed to reserve space in the large object space");
-      gc_performed = true;
+    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
+    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
+      if (sizes[space] != 0) {
+        MaybeObject* allocation;
+        if (space == NEW_SPACE) {
+          allocation = new_space()->AllocateRaw(sizes[space]);
+        } else {
+          allocation = paged_space(space)->AllocateRaw(sizes[space]);
+        }
+        FreeListNode* node;
+        if (!allocation->To<FreeListNode>(&node)) {
+          if (space == NEW_SPACE) {
+            Heap::CollectGarbage(NEW_SPACE,
+                                 "failed to reserve space in the new space");
+          } else {
+            AbortIncrementalMarkingAndCollectGarbage(
+                this,
+                static_cast<AllocationSpace>(space),
+                "failed to reserve space in paged space");
+          }
+          gc_performed = true;
+          break;
+        } else {
+          // Mark with a free list node, in case we have a GC before
+          // deserializing.
+          node->set_size(this, sizes[space]);
+          locations_out[space] = node->address();
+        }
+      }
     }
   }
 
@@ -691,7 +775,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
 
 void Heap::ClearJSFunctionResultCaches() {
   if (isolate_->bootstrapper()->IsActive()) return;
 
-  Object* context = global_contexts_list_;
+  Object* context = native_contexts_list_;
   while (!context->IsUndefined()) {
     // Get the caches for this context. GC can happen when the context
     // is not fully initialized, so the caches can be undefined.
@@ -718,7 +802,7 @@ void Heap::ClearNormalizedMapCaches() {
     return;
   }
 
-  Object* context = global_contexts_list_;
+  Object* context = native_contexts_list_;
   while (!context->IsUndefined()) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
@@ -770,9 +854,12 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     PROFILE(isolate_, CodeMovingGCEvent());
   }
 
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifySymbolTable();
   }
+#endif
+
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -847,8 +934,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     // have to limit maximal capacity of the young generation.
     new_space_high_promotion_mode_active_ = true;
     if (FLAG_trace_gc) {
-      PrintF("Limited new space size due to high promotion rate: %d MB\n",
-             new_space_.InitialCapacity() / MB);
+      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
+               new_space_.InitialCapacity() / MB);
    }
   } else if (new_space_high_promotion_mode_active_ &&
       IsStableOrDecreasingSurvivalTrend() &&
@@ -858,8 +945,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     // to grow again.
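The promotion-rate code picks up again right below; stepping back to the rewritten ReserveSpace above: instead of per-space ReserveSpace calls, it now attempts one raw allocation per requested space and, on any failure, collects garbage and restarts the whole pass, bounded by kThreshold attempts. A stubbed standalone sketch of that control flow; the allocator and GC here are fakes, not V8 calls:

#include <cstdio>

enum { kNumSpaces = 6, kThreshold = 20 };

// Fake allocator for the sketch: each space fails a fixed number of times.
static int failures_left[kNumSpaces] = { 1, 0, 2, 0, 0, 0 };

static bool AllocateRaw(int space, int size) {
  (void)size;
  if (failures_left[space] > 0) { failures_left[space]--; return false; }
  return true;
}

static void CollectGarbage(int space) {
  std::printf("GC triggered by space %d\n", space);
}

// Mirrors the control flow above: one raw allocation attempt per requested
// space; any failure runs a GC and restarts the whole pass.
static bool ReserveSpace(const int* sizes) {
  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = 0; space < kNumSpaces; space++) {
      if (sizes[space] == 0) continue;
      if (!AllocateRaw(space, sizes[space])) {
        CollectGarbage(space);  // the real code also aborts incremental marking
        gc_performed = true;
        break;
      }
      // The real code tags the block as a FreeListNode and records its
      // address in locations_out so a GC before deserialization stays safe.
    }
  }
  return !gc_performed;  // true once a full pass succeeded
}

int main() {
  const int sizes[kNumSpaces] = { 4096, 0, 4096, 0, 0, 0 };
  std::printf("reserved: %s\n", ReserveSpace(sizes) ? "yes" : "no");
  return 0;
}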
     new_space_high_promotion_mode_active_ = false;
     if (FLAG_trace_gc) {
-      PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
-             new_space_.MaximumCapacity() / MB);
+      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
+               new_space_.MaximumCapacity() / MB);
     }
   }
 
@@ -899,9 +986,12 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_epilogue_callback_();
   }
+
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifySymbolTable();
   }
+#endif
 
   return next_gc_likely_to_collect_more;
 }
@@ -928,7 +1018,7 @@ void Heap::MarkCompact(GCTracer* tracer) {
 
   contexts_disposed_ = 0;
 
-  isolate_->set_context_exit_happened(false);
+  flush_monomorphic_ics_ = false;
 }
 
 
@@ -938,7 +1028,8 @@ void Heap::MarkCompactPrologue() {
   isolate_->keyed_lookup_cache()->Clear();
   isolate_->context_slot_cache()->Clear();
   isolate_->descriptor_lookup_cache()->Clear();
-  StringSplitCache::Clear(string_split_cache());
+  RegExpResultsCache::Clear(string_split_cache());
+  RegExpResultsCache::Clear(regexp_multiple_cache());
 
   isolate_->compilation_cache()->MarkCompactPrologue();
 
@@ -983,7 +1074,7 @@ class ScavengeVisitor: public ObjectVisitor {
 };
 
 
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 // Visitor class to verify pointers in code or data space do not point into
 // new space.
 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
@@ -991,7 +1082,7 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
   void VisitPointers(Object** start, Object**end) {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
-        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
+        CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
       }
     }
   }
@@ -1016,7 +1107,7 @@ static void VerifyNonPointerSpacePointers() {
       object->Iterate(&v);
   }
 }
-#endif
+#endif  // VERIFY_HEAP
 
 
 void Heap::CheckNewSpaceExpansionCriteria() {
@@ -1154,7 +1245,9 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 
 
 void Heap::Scavenge() {
-#ifdef DEBUG
+  RelocationLock relocation_lock(this);
+
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
 
@@ -1220,20 +1313,32 @@ void Heap::Scavenge() {
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL; cell = cell_iterator.Next()) {
-    if (cell->IsJSGlobalPropertyCell()) {
-      Address value_address =
-          reinterpret_cast<Address>(cell) +
-          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+  for (HeapObject* heap_object = cell_iterator.Next();
+       heap_object != NULL;
+       heap_object = cell_iterator.Next()) {
+    if (heap_object->IsJSGlobalPropertyCell()) {
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
+      Address value_address = cell->ValueAddress();
       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
     }
   }
 
-  // Scavenge object reachable from the global contexts list directly.
-  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+  // Copy objects reachable from the code flushing candidates list.
+  MarkCompactCollector* collector = mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+  }
+
+  // Scavenge object reachable from the native contexts list directly.
+  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
 
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  while (IterateObjectGroups(&scavenge_visitor)) {
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  }
+  isolate()->global_handles()->RemoveObjectGroups();
+
   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
       &IsUnscavengedHeapObject);
   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
@@ -1274,6 +1379,51 @@ void Heap::Scavenge() {
 }
 
 
+// TODO(mstarzinger): Unify this method with
+// MarkCompactCollector::MarkObjectGroups().
+bool Heap::IterateObjectGroups(ObjectVisitor* scavenge_visitor) {
+  List<ObjectGroup*>* object_groups =
+      isolate()->global_handles()->object_groups();
+
+  int last = 0;
+  bool changed = false;
+  for (int i = 0; i < object_groups->length(); i++) {
+    ObjectGroup* entry = object_groups->at(i);
+    ASSERT(entry != NULL);
+
+    Object*** objects = entry->objects_;
+    bool group_marked = false;
+    for (size_t j = 0; j < entry->length_; j++) {
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        if (!IsUnscavengedHeapObject(this, &object)) {
+          group_marked = true;
+          break;
+        }
+      }
+    }
+
+    if (!group_marked) {
+      (*object_groups)[last++] = entry;
+      continue;
+    }
+
+    for (size_t j = 0; j < entry->length_; ++j) {
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        scavenge_visitor->VisitPointer(&object);
+        changed = true;
+      }
+    }
+
+    entry->Dispose();
+    object_groups->at(i) = NULL;
+  }
+  object_groups->Rewind(last);
+  return changed;
+}
+
+
 HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                     Object** p) {
   MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -1291,9 +1441,11 @@ HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
 
 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     external_string_table_.Verify();
   }
+#endif
 
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
@@ -1391,7 +1543,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* undefined = undefined_value();
   Object* head = undefined;
   Context* tail = NULL;
-  Object* candidate = global_contexts_list_;
+  Object* candidate = native_contexts_list_;
 
   // We don't record weak slots during marking or scavenges.
   // Instead we do it once when we complete mark-compact cycle.
@@ -1464,20 +1616,47 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   }
 
   // Update the head of the list of contexts.
-  global_contexts_list_ = head;
+  native_contexts_list_ = head;
 }
 
 
 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   AssertNoAllocation no_allocation;
 
-  class VisitorAdapter : public ObjectVisitor {
+  // Both the external string table and the symbol table may contain
+  // external strings, but neither lists them exhaustively, nor is the
+  // intersection set empty.  Therefore we iterate over the external string
+  // table first, ignoring symbols, and then over the symbol table.
+
+  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
    public:
-    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
-        : visitor_(visitor) {}
+    explicit ExternalStringTableVisitorAdapter(
+        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+    virtual void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        // Visit non-symbol external strings,
+        // since symbols are listed in the symbol table.
+        if (!(*p)->IsSymbol()) {
+          ASSERT((*p)->IsExternalString());
+          visitor_->VisitExternalString(Utils::ToLocal(
+              Handle<String>(String::cast(*p))));
+        }
+      }
+    }
+   private:
+    v8::ExternalResourceVisitor* visitor_;
+  } external_string_table_visitor(visitor);
+
+  external_string_table_.Iterate(&external_string_table_visitor);
+
+  class SymbolTableVisitorAdapter : public ObjectVisitor {
+   public:
+    explicit SymbolTableVisitorAdapter(
+        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
     virtual void VisitPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         if ((*p)->IsExternalString()) {
+          ASSERT((*p)->IsSymbol());
           visitor_->VisitExternalString(Utils::ToLocal(
               Handle<String>(String::cast(*p))));
         }
@@ -1485,8 +1664,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
     }
    private:
     v8::ExternalResourceVisitor* visitor_;
-  } visitor_adapter(visitor);
-  external_string_table_.Iterate(&visitor_adapter);
+  } symbol_table_visitor(visitor);
+
+  symbol_table()->IterateElements(&symbol_table_visitor);
 }
 
 
@@ -1590,7 +1770,7 @@ class ScavengingVisitor : public StaticVisitorBase {
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
 
-    table_.Register(kVisitGlobalContext,
+    table_.Register(kVisitNativeContext,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<Context::kSize>);
 
@@ -1676,7 +1856,7 @@ class ScavengingVisitor : public StaticVisitorBase {
       RecordCopiedObject(heap, target);
       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
       Isolate* isolate = heap->isolate();
-      if (isolate->logger()->is_logging() ||
+      if (isolate->logger()->is_logging_code_events() ||
          CpuProfiler::is_profiling(isolate)) {
         if (target->IsSharedFunctionInfo()) {
           PROFILE(isolate, SharedFunctionInfoMoveEvent(
@@ -1984,9 +2164,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                       int instance_size) {
   Object* result;
-  { MaybeObject* maybe_result = AllocateRawMap();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  MaybeObject* maybe_result = AllocateRawMap();
+  if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // Map::cast cannot be used due to uninitialized map field.
   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
@@ -1999,6 +2178,9 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
   reinterpret_cast<Map*>(result)->set_bit_field(0);
   reinterpret_cast<Map*>(result)->set_bit_field2(0);
+  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+                   Map::OwnsDescriptors::encode(true);
+  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
   return result;
 }
 
@@ -2007,9 +2189,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                                int instance_size,
                                ElementsKind elements_kind) {
   Object* result;
-  { MaybeObject* maybe_result = AllocateRawMap();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  MaybeObject* maybe_result = AllocateRawMap();
+  if (!maybe_result->To(&result)) return maybe_result;
 
   Map* map = reinterpret_cast<Map*>(result);
   map->set_map_no_write_barrier(meta_map());
@@ -2021,20 +2202,17 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
-  map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->init_prototype_transitions(undefined_value());
+  map->init_back_pointer(undefined_value());
   map->set_unused_property_fields(0);
+  map->set_instance_descriptors(empty_descriptor_array());
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
+  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+                   Map::OwnsDescriptors::encode(true);
+  map->set_bit_field3(bit_field3);
   map->set_elements_kind(elements_kind);
 
-  // If the map object is aligned fill the padding area with Smi 0 objects.
-  if (Map::kPadStart < Map::kSize) {
-    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
-           0,
-           Map::kSize - Map::kPadStart);
-  }
   return map;
 }
 
@@ -2071,8 +2249,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
     if (!maybe_info->To(&info)) return maybe_info;
   }
-  info->set_ic_total_count(0);
-  info->set_ic_with_type_info_count(0);
+  info->initialize_storage();
   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                 SKIP_WRITE_BARRIER);
   return info;
@@ -2160,17 +2337,17 @@ bool Heap::CreateInitialMaps() {
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Fix the instance_descriptors for the existing maps.
-  meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->init_prototype_transitions(undefined_value());
+  meta_map()->init_back_pointer(undefined_value());
+  meta_map()->set_instance_descriptors(empty_descriptor_array());
 
-  fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->init_prototype_transitions(undefined_value());
+  fixed_array_map()->init_back_pointer(undefined_value());
+  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
 
-  oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->init_prototype_transitions(undefined_value());
+  oddball_map()->init_back_pointer(undefined_value());
+  oddball_map()->set_instance_descriptors(empty_descriptor_array());
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -2378,9 +2555,16 @@ bool Heap::CreateInitialMaps() {
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  Map* global_context_map = Map::cast(obj);
-  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
-  set_global_context_map(global_context_map);
+  set_global_context_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  Map* native_context_map = Map::cast(obj);
+  native_context_map->set_dictionary_map(true);
+  native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
+  set_native_context_map(native_context_map);
 
   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                          SharedFunctionInfo::kAlignedSize);
@@ -2469,7 +2653,7 @@ bool Heap::CreateApiObjects() {
   // bottleneck to trap the Smi-only -> fast elements transition, and there
   // appears to be no benefit for optimize this case.
   Map* new_neander_map = Map::cast(obj);
-  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
   set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@@ -2632,7 +2816,7 @@ bool Heap::CreateInitialObjects() {
   // hash code in place. The hash code for the hidden_symbol is zero to ensure
   // that it will always be at the first entry in property descriptors.
   { MaybeObject* maybe_obj =
-        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
+        AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   hidden_symbol_ = String::cast(obj);
@@ -2693,18 +2877,33 @@ bool Heap::CreateInitialObjects() {
   set_single_character_string_cache(FixedArray::cast(obj));
 
   // Allocate cache for string split.
-  { MaybeObject* maybe_obj =
-        AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
+  { MaybeObject* maybe_obj = AllocateFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_string_split_cache(FixedArray::cast(obj));
 
+  { MaybeObject* maybe_obj = AllocateFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_regexp_multiple_cache(FixedArray::cast(obj));
+
   // Allocate cache for external strings pointing to native source code.
   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_natives_source_cache(FixedArray::cast(obj));
 
+  // Allocate object to hold object observation state.
+  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_observation_state(JSObject::cast(obj));
+
   // Handling of script id generation is in FACTORY->NewScript.
   set_last_script_id(undefined_value());
 
@@ -2724,70 +2923,126 @@ bool Heap::CreateInitialObjects() {
 }
 
 
-Object* StringSplitCache::Lookup(
-    FixedArray* cache, String* string, String* pattern) {
-  if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
-  uint32_t hash = string->Hash();
-  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+  RootListIndex writable_roots[] = {
+    kStoreBufferTopRootIndex,
+    kStackLimitRootIndex,
+    kInstanceofCacheFunctionRootIndex,
+    kInstanceofCacheMapRootIndex,
+    kInstanceofCacheAnswerRootIndex,
+    kCodeStubsRootIndex,
+    kNonMonomorphicCacheRootIndex,
+    kPolymorphicCodeCacheRootIndex,
+    kLastScriptIdRootIndex,
+    kEmptyScriptRootIndex,
+    kRealStackLimitRootIndex,
+    kArgumentsAdaptorDeoptPCOffsetRootIndex,
+    kConstructStubDeoptPCOffsetRootIndex,
+    kGetterStubDeoptPCOffsetRootIndex,
+    kSetterStubDeoptPCOffsetRootIndex,
+    kSymbolTableRootIndex,
+  };
+
+  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
+    if (root_index == writable_roots[i])
+      return true;
+  }
+  return false;
+}
+
+
+Object* RegExpResultsCache::Lookup(Heap* heap,
+                                   String* key_string,
+                                   Object* key_pattern,
+                                   ResultsCacheType type) {
+  FixedArray* cache;
+  if (!key_string->IsSymbol()) return Smi::FromInt(0);
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    ASSERT(key_pattern->IsString());
+    if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
+    cache = heap->string_split_cache();
+  } else {
+    ASSERT(type == REGEXP_MULTIPLE_INDICES);
+    ASSERT(key_pattern->IsFixedArray());
+    cache = heap->regexp_multiple_cache();
+  }
+
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
       ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == string &&
-      cache->get(index + kPatternOffset) == pattern) {
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
     return cache->get(index + kArrayOffset);
   }
-  index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
-  if (cache->get(index + kStringOffset) == string &&
-      cache->get(index + kPatternOffset) == pattern) {
+  index =
+      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
     return cache->get(index + kArrayOffset);
   }
   return Smi::FromInt(0);
 }
 
 
-void StringSplitCache::Enter(Heap* heap,
-                             FixedArray* cache,
-                             String* string,
-                             String* pattern,
-                             FixedArray* array) {
-  if (!string->IsSymbol() || !pattern->IsSymbol()) return;
-  uint32_t hash = string->Hash();
-  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+void RegExpResultsCache::Enter(Heap* heap,
+                               String* key_string,
+                               Object* key_pattern,
+                               FixedArray* value_array,
+                               ResultsCacheType type) {
+  FixedArray* cache;
+  if (!key_string->IsSymbol()) return;
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    ASSERT(key_pattern->IsString());
+    if (!key_pattern->IsSymbol()) return;
+    cache = heap->string_split_cache();
+  } else {
+    ASSERT(type == REGEXP_MULTIPLE_INDICES);
+    ASSERT(key_pattern->IsFixedArray());
+    cache = heap->regexp_multiple_cache();
+  }
+
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
      ~(kArrayEntriesPerCacheEntry - 1));
   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
-    cache->set(index + kStringOffset, string);
-    cache->set(index + kPatternOffset, pattern);
-    cache->set(index + kArrayOffset, array);
+    cache->set(index + kStringOffset, key_string);
+    cache->set(index + kPatternOffset, key_pattern);
+    cache->set(index + kArrayOffset, value_array);
  } else {
     uint32_t index2 =
-        ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
+        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
-      cache->set(index2 + kStringOffset, string);
-      cache->set(index2 + kPatternOffset, pattern);
-      cache->set(index2 + kArrayOffset, array);
+      cache->set(index2 + kStringOffset, key_string);
+      cache->set(index2 + kPatternOffset, key_pattern);
+      cache->set(index2 + kArrayOffset, value_array);
     } else {
       cache->set(index2 + kStringOffset, Smi::FromInt(0));
       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
-      cache->set(index + kStringOffset, string);
-      cache->set(index + kPatternOffset, pattern);
-      cache->set(index + kArrayOffset, array);
+      cache->set(index + kStringOffset, key_string);
+      cache->set(index + kPatternOffset, key_pattern);
+      cache->set(index + kArrayOffset, value_array);
     }
   }
-  if (array->length() < 100) {  // Limit how many new symbols we want to make.
-    for (int i = 0; i < array->length(); i++) {
-      String* str = String::cast(array->get(i));
+  // If the array is a reasonably short list of substrings, convert it into a
+  // list of symbols.
+  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+    for (int i = 0; i < value_array->length(); i++) {
+      String* str = String::cast(value_array->get(i));
       Object* symbol;
       MaybeObject* maybe_symbol = heap->LookupSymbol(str);
       if (maybe_symbol->ToObject(&symbol)) {
-        array->set(i, symbol);
+        value_array->set(i, symbol);
      }
    }
   }
-  array->set_map_no_write_barrier(heap->fixed_cow_array_map());
+  // Convert backing store to a copy-on-write array.
+  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
 }
 
 
-void StringSplitCache::Clear(FixedArray* cache) {
-  for (int i = 0; i < kStringSplitCacheSize; i++) {
+void RegExpResultsCache::Clear(FixedArray* cache) {
+  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
     cache->set(i, Smi::FromInt(0));
   }
 }
@@ -2817,7 +3072,7 @@ void Heap::AllocateFullSizeNumberStringCache() {
   // The idea is to have a small number string cache in the snapshot to keep
   // boot-time memory usage down.  If we expand the number string cache already
   // while creating the snapshot then that didn't work out.
-  ASSERT(!Serializer::enabled());
+  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
   MaybeObject* maybe_obj =
       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
   Object* new_cache;
@@ -3005,6 +3260,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
+  share->ClearOptimizedCodeMap();
   share->set_scope_info(ScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -3017,8 +3273,8 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
   share->set_ast_node_count(0);
-  share->set_deopt_counter(FLAG_deopt_every_n_times);
-  share->set_ic_age(0);
+  share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
+  share->set_counters(0);
 
   // Set integer fields (smi or int, depending on the architecture).
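The SharedFunctionInfo hunk resumes below. The renamed cache above is worth a gloss: it is effectively two-way set-associative, with a primary slot derived from the key string's hash and one overflow slot a single entry-group further on; on a double collision the overflow slot is cleared and the primary slot overwritten. A standalone sketch of that probing scheme, with plain unsigned ints standing in for heap objects and illustrative sizes:

#include <cstdio>

// Illustrative sizes; the real constants live in RegExpResultsCache.
enum { kCacheSize = 16, kEntriesPerGroup = 4 };
static unsigned keys[kCacheSize];   // 0 means empty, mirroring Smi::FromInt(0)
static int values[kCacheSize];

static unsigned PrimaryIndex(unsigned hash) {
  // Same masking as above: hash masked to the table, rounded to a group.
  return (hash & (kCacheSize - 1)) & ~(kEntriesPerGroup - 1);
}
static unsigned SecondaryIndex(unsigned primary) {
  return (primary + kEntriesPerGroup) & (kCacheSize - 1);
}

static int Lookup(unsigned key) {
  unsigned i = PrimaryIndex(key);
  if (keys[i] == key) return values[i];
  i = SecondaryIndex(i);
  if (keys[i] == key) return values[i];
  return 0;  // miss, like the Smi zero returned above
}

static void Enter(unsigned key, int value) {
  unsigned i = PrimaryIndex(key);
  if (keys[i] == 0) {
    keys[i] = key; values[i] = value;
    return;
  }
  unsigned j = SecondaryIndex(i);
  if (keys[j] == 0) {
    keys[j] = key; values[j] = value;
  } else {
    keys[j] = 0; values[j] = 0;        // evict the overflow slot...
    keys[i] = key; values[i] = value;  // ...and claim the primary one
  }
}

int main() {
  Enter(0x21u, 7);
  std::printf("hit: %d\n", Lookup(0x21u));
  return 0;
}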
   share->set_length(0);
@@ -3050,6 +3306,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
   }
   JSMessageObject* message = JSMessageObject::cast(result);
   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+  message->initialize_elements();
   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   message->set_type(type);
   message->set_arguments(arguments);
@@ -3274,7 +3531,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
   }
   ASSERT(buffer->IsFlat());
-#if DEBUG
+#if VERIFY_HEAP
   if (FLAG_verify_heap) {
     buffer->StringVerify();
   }
@@ -3326,6 +3583,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
     return Failure::OutOfMemoryException();
   }
 
+  ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
+
   Map* map = external_ascii_string_map();
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3489,17 +3748,27 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > code_space()->AreaSize() || immovable) {
+  HeapObject* result;
+  bool force_lo_space = obj_size > code_space()->AreaSize();
+  if (force_lo_space) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
+  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
 
-  Object* result;
-  if (!maybe_result->ToObject(&result)) return maybe_result;
+  if (immovable && !force_lo_space &&
+      // Objects on the first page of each space are never moved.
+      !code_space_->FirstPage()->Contains(result->address())) {
+    // Discard the first code allocation, which was on a page where it could be
+    // moved.
+    CreateFillerObjectAt(result->address(), obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+    if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
+  }
 
   // Initialize the object
-  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
+  result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
@@ -3526,7 +3795,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   // through the self_reference parameter.
   code->CopyFrom(desc);
 
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     code->Verify();
   }
@@ -3608,7 +3877,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
       isolate_->code_range()->contains(code->address()));
   new_code->Relocate(new_addr - old_addr);
 
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     code->Verify();
   }
@@ -3655,29 +3924,27 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
   // from the function's context, since the function can be from a
   // different context.
   JSFunction* object_function =
-      function->context()->global_context()->object_function();
+      function->context()->native_context()->object_function();
 
   // Each function prototype gets a copy of the object function map.
   // This avoid unwanted sharing of maps between prototypes of different
   // constructors.
   Map* new_map;
   ASSERT(object_function->has_initial_map());
-  { MaybeObject* maybe_map =
-        object_function->initial_map()->CopyDropTransitions();
-    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
-  }
+  MaybeObject* maybe_map = object_function->initial_map()->Copy();
+  if (!maybe_map->To(&new_map)) return maybe_map;
+
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
-    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
-  }
+  MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+  if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+
   // When creating the prototype for the function we must set its
   // constructor to the function.
-  Object* result;
-  { MaybeObject* maybe_result =
-        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
-            constructor_symbol(), function, DONT_ENUM);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  MaybeObject* maybe_failure =
+      JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+          constructor_symbol(), function, DONT_ENUM);
+  if (maybe_failure->IsFailure()) return maybe_failure;
+
   return prototype;
 }
 
@@ -3707,12 +3974,12 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
       !JSFunction::cast(callee)->shared()->is_classic_mode();
   if (strict_mode_callee) {
     boilerplate =
-        isolate()->context()->global_context()->
+        isolate()->context()->native_context()->
            strict_mode_arguments_boilerplate();
     arguments_object_size = kArgumentsObjectSizeStrict;
   } else {
     boilerplate =
-        isolate()->context()->global_context()->arguments_boilerplate();
+        isolate()->context()->native_context()->arguments_boilerplate();
     arguments_object_size = kArgumentsObjectSize;
   }
 
@@ -3751,7 +4018,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
 
   // Check the state of the object
   ASSERT(JSObject::cast(result)->HasFastProperties());
-  ASSERT(JSObject::cast(result)->HasFastElements());
+  ASSERT(JSObject::cast(result)->HasFastObjectElements());
 
   return result;
 }
@@ -3778,25 +4045,22 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
   // suggested by the function.
   int instance_size = fun->shared()->CalculateInstanceSize();
   int in_object_properties = fun->shared()->CalculateInObjectProperties();
-  Object* map_obj;
-  { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
-    if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
-  }
+  Map* map;
+  MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+  if (!maybe_map->To(&map)) return maybe_map;
 
   // Fetch or allocate prototype.
   Object* prototype;
   if (fun->has_instance_prototype()) {
     prototype = fun->instance_prototype();
   } else {
-    { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
-      if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
-    }
+    MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
+    if (!maybe_prototype->To(&prototype)) return maybe_prototype;
   }
-  Map* map = Map::cast(map_obj);
   map->set_inobject_properties(in_object_properties);
   map->set_unused_property_fields(in_object_properties);
   map->set_prototype(prototype);
-  ASSERT(map->has_fast_elements());
+  ASSERT(map->has_fast_object_elements());
 
   // If the function has only simple this property assignments add
   // field descriptors for these to the initial map as the object
@@ -3811,21 +4075,17 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
       fun->shared()->ForbidInlineConstructor();
     } else {
       DescriptorArray* descriptors;
-      { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
-        if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
-          return maybe_descriptors_obj;
-        }
-      }
+      MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
+      if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+
       DescriptorArray::WhitenessWitness witness(descriptors);
       for (int i = 0; i < count; i++) {
         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
         ASSERT(name->IsSymbol());
-        FieldDescriptor field(name, i, NONE);
-        field.SetEnumerationIndex(i);
+        FieldDescriptor field(name, i, NONE, i + 1);
         descriptors->Set(i, &field, witness);
       }
-      descriptors->SetNextEnumerationIndex(count);
-      descriptors->SortUnchecked(witness);
+      descriptors->Sort();
 
       // The descriptors may contain duplicates because the compiler does not
       // guarantee the uniqueness of property names (it would have required
@@ -3834,7 +4094,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
       if (HasDuplicates(descriptors)) {
         fun->shared()->ForbidInlineConstructor();
       } else {
-        map->set_instance_descriptors(descriptors);
+        map->InitializeDescriptors(descriptors);
         map->set_pre_allocated_property_fields(count);
         map->set_unused_property_fields(in_object_properties - count);
       }
@@ -3913,8 +4173,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
-         JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
   return obj;
 }
 
@@ -3942,13 +4201,18 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
 }
 
 
-MaybeObject* Heap::AllocateJSModule() {
+MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
   // Allocate a fresh map. Modules do not have a prototype.
   Map* map;
   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
   if (!maybe_map->To(&map)) return maybe_map;
 
   // Allocate the object based on the map.
-  return AllocateJSObjectFromMap(map, TENURED);
+  JSModule* module;
+  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
+  if (!maybe_module->To(&module)) return maybe_module;
+  module->set_context(context);
+  module->set_scope_info(scope_info);
+  return module;
 }
 
 
@@ -3959,6 +4223,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
     ArrayStorageAllocationMode mode,
     PretenureFlag pretenure) {
   ASSERT(capacity >= length);
+  if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
+    elements_kind = GetHoleyElementsKind(elements_kind);
+  }
   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
   JSArray* array;
   if (!maybe_array->To(&array)) return maybe_array;
@@ -3979,8 +4246,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
     }
   } else {
-    ASSERT(elements_kind == FAST_ELEMENTS ||
-           elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
       maybe_elms = AllocateUninitializedFixedArray(capacity);
     } else {
@@ -4006,6 +4272,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
 
   array->set_elements(elements);
   array->set_length(Smi::FromInt(elements->length()));
+  array->ValidateElements();
   return array;
 }
 
@@ -4059,6 +4326,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   ASSERT(constructor->has_initial_map());
   Map* map = constructor->initial_map();
+  ASSERT(map->is_dictionary_map());
 
   // Make sure no field properties are described in the initial map.
   // This guarantees us that normalizing the properties does not
@@ -4076,13 +4344,11 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
 
   // Allocate a dictionary object for backing storage.
-  Object* obj;
-  { MaybeObject* maybe_obj =
-        StringDictionary::Allocate(
-            map->NumberOfDescribedProperties() * 2 + initial_size);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  StringDictionary* dictionary = StringDictionary::cast(obj);
+  StringDictionary* dictionary;
+  MaybeObject* maybe_dictionary =
+      StringDictionary::Allocate(
+          map->NumberOfOwnDescriptors() * 2 + initial_size);
+  if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
 
   // The global object might be created from an object template with accessors.
   // Fill these accessors into the dictionary.
@@ -4090,36 +4356,32 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
     PropertyDetails details = descs->GetDetails(i);
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
-    PropertyDetails d =
-        PropertyDetails(details.attributes(), CALLBACKS, details.index());
+    PropertyDetails d = PropertyDetails(details.attributes(),
+                                        CALLBACKS,
+                                        details.descriptor_index());
     Object* value = descs->GetCallbacksObject(i);
-    { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
-      if (!maybe_value->ToObject(&value)) return maybe_value;
-    }
+    MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
+    if (!maybe_value->ToObject(&value)) return maybe_value;
 
-    Object* result;
-    { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    dictionary = StringDictionary::cast(result);
+    MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
+    if (!maybe_added->To(&dictionary)) return maybe_added;
   }
 
   // Allocate the global object and initialize it with the backing store.
-  { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  JSObject* global = JSObject::cast(obj);
+  JSObject* global;
+  MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
+  if (!maybe_global->To(&global)) return maybe_global;
+
   InitializeJSObjectFromMap(global, dictionary, map);
 
   // Create a new map for the global object.
-  { MaybeObject* maybe_obj = map->CopyDropDescriptors();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
+  Map* new_map;
+  MaybeObject* maybe_map = map->CopyDropDescriptors();
+  if (!maybe_map->To(&new_map)) return maybe_map;
+  new_map->set_dictionary_map(true);
 
   // Set up the global object as a normalized object.
   global->set_map(new_map);
-  global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);
 
   // Make sure result is a global object with properties in dictionary.
@@ -4248,7 +4510,7 @@ MaybeObject* Heap::ReinitializeJSReceiver(
     map->set_function_with_prototype(true);
     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
     JSFunction::cast(object)->set_context(
-        isolate()->context()->global_context());
+        isolate()->context()->native_context());
   }
 
   // Put in filler if the new object is smaller than the old.
@@ -4289,7 +4551,8 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
 
 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                            PretenureFlag pretenure) {
-  if (string.length() == 1) {
+  int length = string.length();
+  if (length == 1) {
     return Heap::LookupSingleCharacterStringFromCode(string[0]);
   }
   Object* result;
@@ -4299,22 +4562,20 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
   }
 
   // Copy the characters into the new object.
-  SeqAsciiString* string_result = SeqAsciiString::cast(result);
-  for (int i = 0; i < string.length(); i++) {
-    string_result->SeqAsciiStringSet(i, string[i]);
-  }
+  CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
   return result;
 }
 
 
 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
+                                              int non_ascii_start,
                                               PretenureFlag pretenure) {
-  // Count the number of characters in the UTF-8 string and check if
-  // it is an ASCII string.
+  // Continue counting the number of characters in the UTF-8 string, starting
+  // from the first non-ascii character or word.
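The counting loop itself follows in the hunk below. The shape of the change: the caller now passes non_ascii_start, so the slow path resumes counting at the first non-ASCII byte instead of rescanning the whole buffer, and code points above U+FFFF contribute two UTF-16 units (a surrogate pair). A standalone counting pass over raw UTF-8 illustrating the same length rule; it assumes well-formed input and is not V8's decoder:

#include <cstdio>
#include <cstring>

static int Utf16Length(const unsigned char* s, int n) {
  int chars = 0;
  for (int i = 0; i < n; ) {
    unsigned c = s[i];
    // Continuation-byte count from the lead byte of a well-formed sequence.
    int extra = c < 0x80 ? 0 : c < 0xE0 ? 1 : c < 0xF0 ? 2 : 3;
    // 4-byte sequences encode code points above U+FFFF: two UTF-16 units.
    chars += (extra == 3) ? 2 : 1;
    i += 1 + extra;
  }
  return chars;
}

int main() {
  const char* str = "a\xF0\x9F\x98\x80";  // 'a' plus one 4-byte sequence
  std::printf("%d\n", Utf16Length(
      reinterpret_cast<const unsigned char*>(str),
      static_cast<int>(std::strlen(str))));  // prints 3
  return 0;
}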
+  int chars = non_ascii_start;
   Access<UnicodeCache::Utf8Decoder>
       decoder(isolate_->unicode_cache()->utf8_decoder());
-  decoder->Reset(string.start(), string.length());
-  int chars = 0;
+  decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
   while (decoder->has_more()) {
     uint32_t r = decoder->GetNext();
     if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
@@ -4330,16 +4591,16 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
   }
 
   // Convert and copy the characters into the new object.
-  String* string_result = String::cast(result);
+  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
   decoder->Reset(string.start(), string.length());
   int i = 0;
   while (i < chars) {
     uint32_t r = decoder->GetNext();
     if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
-      string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
+      twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
+      twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
     } else {
-      string_result->Set(i++, r);
+      twobyte->SeqTwoByteStringSet(i++, r);
     }
   }
   return result;
@@ -4349,20 +4610,18 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                              PretenureFlag pretenure) {
   // Check if the string is an ASCII string.
-  MaybeObject* maybe_result;
-  if (String::IsAscii(string.start(), string.length())) {
-    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
-  } else {  // It's not an ASCII string.
-    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
-  }
   Object* result;
-  if (!maybe_result->ToObject(&result)) return maybe_result;
+  int length = string.length();
+  const uc16* start = string.start();
 
-  // Copy the characters into the new object, which may be either ASCII or
-  // UTF-16.
-  String* string_result = String::cast(result);
-  for (int i = 0; i < string.length(); i++) {
-    string_result->Set(i, string[i]);
+  if (String::IsAscii(start, length)) {
+    MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
+  } else {  // It's not an ASCII string.
+    MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
   }
   return result;
 }
@@ -4491,6 +4750,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    // Initialize string's content to ensure ASCII-ness (character range 0-127)
+    // as required when verifying the heap.
@@ -4491,6 +4750,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    // Initialize string's content to ensure ASCII-ness (character range 0-127)
+    // as required when verifying the heap.
+    char* dest = SeqAsciiString::cast(result)->GetChars();
+    memset(dest, 0x0F, length * kCharSize);
+  }
+#endif
+
   return result;
 }
 
@@ -4534,16 +4803,16 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
 
 MaybeObject* Heap::AllocateJSArray(
     ElementsKind elements_kind,
     PretenureFlag pretenure) {
-  Context* global_context = isolate()->context()->global_context();
-  JSFunction* array_function = global_context->array_function();
+  Context* native_context = isolate()->context()->native_context();
+  JSFunction* array_function = native_context->array_function();
   Map* map = array_function->initial_map();
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    map = Map::cast(global_context->double_js_array_map());
-  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
-    map = Map::cast(global_context->object_js_array_map());
-  } else {
-    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-    ASSERT(map == global_context->smi_js_array_map());
+  Object* maybe_map_array = native_context->js_array_maps();
+  if (!maybe_map_array->IsUndefined()) {
+    Object* maybe_transitioned_map =
+        FixedArray::cast(maybe_map_array)->get(elements_kind);
+    if (!maybe_transitioned_map->IsUndefined()) {
+      map = Map::cast(maybe_transitioned_map);
+    }
   }
 
   return AllocateJSObjectFromMap(map, pretenure);
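The AllocateJSArray hunk replaces three dedicated per-kind map slots on the context with a single fixed array indexed by ElementsKind, where an undefined slot falls back to the array function's initial map. A reduced sketch of that lookup, with illustrative enum values and types (not V8's declarations):

    #include <cstdio>

    // Per-context table indexed by elements kind, with NULL playing the
    // role of undefined_value(): no transitioned map cached for that kind.
    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS = 0, FAST_ELEMENTS = 1,
                        FAST_DOUBLE_ELEMENTS = 2, NUM_KINDS = 3 };

    struct Map { const char* name; };

    static Map initial_map = { "initial" };
    static Map double_map = { "double-array" };

    static Map* js_array_maps[NUM_KINDS] = { NULL, NULL, &double_map };

    static Map* MapForArray(ElementsKind kind) {
      Map* map = &initial_map;                  // default: the initial array map
      Map* transitioned = js_array_maps[kind];  // per-kind cache slot
      if (transitioned != NULL) map = transitioned;
      return map;
    }

    int main() {
      printf("%s\n", MapForArray(FAST_SMI_ONLY_ELEMENTS)->name);  // initial
      printf("%s\n", MapForArray(FAST_DOUBLE_ELEMENTS)->name);    // double-array
    }

One table instead of one named slot per kind means new elements kinds only grow the array, not the Context interface.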
@@ -4820,35 +5089,50 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
 }
 
 
-MaybeObject* Heap::AllocateGlobalContext() {
+MaybeObject* Heap::AllocateNativeContext() {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+        AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(global_context_map());
-  context->set_smi_js_array_map(undefined_value());
-  context->set_double_js_array_map(undefined_value());
-  context->set_object_js_array_map(undefined_value());
-  ASSERT(context->IsGlobalContext());
+  context->set_map_no_write_barrier(native_context_map());
+  context->set_js_array_maps(undefined_value());
+  ASSERT(context->IsNativeContext());
   ASSERT(result->IsContext());
   return result;
 }
 
 
-MaybeObject* Heap::AllocateModuleContext(Context* previous,
+MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
                                          ScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+        AllocateFixedArray(scope_info->ContextLength(), TENURED);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(module_context_map());
-  context->set_previous(previous);
+  context->set_map_no_write_barrier(global_context_map());
+  context->set_closure(function);
+  context->set_previous(function->context());
   context->set_extension(scope_info);
-  context->set_global(previous->global());
+  context->set_global_object(function->context()->global_object());
+  ASSERT(context->IsGlobalContext());
+  ASSERT(result->IsContext());
+  return context;
+}
+
+
+MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateFixedArray(scope_info->ContextLength(), TENURED);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map_no_write_barrier(module_context_map());
+  // Context links will be set later.
+  context->set_extension(Smi::FromInt(0));
   return context;
 }
 
@@ -4863,9 +5147,9 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   context->set_map_no_write_barrier(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
-  context->set_extension(NULL);
-  context->set_global(function->context()->global());
-  context->set_qml_global(function->context()->qml_global());
+  context->set_extension(Smi::FromInt(0));
+  context->set_global_object(function->context()->global_object());
+  context->set_qml_global_object(function->context()->qml_global_object());
   return context;
 }
 
@@ -4885,8 +5169,8 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
-  context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
+  context->set_global_object(previous->global_object());
+  context->set_qml_global_object(previous->qml_global_object());
   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
   return context;
 }
 
@@ -4904,8 +5188,8 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
-  context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
+  context->set_global_object(previous->global_object());
+  context->set_qml_global_object(previous->qml_global_object());
   return context;
 }
 
@@ -4923,8 +5207,8 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
-  context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
+  context->set_global_object(previous->global_object());
+  context->set_qml_global_object(previous->qml_global_object());
   return context;
 }
 
@@ -4998,12 +5282,17 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
 
 
 bool Heap::IdleNotification(int hint) {
+  // Hints greater than this value indicate that
+  // the embedder is requesting a lot of GC work.
   const int kMaxHint = 1000;
+  // Minimal hint that allows to do full GC.
+  const int kMinHintForFullGC = 100;
   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
   // The size factor is in range [5..250]. The numbers here are chosen from
   // experiments. If you changes them, make sure to test with
   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
-  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+  intptr_t step_size =
+      size_factor * IncrementalMarking::kAllocatedThreshold;
 
   if (contexts_disposed_ > 0) {
     if (hint >= kMaxHint) {
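The IdleNotification hunk above clamps the embedder's hint into [20..1000] and derives the incremental-marking step size from it, with hint >= kMinHintForFullGC (100) later gating full collections. The arithmetic in isolation; kAllocatedThreshold's value below is an assumption for the demo, not taken from the diff:

    #include <cstdio>

    // Size-factor arithmetic from the hunk: clamp the hint (roughly, idle
    // milliseconds granted by the embedder) to [20..1000], divide by 4, so
    // the factor lands in [5..250], then scale the marking step by it.
    static const int kMaxHint = 1000;
    static const int kMinHintForFullGC = 100;
    static const long kAllocatedThreshold = 32 * 1024;  // assumed, not from the diff

    static long StepSize(int hint) {
      int clamped = hint < 20 ? 20 : (hint > kMaxHint ? kMaxHint : hint);
      long size_factor = clamped / 4;  // in [5..250]
      return size_factor * kAllocatedThreshold;
    }

    int main() {
      for (int hint = 0; hint <= 1200; hint += 300) {
        printf("hint=%4d step=%8ld full-GC allowed=%d\n",
               hint, StepSize(hint), hint >= kMinHintForFullGC);
      }
    }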
@@ -5066,16 +5355,30 @@ bool Heap::IdleNotification(int hint) {
   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
   ms_count_at_last_idle_notification_ = ms_count_;
 
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
+                              mark_sweeps_since_idle_round_started_;
+
+  if (remaining_mark_sweeps <= 0) {
     FinishIdleRound();
     return true;
   }
 
   if (incremental_marking()->IsStopped()) {
-    incremental_marking()->Start();
+    // If there are no more than two GCs left in this idle round and we are
+    // allowed to do a full GC, then make those GCs full in order to compact
+    // the code space.
+    // TODO(ulan): Once we enable code compaction for incremental marking,
+    // we can get rid of this special case and always start incremental marking.
+    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: finalize idle round");
+    } else {
+      incremental_marking()->Start();
+    }
+  }
+  if (!incremental_marking()->IsStopped()) {
+    AdvanceIdleIncrementalMarking(step_size);
   }
-
-  AdvanceIdleIncrementalMarking(step_size);
   return false;
 }
 
@@ -5249,9 +5552,9 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
 }
 
 
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 void Heap::Verify() {
-  ASSERT(HasBeenSetUp());
+  CHECK(HasBeenSetUp());
 
   store_buffer()->Verify();
 
@@ -5269,38 +5572,8 @@ void Heap::Verify() {
   cell_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
-
-  VerifyNoAccessorPairSharing();
-}
-
-
-void Heap::VerifyNoAccessorPairSharing() {
-  // Verification is done in 2 phases: First we mark all AccessorPairs, checking
-  // that we mark only unmarked pairs, then we clear all marks, restoring the
-  // initial state. We use the Smi tag of the AccessorPair's getter as the
-  // marking bit, because we can never see a Smi as the getter.
-  for (int phase = 0; phase < 2; phase++) {
-    HeapObjectIterator iter(map_space());
-    for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
-      if (obj->IsMap()) {
-        DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
-        for (int i = 0; i < descs->number_of_descriptors(); i++) {
-          if (descs->GetType(i) == CALLBACKS &&
-              descs->GetValue(i)->IsAccessorPair()) {
-            AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
-            uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
-            uintptr_t after = (phase == 0) ?
-                ((before & ~kSmiTagMask) | kSmiTag) :
-                ((before & ~kHeapObjectTag) | kHeapObjectTag);
-            CHECK(before != after);
-            accessors->set_getter(reinterpret_cast<Object*>(after));
-          }
-        }
-      }
-    }
-  }
 }
-#endif  // DEBUG
+#endif
 
 
 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
@@ -5393,7 +5666,6 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
 }
 
 
-#ifdef DEBUG
 void Heap::ZapFromSpace() {
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
@@ -5406,7 +5678,6 @@ void Heap::ZapFromSpace() {
     }
   }
 }
-#endif  // DEBUG
 
 
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
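The Verify and ZapFromSpace hunks move heap verification from the blanket DEBUG flag to a dedicated VERIFY_HEAP one and switch ASSERT to CHECK, so verification can run (and fail loudly) in builds without DEBUG. A sketch of that conditional-compilation split; CHECK and ASSERT below are reduced stand-ins, not V8's checks.h definitions:

    #include <cstdio>
    #include <cstdlib>

    // CHECK stays active in every build; ASSERT compiles away without DEBUG.
    #define CHECK(c) \
      do { if (!(c)) { fprintf(stderr, "CHECK failed: %s\n", #c); abort(); } } while (0)

    #ifdef DEBUG
    #define ASSERT(c) CHECK(c)
    #else
    #define ASSERT(c) ((void)0)  // no-op in release builds
    #endif

    static bool heap_set_up = true;

    #ifdef VERIFY_HEAP
    // Compiled only when verification support is wanted, independent of DEBUG,
    // so a release build with -DVERIFY_HEAP can still verify the heap.
    static void Verify() {
      CHECK(heap_set_up);
    }
    #endif

    int main() {
      ASSERT(heap_set_up);  // vanishes unless built with -DDEBUG
    #ifdef VERIFY_HEAP
      Verify();             // only with -DVERIFY_HEAP
    #endif
      printf("ok, heap_set_up=%d\n", heap_set_up);
    }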
@@ -5656,6 +5927,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
 
   // Iterate over local handles in handle scopes.
   isolate_->handle_scope_implementer()->Iterate(v);
+  isolate_->IterateDeferredHandles(v);
   v->Synchronize(VisitorSynchronization::kHandleScope);
 
   // Iterate over the builtin code objects and code stubs in the
@@ -5718,8 +5990,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
     if (max_semispace_size < Page::kPageSize) {
       max_semispace_size = Page::kPageSize;
       if (FLAG_trace_gc) {
-        PrintF("Max semispace size cannot be less than %dkbytes\n",
-               Page::kPageSize >> 10);
+        PrintPID("Max semispace size cannot be less than %dkbytes\n",
+                 Page::kPageSize >> 10);
      }
    }
    max_semispace_size_ = max_semispace_size;
@@ -5734,8 +6006,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
-        PrintF("Max semispace size cannot be more than %dkbytes\n",
-               reserved_semispace_size_ >> 10);
+        PrintPID("Max semispace size cannot be more than %dkbytes\n",
+                 reserved_semispace_size_ >> 10);
      }
    }
  } else {
@@ -5760,7 +6032,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
-  external_allocation_limit_ = 10 * max_semispace_size_;
+  external_allocation_limit_ = 16 * max_semispace_size_;
 
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
@@ -6110,7 +6382,7 @@ bool Heap::SetUp(bool create_heap_objects) {
     // Create initial objects
     if (!CreateInitialObjects()) return false;
 
-    global_contexts_list_ = undefined_value();
+    native_contexts_list_ = undefined_value();
   }
 
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -6118,6 +6390,8 @@ bool Heap::SetUp(bool create_heap_objects) {
 
   store_buffer()->SetUp();
 
+  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+
   return true;
 }
 
@@ -6140,16 +6414,18 @@ void Heap::SetStackLimits() {
 
 
 void Heap::TearDown() {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
+
   if (FLAG_print_cumulative_gc_stat) {
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
+    PrintF("total_gc_time=%d ", total_gc_time_ms_);
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
            get_max_alive_after_gc());
@@ -6203,6 +6479,8 @@ void Heap::TearDown() {
 
   isolate_->memory_allocator()->TearDown();
 
+  delete relocation_mutex_;
+
 #ifdef DEBUG
   delete debug_utils_;
   debug_utils_ = NULL;
@@ -6620,7 +6898,7 @@ void PathTracer::TracePathFrom(Object** root) {
   ASSERT((search_target_ == kAnyGlobalObject) ||
          search_target_->IsHeapObject());
   found_target_in_trace_ = false;
-  object_stack_.Clear();
+  Reset();
 
   MarkVisitor mark_visitor(this);
   MarkRecursively(root, &mark_visitor);
@@ -6632,8 +6910,8 @@ void PathTracer::TracePathFrom(Object** root) {
 }
 
 
-static bool SafeIsGlobalContext(HeapObject* obj) {
-  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+static bool SafeIsNativeContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
 }
 
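ConfigureHeap (above) rounds the semispace sizes with RoundUpToPowerOf2 before applying them. V8 keeps its own implementation in its utility headers; the bit-smearing version below is the classic technique and is an assumption, not copied from the tree:

    #include <cstdio>
    #include <cstdint>

    // Propagating the highest set bit of x-1 into every lower position turns
    // it into a mask of ones, so adding 1 yields the next power of two.
    static uint32_t RoundUpToPowerOf2(uint32_t x) {
      if (x <= 1) return 1;
      x -= 1;
      x |= x >> 1;
      x |= x >> 2;
      x |= x >> 4;
      x |= x >> 8;
      x |= x >> 16;
      return x + 1;
    }

    int main() {
      printf("%u %u %u\n", RoundUpToPowerOf2(3),        // 4
                           RoundUpToPowerOf2(4096),     // 4096 (already a power)
                           RoundUpToPowerOf2(700000));  // 1048576
    }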
@@ -6655,7 +6933,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
     return;
   }
 
-  bool is_global_context = SafeIsGlobalContext(obj);
+  bool is_native_context = SafeIsNativeContext(obj);
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6665,7 +6943,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
   // Scan the object body.
-  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
     // This is specialized to scan Context's properly.
     Object** start = reinterpret_cast<Object**>(obj->address() +
                                                 Context::kHeaderSize);
@@ -6724,11 +7002,7 @@ void PathTracer::ProcessResults() {
     for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
-#ifdef OBJECT_PRINT
      obj->Print();
-#else
-      obj->ShortPrint();
-#endif
    }
    PrintF("=====================================\n");
  }
@@ -6737,6 +7011,15 @@ void PathTracer::ProcessResults() {
 
 
 #ifdef DEBUG
+// Triggers a depth-first traversal of reachable objects from one
+// given root object and finds a path to a specific heap object and
+// prints it.
+void Heap::TracePathToObjectFrom(Object* target, Object* root) {
+  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+  tracer.VisitPointer(&root);
+}
+
+
 // Triggers a depth-first traversal of reachable objects from roots
 // and finds a path to a specific heap object and prints it.
 void Heap::TracePathToObject(Object* target) {
@@ -6824,6 +7107,7 @@ GCTracer::~GCTracer() {
 
   // Update cumulative GC statistics if required.
   if (FLAG_print_cumulative_gc_stat) {
+    heap_->total_gc_time_ms_ += time;
     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                      heap_->alive_after_last_gc_);
@@ -6831,9 +7115,13 @@ GCTracer::~GCTracer() {
       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                    static_cast<int>(spent_in_mutator_));
     }
+  } else if (FLAG_trace_gc_verbose) {
+    heap_->total_gc_time_ms_ += time;
   }
 
-  PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
+
+  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
 
   if (!FLAG_trace_gc_nvp) {
     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
@@ -6875,9 +7163,7 @@ GCTracer::~GCTracer() {
     PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
-    PrintF("mutator=%d ",
-           static_cast<int>(spent_in_mutator_));
-
+    PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
     PrintF("gc=");
     switch (collector_) {
       case SCAVENGER:
@@ -7004,7 +7290,7 @@ void KeyedLookupCache::Clear() {
 
 
 void DescriptorLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
+  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
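The GCTracer hunks above add a cumulative total_gc_time_ms_ alongside the existing max-pause and min-mutator tracking, updated from the tracer's destructor when each collection ends. A reduced stand-in for that bookkeeping (the struct below is illustrative, not V8's Heap):

    #include <cstdio>
    #include <algorithm>

    // Each GC folds its pause into a running total next to the existing
    // extremum tracking; field names mirror the patch.
    struct HeapStats {
      int max_gc_pause_;
      int total_gc_time_ms_;  // new in this patch
      int min_in_mutator_;
    };

    static void RecordGC(HeapStats* h, int pause_ms, int mutator_ms) {
      h->total_gc_time_ms_ += pause_ms;  // cumulative, reported at TearDown
      h->max_gc_pause_ = std::max(h->max_gc_pause_, pause_ms);
      h->min_in_mutator_ = std::min(h->min_in_mutator_, mutator_ms);
    }

    int main() {
      HeapStats h = { 0, 0, 1 << 30 };
      RecordGC(&h, 12, 340);
      RecordGC(&h, 7, 80);
      printf("total=%dms max=%dms min_in_mutator=%dms\n",
             h.total_gc_time_ms_, h.max_gc_pause_, h.min_in_mutator_);
    }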
@@ -7044,7 +7330,7 @@ void TranscendentalCache::Clear() {
 
 void ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+    if (new_space_strings_[i] == heap_->the_hole_value()) {
       continue;
     }
     if (heap_->InNewSpace(new_space_strings_[i])) {
@@ -7056,16 +7342,18 @@ void ExternalStringTable::CleanUp() {
   new_space_strings_.Rewind(last);
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+    if (old_space_strings_[i] == heap_->the_hole_value()) {
       continue;
     }
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
     old_space_strings_[last++] = old_space_strings_[i];
   }
   old_space_strings_.Rewind(last);
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
 }
 
 
@@ -7156,4 +7444,63 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
 }
 
+
+void Heap::ClearObjectStats(bool clear_last_time_stats) {
+  memset(object_counts_, 0, sizeof(object_counts_));
+  memset(object_sizes_, 0, sizeof(object_sizes_));
+  if (clear_last_time_stats) {
+    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+  }
+}
+
+
+static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void Heap::CheckpointObjectStats() {
+  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+  Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  counters->count_of_##name()->Increment(                 \
+      static_cast<int>(object_counts_[name]));            \
+  counters->count_of_##name()->Decrement(                 \
+      static_cast<int>(object_counts_last_time_[name]));  \
+  counters->size_of_##name()->Increment(                  \
+      static_cast<int>(object_sizes_[name]));             \
+  counters->size_of_##name()->Decrement(                  \
+      static_cast<int>(object_sizes_last_time_[name]));
+  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+  int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
+  counters->count_of_CODE_TYPE_##name()->Increment(       \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_CODE_TYPE_##name()->Decrement(       \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_CODE_TYPE_##name()->Increment(        \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_CODE_TYPE_##name()->Decrement(        \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
+  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
+      static_cast<int>(object_sizes_last_time_[index]));
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+  memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+  memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+  ClearObjectStats();
+}
+
 } }  // namespace v8::internal
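CheckpointObjectStats() nudges each external counter by the delta between the current per-GC tallies and the previous snapshot (Increment by current, Decrement by last), then refreshes the snapshot and clears the working arrays. The same pattern in miniature, with an illustrative Counter type standing in for the StatsCounter objects behind counters->count_of_...():

    #include <cstdio>
    #include <cstring>

    struct Counter {
      int value;
      void Increment(int by) { value += by; }
      void Decrement(int by) { value -= by; }
    };

    static const int kTypes = 3;
    static int object_counts_[kTypes];
    static int object_counts_last_time_[kTypes];
    static Counter count_of_[kTypes];

    static void CheckpointObjectStats() {
      for (int t = 0; t < kTypes; t++) {
        // Net effect: count_of_[t] += current tally - previous snapshot.
        count_of_[t].Increment(object_counts_[t]);
        count_of_[t].Decrement(object_counts_last_time_[t]);
      }
      // Refresh the snapshot, then clear the working tallies for the next GC
      // (the role ClearObjectStats() plays in the patch).
      memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
      memset(object_counts_, 0, sizeof(object_counts_));
    }

    int main() {
      object_counts_[0] = 10;   // first GC sees 10 objects of type 0
      CheckpointObjectStats();
      object_counts_[0] = 4;    // next GC sees only 4
      CheckpointObjectStats();
      printf("counter=%d\n", count_of_[0].value);  // 10 + (4 - 10) = 4
    }

The counters therefore track the live population as of the last checkpoint rather than a monotonically growing total, which is what makes them usable for the count_of_/size_of_ histograms.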