Diffstat (limited to 'src/3rdparty/v8/src/incremental-marking.cc')
-rw-r--r-- | src/3rdparty/v8/src/incremental-marking.cc | 393
1 file changed, 169 insertions(+), 224 deletions(-)
diff --git a/src/3rdparty/v8/src/incremental-marking.cc b/src/3rdparty/v8/src/incremental-marking.cc
index 94afffa..b34d6d9 100644
--- a/src/3rdparty/v8/src/incremental-marking.cc
+++ b/src/3rdparty/v8/src/incremental-marking.cc
@@ -31,6 +31,8 @@
 #include "code-stubs.h"
 #include "compilation-cache.h"
+#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "v8conversions.h"
 
 namespace v8 {
@@ -42,7 +44,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       state_(STOPPED),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(false),
-      marker_(this, heap->mark_compact_collector()),
       steps_count_(0),
       steps_took_(0),
       longest_step_(0.0),
@@ -51,7 +52,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       steps_count_since_last_gc_(0),
       steps_took_since_last_gc_(0),
       should_hurry_(false),
-      allocation_marking_factor_(0),
+      marking_speed_(0),
       allocated_(0),
       no_marking_scope_depth_(0) {
 }
@@ -65,7 +66,7 @@ void IncrementalMarking::TearDown() {
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                          Object** slot,
                                          Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
     MarkBit obj_bit = Marking::MarkBitFrom(obj);
     if (Marking::IsBlack(obj_bit)) {
       // Object is not going to be rescanned we need to record the slot.
@@ -80,17 +81,19 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                              Object* value,
                                              Isolate* isolate) {
   ASSERT(obj->IsHeapObject());
-
-  // Fast cases should already be covered by RecordWriteStub.
-  ASSERT(value->IsHeapObject());
-  ASSERT(!value->IsHeapNumber());
-  ASSERT(!value->IsString() ||
-         value->IsConsString() ||
-         value->IsSlicedString());
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
-
   IncrementalMarking* marking = isolate->heap()->incremental_marking();
   ASSERT(!marking->is_compacting_);
+
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
+
   marking->RecordWrite(obj, NULL, value);
 }
 
@@ -98,8 +101,20 @@ void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                           Object** slot,
                                                           Isolate* isolate) {
+  ASSERT(obj->IsHeapObject());
   IncrementalMarking* marking = isolate->heap()->incremental_marking();
   ASSERT(marking->is_compacting_);
+
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
+
   marking->RecordWrite(obj, slot, *slot);
 }
 
 
@@ -125,9 +140,9 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
 
 
 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
-                                                Object** slot,
-                                                Code* value) {
-  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value)) {
     ASSERT(slot != NULL);
     heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
@@ -160,93 +175,92 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
 }
 
 
-class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
- public:
-  IncrementalMarkingMarkingVisitor(Heap* heap,
-                                   IncrementalMarking* incremental_marking)
-      : heap_(heap),
-        incremental_marking_(incremental_marking) {
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    if (target->NonFailureIsHeapObject()) {
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
-      MarkObject(target);
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+                                            -heap_obj->Size());
     }
+    Marking::AnyToGrey(mark_bit);
   }
+}
 
+
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
-        && (target->ic_age() != heap_->global_ic_age())) {
-      IC::Clear(rinfo->pc());
-      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    }
-    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
-    MarkObject(target);
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
-    MarkObject(target);
+class IncrementalMarkingMarkingVisitor
+    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+ public:
+  static void Initialize() {
+    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
+
+    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
+    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
   }
 
-  void VisitCodeEntry(Address entry_address) {
-    Object* target = Code::GetObjectFromEntryAddress(entry_address);
-    heap_->mark_compact_collector()->
-        RecordCodeEntrySlot(entry_address, Code::cast(target));
-    MarkObject(target);
+  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
+    Context* context = Context::cast(object);
+
+    // We will mark cache black with a separate pass
+    // when we finish marking.
+    MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
+    VisitNativeContext(map, context);
   }
 
-  void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
-    if (shared->ic_age() != heap_->global_ic_age()) {
-      shared->ResetForNewContext(heap_->global_ic_age());
-    }
+  static void VisitJSWeakMap(Map* map, HeapObject* object) {
+    Heap* heap = map->GetHeap();
+    VisitPointers(heap,
+                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
+                  HeapObject::RawField(object, JSWeakMap::kSize));
   }
 
-  void VisitPointer(Object** p) {
+  static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
+
+  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
     Object* obj = *p;
     if (obj->NonFailureIsHeapObject()) {
-      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
-      MarkObject(obj);
+      heap->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(heap, obj);
     }
   }
 
-  void VisitPointers(Object** start, Object** end) {
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
     for (Object** p = start; p < end; p++) {
       Object* obj = *p;
       if (obj->NonFailureIsHeapObject()) {
-        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
-        MarkObject(obj);
+        heap->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(heap, obj);
       }
     }
   }
 
- private:
-  // Mark object pointed to by p.
-  INLINE(void MarkObject(Object* obj)) {
+  // Marks the object grey and pushes it on the marking stack.
+  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
     HeapObject* heap_object = HeapObject::cast(obj);
     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
     if (mark_bit.data_only()) {
-      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+      if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
         MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                               heap_object->Size());
       }
     } else if (Marking::IsWhite(mark_bit)) {
-      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
     }
   }
 
-  Heap* heap_;
-  IncrementalMarking* incremental_marking_;
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (Marking::IsWhite(mark_bit)) {
+      mark_bit.Set();
+      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                            heap_object->Size());
+      return true;
+    }
+    return false;
+  }
 };
@@ -290,6 +304,11 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 };
 
 
+void IncrementalMarking::Initialize() {
+  IncrementalMarkingMarkingVisitor::Initialize();
+}
+
+
 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                               bool is_marking,
                                               bool is_compacting) {
@@ -498,19 +517,6 @@ void IncrementalMarking::Start() {
 }
 
 
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
-  if (obj->IsHeapObject()) {
-    HeapObject* heap_obj = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
-    if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
-                                            -heap_obj->Size());
-    }
-    Marking::AnyToGrey(mark_bit);
-  }
-}
-
-
 void IncrementalMarking::StartMarking(CompactionFlag flag) {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
@@ -537,8 +543,8 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
 
   ActivateIncrementalWriteBarrier();
 
-#ifdef DEBUG
   // Marking bits are cleared by the sweeper.
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
   }
@@ -623,20 +629,50 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
 }
 
 
-void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
-  v->VisitPointers(
-      HeapObject::RawField(
-          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
-      HeapObject::RawField(
-          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+  if (Marking::IsWhite(map_mark_bit)) {
+    WhiteToGreyAndPush(map, map_mark_bit);
+  }
+
+  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+
+  MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+  SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+              (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+  Marking::MarkBlack(obj_mark_bit);
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+}
 
-  MarkCompactCollector* collector = heap_->mark_compact_collector();
-  for (int idx = Context::FIRST_WEAK_SLOT;
-       idx < Context::GLOBAL_CONTEXT_SLOTS;
-       ++idx) {
-    Object** slot =
-        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
-    collector->RecordSlot(slot, slot, *slot);
+
+void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    int size = obj->SizeFromMap(map);
+    bytes_to_process -= size;
+    VisitObject(map, obj, size);
+  }
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty()) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    VisitObject(map, obj, obj->SizeFromMap(map));
   }
 }
@@ -650,45 +686,7 @@ void IncrementalMarking::Hurry() {
     }
     // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
-    Map* filler_map = heap_->one_pointer_filler_map();
-    Map* global_context_map = heap_->global_context_map();
-    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
-    while (!marking_deque_.IsEmpty()) {
-      HeapObject* obj = marking_deque_.Pop();
-
-      // Explicitly skip one word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) {
-        continue;
-      } else if (map == global_context_map) {
-        // Global contexts have weak fields.
-        VisitGlobalContext(Context::cast(obj), &marking_visitor);
-      } else if (map->instance_type() == MAP_TYPE) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-
-        // When map collection is enabled we have to mark through map's
-        // transitions and back pointers in a special way to make these links
-        // weak.  Only maps for subclasses of JSReceiver can have transitions.
-        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-        if (FLAG_collect_maps &&
-            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-          marker_.MarkMapContents(map);
-        } else {
-          marking_visitor.VisitPointers(
-              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-        }
-      } else {
-        obj->Iterate(&marking_visitor);
-      }
-
-      MarkBit mark_bit = Marking::MarkBitFrom(obj);
-      ASSERT(!Marking::IsBlack(mark_bit));
-      Marking::MarkBlack(mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    }
+    ProcessMarkingDeque();
     state_ = COMPLETE;
     if (FLAG_trace_incremental_marking) {
       double end = OS::TimeCurrentMillis();
@@ -704,7 +702,7 @@ void IncrementalMarking::Hurry() {
         PolymorphicCodeCache::kSize);
   }
 
-  Object* context = heap_->global_contexts_list();
+  Object* context = heap_->native_contexts_list();
   while (!context->IsUndefined()) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
@@ -794,11 +792,25 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
 
   allocated_ += allocated_bytes;
 
-  if (allocated_ < kAllocatedThreshold) return;
+  if (allocated_ < kAllocatedThreshold &&
+      write_barriers_invoked_since_last_step_ <
+          kWriteBarriersInvokedThreshold) {
+    return;
+  }
 
   if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
 
-  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  // The marking speed is driven either by the allocation rate or by the rate
+  // at which we are having to check the color of objects in the write barrier.
+  // It is possible for a tight non-allocating loop to run a lot of write
+  // barriers before we get here and check them (marking can only take place on
+  // allocation), so to reduce the lumpiness we don't use the write barriers
+  // invoked since last step directly to determine the amount of work to do.
+  intptr_t bytes_to_process =
+      marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+  allocated_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
+
   bytes_scanned_ += bytes_to_process;
 
   double start = 0;
@@ -813,87 +825,19 @@
       StartMarking(PREVENT_COMPACTION);
     }
   } else if (state_ == MARKING) {
-    Map* filler_map = heap_->one_pointer_filler_map();
-    Map* global_context_map = heap_->global_context_map();
-    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
-    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
-      HeapObject* obj = marking_deque_.Pop();
-
-      // Explicitly skip one word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) continue;
-
-      int size = obj->SizeFromMap(map);
-      bytes_to_process -= size;
-      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-      if (Marking::IsWhite(map_mark_bit)) {
-        WhiteToGreyAndPush(map, map_mark_bit);
-      }
-
-      // TODO(gc) switch to static visitor instead of normal visitor.
-      if (map == global_context_map) {
-        // Global contexts have weak fields.
-        Context* ctx = Context::cast(obj);
-
-        // We will mark cache black with a separate pass
-        // when we finish marking.
-        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
-        VisitGlobalContext(ctx, &marking_visitor);
-      } else if (map->instance_type() == MAP_TYPE) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-
-        // When map collection is enabled we have to mark through map's
-        // transitions and back pointers in a special way to make these links
-        // weak.  Only maps for subclasses of JSReceiver can have transitions.
-        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-        if (FLAG_collect_maps &&
-            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-          marker_.MarkMapContents(map);
-        } else {
-          marking_visitor.VisitPointers(
-              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-        }
-      } else if (map->instance_type() == JS_FUNCTION_TYPE) {
-        marking_visitor.VisitPointers(
-            HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
-            HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));
-
-        marking_visitor.VisitCodeEntry(
-            obj->address() + JSFunction::kCodeEntryOffset);
-
-        marking_visitor.VisitPointers(
-            HeapObject::RawField(obj,
-                                 JSFunction::kCodeEntryOffset + kPointerSize),
-            HeapObject::RawField(obj,
-                                 JSFunction::kNonWeakFieldsEndOffset));
-      } else {
-        obj->IterateBody(map->instance_type(), size, &marking_visitor);
-      }
-
-      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
-                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
-      Marking::MarkBlack(obj_mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
-    }
+    ProcessMarkingDeque(bytes_to_process);
     if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }
 
-  allocated_ = 0;
-
   steps_count_++;
   steps_count_since_last_gc_++;
 
   bool speed_up = false;
 
-  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
     if (FLAG_trace_gc) {
-      PrintF("Speed up marking after %d steps\n",
-             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+      PrintPID("Speed up marking after %d steps\n",
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
     }
     speed_up = true;
  }
@@ -902,35 +846,35 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
       (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
 
   bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
           old_generation_space_available_at_start_of_incremental_);
 
   if (space_left_is_very_small ||
       only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
     speed_up = true;
   }
 
   bool size_of_old_space_multiplied_by_n_during_marking =
       (heap_->PromotedTotalSize() >
-       (allocation_marking_factor_ + 1) *
+       (marking_speed_ + 1) *
            old_generation_space_used_at_start_of_incremental_);
   if (size_of_old_space_multiplied_by_n_during_marking) {
     speed_up = true;
     if (FLAG_trace_gc) {
-      PrintF("Speed up marking because of heap size increase\n");
+      PrintPID("Speed up marking because of heap size increase\n");
    }
  }
 
   int64_t promoted_during_marking = heap_->PromotedTotalSize()
       - old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t delay = marking_speed_ * MB;
   intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
 
   // We try to scan at at least twice the speed that we are allocating.
   if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
     if (FLAG_trace_gc) {
-      PrintF("Speed up marking because marker was not keeping up\n");
+      PrintPID("Speed up marking because marker was not keeping up\n");
     }
     speed_up = true;
   }
@@ -938,15 +882,15 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   if (speed_up) {
     if (state_ != MARKING) {
       if (FLAG_trace_gc) {
-        PrintF("Postponing speeding up marking until marking starts\n");
+        PrintPID("Postponing speeding up marking until marking starts\n");
       }
     } else {
-      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
-      allocation_marking_factor_ = static_cast<int>(
-          Min(kMaxAllocationMarkingFactor,
-              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+      marking_speed_ += kMarkingSpeedAccellerationInterval;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed,
+              static_cast<intptr_t>(marking_speed_ * 1.3)));
       if (FLAG_trace_gc) {
-        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+        PrintPID("Marking speed increased to %d\n", marking_speed_);
       }
     }
   }
@@ -972,8 +916,9 @@ void IncrementalMarking::ResetStepCounters() {
   steps_count_since_last_gc_ = 0;
   steps_took_since_last_gc_ = 0;
   bytes_rescanned_ = 0;
-  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  marking_speed_ = kInitialMarkingSpeed;
   bytes_scanned_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
 }
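
A note on the write-barrier accounting that both RecordWriteFromCode and RecordWriteForEvacuationFromCode gain in this patch: each MemoryChunk carries a counter that the generated barrier code burns down, and the slow path only folds the consumed budget into write_barriers_invoked_since_last_step_ once the counter has dropped below half the granularity, then refills it. The following is a minimal standalone sketch of that amortization, not the real implementation; Chunk is a stand-in for v8::internal::MemoryChunk and the granularity value of 500 is an assumption, not taken from this snapshot.

#include <cstdio>

// Assumed stand-in for MemoryChunk::kWriteBarrierCounterGranularity.
const int kWriteBarrierCounterGranularity = 500;

// Stand-in for v8::internal::MemoryChunk.
struct Chunk {
  int write_barrier_counter = kWriteBarrierCounterGranularity;
};

struct IncrementalMarking {
  long write_barriers_invoked_since_last_step = 0;

  // Mirrors the hunks above: only once the per-chunk counter has burned
  // through half its budget is the consumed amount added to the global
  // tally and the counter refilled, so the cross-chunk bookkeeping runs
  // at most once per ~250 barriers on a given chunk.
  void RecordWriteSlow(Chunk* chunk) {
    if (chunk->write_barrier_counter < kWriteBarrierCounterGranularity / 2) {
      write_barriers_invoked_since_last_step +=
          kWriteBarrierCounterGranularity - chunk->write_barrier_counter;
      chunk->write_barrier_counter = kWriteBarrierCounterGranularity;
    }
  }
};

int main() {
  Chunk chunk;
  IncrementalMarking marking;
  for (int i = 0; i < 1000; i++) {
    chunk.write_barrier_counter--;  // done by the barrier stub in real V8
    marking.RecordWriteSlow(&chunk);
  }
  printf("accounted: %ld barriers\n",
         marking.write_barriers_invoked_since_last_step);
  return 0;
}

The count is deliberately approximate: up to half a granularity's worth of barriers can be pending on a chunk at any time, which is cheap to tolerate and avoids touching shared state on every store.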
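The new ProcessMarkingDeque(intptr_t bytes_to_process) overload is the heart of an incremental step: pop grey objects, visit their bodies (greying any white children), blacken them, and stop once the byte budget is spent. Below is a reduced model under simplifying assumptions, with plain structs standing in for V8's heap objects, mark bits, and marking deque; the one-word-filler skip in the real code has no analogue here because it only exists to keep V8's markbit patterns valid.

#include <cstdio>
#include <deque>
#include <vector>

enum Color { WHITE, GREY, BLACK };

struct Object {
  Color color = WHITE;
  int size = 16;
  std::vector<Object*> children;
};

static std::deque<Object*> marking_deque;
static long live_bytes = 0;

// White -> grey, and queue for later scanning (WhiteToGreyAndPush above).
void WhiteToGreyAndPush(Object* obj) {
  if (obj->color == WHITE) {
    obj->color = GREY;
    marking_deque.push_back(obj);
  }
}

// One incremental step: drain the deque until the byte budget runs out.
void ProcessMarkingDeque(long bytes_to_process) {
  while (!marking_deque.empty() && bytes_to_process > 0) {
    Object* obj = marking_deque.front();
    marking_deque.pop_front();
    bytes_to_process -= obj->size;
    for (Object* child : obj->children) WhiteToGreyAndPush(child);
    obj->color = BLACK;  // grey -> black once its body has been visited
    live_bytes += obj->size;
  }
}

int main() {
  // Tiny object graph: root -> a -> b.
  Object b, a, root;
  a.children.push_back(&b);
  root.children.push_back(&a);
  WhiteToGreyAndPush(&root);
  ProcessMarkingDeque(32);  // budget covers root and a, but not b
  printf("live bytes so far: %ld, deque left: %zu\n",
         live_bytes, marking_deque.size());
  return 0;
}

The zero-argument ProcessMarkingDeque() in the diff is the same loop without the budget check, used by Hurry() to finish marking in one go.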
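Step() now fires when either accumulated allocation or accumulated write-barrier work crosses a threshold, and sizes the step as marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold) rather than using the raw barrier count, which smooths out the "lumpiness" the new comment describes. A worked example under assumed constants (both threshold values are invented for illustration; the real ones live in incremental-marking.h of this snapshot):

#include <algorithm>
#include <cstdio>

int main() {
  const long kAllocatedThreshold = 65536;             // assumed
  const long kWriteBarriersInvokedThreshold = 32768;  // assumed
  long marking_speed = 8;  // current speed factor

  // Case 1: allocation-driven step. 8 * max(131072, 32768) = 1048576 bytes.
  long allocated = 131072;
  long write_barriers = 100;
  if (allocated >= kAllocatedThreshold ||
      write_barriers >= kWriteBarriersInvokedThreshold) {
    long bytes_to_process =
        marking_speed * std::max(allocated, kWriteBarriersInvokedThreshold);
    printf("allocation-driven budget: %ld bytes\n", bytes_to_process);
  }

  // Case 2: a tight non-allocating loop that only runs write barriers still
  // triggers a step, but the budget uses the threshold, not the raw barrier
  // count: 8 * max(0, 32768) = 262144 bytes.
  allocated = 0;
  write_barriers = 50000;
  if (allocated >= kAllocatedThreshold ||
      write_barriers >= kWriteBarriersInvokedThreshold) {
    long bytes_to_process =
        marking_speed * std::max(allocated, kWriteBarriersInvokedThreshold);
    printf("barrier-driven budget: %ld bytes\n", bytes_to_process);
  }
  return 0;
}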
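Finally, the renamed speed-up logic at the bottom of Step() grows marking_speed_ geometrically whenever a heuristic fires (low space left, heap growth during marking, marker not keeping up, or simply every N steps): add a fixed increment, scale by 1.3, clamp to a maximum. A sketch of that growth curve; only the 1.3 factor and the clamping come from the diff, while the initial value, increment, and cap below are assumptions:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kMaxMarkingSpeed = 1000;  // assumed cap
  const int kIncrement = 2;                // assumed fixed increment
  int marking_speed = 1;                   // assumed kInitialMarkingSpeed

  // Each speed-up event compounds: the speed roughly exponentiates by
  // ~1.3x per event until it saturates at the cap.
  for (int event = 1; event <= 12; event++) {
    marking_speed += kIncrement;
    marking_speed = static_cast<int>(
        std::min(kMaxMarkingSpeed,
                 static_cast<intptr_t>(marking_speed * 1.3)));
    printf("after speed-up %2d: marking_speed = %d\n", event, marking_speed);
  }
  return 0;
}

ResetStepCounters() drops the speed back to kInitialMarkingSpeed after each full GC, so the ramp-up starts over for every incremental marking cycle.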