Diffstat (limited to 'src/3rdparty/v8/src/spaces.cc')
-rw-r--r--  src/3rdparty/v8/src/spaces.cc   194
1 file changed, 141 insertions(+), 53 deletions(-)
diff --git a/src/3rdparty/v8/src/spaces.cc b/src/3rdparty/v8/src/spaces.cc
index a0c8f2c..583b2ca 100644
--- a/src/3rdparty/v8/src/spaces.cc
+++ b/src/3rdparty/v8/src/spaces.cc
@@ -447,6 +447,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
+ chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+ chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
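
The two fields initialized here support later hunks in this patch: write_barrier_counter_ throttles write-barrier work per chunk, and high_water_mark_ records the highest offset the allocation top has reached on the chunk, which feeds the CommittedPhysicalMemory() accounting added further down. A minimal sketch of the high-water-mark idea, using hypothetical names rather than the real MemoryChunk declarations:

    // Hypothetical sketch: track the highest allocation offset seen on a chunk.
    #include <algorithm>
    #include <cstdint>

    struct ChunkSketch {
      uintptr_t base;        // start address of the chunk
      int high_water_mark;   // highest offset ever allocated from

      // Called whenever the allocation top moves; only ever ratchets upward.
      void UpdateHighWaterMark(uintptr_t top) {
        int offset = static_cast<int>(top - base);
        high_water_mark = std::max(high_water_mark, offset);
      }
    };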
@@ -496,6 +498,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
VirtualMemory reservation;
Address area_start = NULL;
Address area_end = NULL;
+
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
OS::CommitPageSize()) + CodePageGuardSize();
@@ -528,10 +531,11 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
size_executable_ += reservation.size();
}
-#ifdef DEBUG
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, CodePageGuardStartOffset());
+ ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ }
+
area_start = base + CodePageAreaStartOffset();
area_end = area_start + body_size;
} else {
@@ -543,9 +547,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (base == NULL) return NULL;
-#ifdef DEBUG
- ZapBlock(base, chunk_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, chunk_size);
+ }
area_start = base + Page::kObjectStartOffset;
area_end = base + chunk_size;
@@ -621,9 +625,11 @@ bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size);
+ }
+
isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
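
This hunk, and the two above it, replace the same pattern: zapping freshly committed or freed memory used to be compiled in only under #ifdef DEBUG, and is now guarded by the runtime predicate Heap::ShouldZapGarbage(), so a release build can still opt into zapping (for example when heap verification is wanted) without rebuilding. A hedged sketch of the pattern, with made-up flag and pattern names:

    #include <cstdint>
    #include <cstring>

    // Hypothetical stand-ins for the runtime knob and the fill pattern.
    constexpr uint32_t kZapPattern = 0xdeadbeef;
    static bool should_zap_garbage = false;  // could be set from a flag at startup

    // Fill a block with a recognizable pattern so stale pointers are easy to spot.
    void ZapBlock(void* start, size_t size) {
      uint32_t* p = static_cast<uint32_t*>(start);
      for (size_t i = 0; i < size / sizeof(uint32_t); ++i) p[i] = kZapPattern;
    }

    void OnCommit(void* start, size_t size) {
      if (should_zap_garbage) {  // runtime check instead of #ifdef DEBUG
        ZapBlock(start, size);
      }
    }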
@@ -819,6 +825,18 @@ void PagedSpace::TearDown() {
}
+size_t PagedSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ size_t size = 0;
+ PageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
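
The new PagedSpace::CommittedPhysicalMemory() walks every page and sums per-page usage, but only on platforms where memory is committed lazily; otherwise the existing CommittedMemory() figure already reflects real usage. Updating the high water mark first lets the bytes below the current allocation top count as used. A rough sketch of the same accounting over a plain container (illustrative names, not V8's):

    #include <cstddef>
    #include <vector>

    struct PageSketch {
      size_t committed_physical;  // bytes actually backed by physical memory
    };

    struct SpaceSketch {
      std::vector<PageSketch> pages;
      size_t committed_total;     // bytes reserved/committed by the allocator
      bool lazy_commits;

      size_t CommittedPhysicalMemory() const {
        if (!lazy_commits) return committed_total;  // reservation == real usage
        size_t size = 0;
        for (const PageSketch& p : pages) size += p.committed_physical;
        return size;
      }
    };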
@@ -881,10 +899,10 @@ intptr_t PagedSpace::SizeOfFirstPage() {
size = 192 * KB;
break;
case MAP_SPACE:
- size = 128 * KB;
+ size = 16 * kPointerSize * KB;
break;
case CELL_SPACE:
- size = 96 * KB;
+ size = 16 * kPointerSize * KB;
break;
case CODE_SPACE:
if (kPointerSize == 8) {
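
The fixed 128 KB and 96 KB first-page sizes for the map and cell spaces become 16 * kPointerSize * KB, which scales with the word size, presumably because map and cell objects consist largely of pointer-sized fields:

    16 * kPointerSize * KB = 16 * 4 * 1024 =  64 KB   (32-bit, kPointerSize == 4)
    16 * kPointerSize * KB = 16 * 8 * 1024 = 128 KB   (64-bit, kPointerSize == 8)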
@@ -984,8 +1002,7 @@ void PagedSpace::ReleaseAllUnusedPages() {
void PagedSpace::Print() { }
#endif
-
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (was_swept_conservatively_) return;
@@ -995,23 +1012,23 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
- ASSERT(page->owner() == this);
+ CHECK(page->owner() == this);
if (page == Page::FromAllocationTop(allocation_info_.top)) {
allocation_pointer_found_in_space = true;
}
- ASSERT(page->WasSweptPrecisely());
+ CHECK(page->WasSweptPrecisely());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- ASSERT(end_of_previous_object <= object->address());
+ CHECK(end_of_previous_object <= object->address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1026,15 +1043,14 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
black_size += size;
}
- ASSERT(object->address() + size <= top);
+ CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- ASSERT_LE(black_size, page->LiveBytes());
+ CHECK_LE(black_size, page->LiveBytes());
}
- ASSERT(allocation_pointer_found_in_space);
+ CHECK(allocation_pointer_found_in_space);
}
-#endif
-
+#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
// NewSpace implementation
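
Switching the verifier from #ifdef DEBUG to #ifdef VERIFY_HEAP, and from ASSERT to CHECK, allows heap verification to be compiled into release builds: ASSERT-style macros normally expand to nothing outside debug builds, while CHECK-style macros stay live and abort on failure. A simplified sketch of that distinction (not the actual V8 macro definitions):

    #include <cstdio>
    #include <cstdlib>

    // Simplified stand-ins; the real macros also report file, line, and operands.
    #define CHECK_SKETCH(cond)                                  \
      do {                                                      \
        if (!(cond)) {                                          \
          std::fprintf(stderr, "Check failed: %s\n", #cond);    \
          std::abort();                                         \
        }                                                       \
      } while (false)

    #ifdef DEBUG
    #define ASSERT_SKETCH(cond) CHECK_SKETCH(cond)
    #else
    #define ASSERT_SKETCH(cond) ((void)0)  // compiled away in release builds
    #endif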
@@ -1172,6 +1188,7 @@ void NewSpace::Shrink() {
void NewSpace::UpdateAllocationInfo() {
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
allocation_info_.top = to_space_.page_low();
allocation_info_.limit = to_space_.page_high();
@@ -1258,7 +1275,7 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
@@ -1307,8 +1324,8 @@ void NewSpace::Verify() {
}
// Check semi-spaces.
- ASSERT_EQ(from_space_.id(), kFromSpace);
- ASSERT_EQ(to_space_.id(), kToSpace);
+ CHECK_EQ(from_space_.id(), kFromSpace);
+ CHECK_EQ(to_space_.id(), kToSpace);
from_space_.Verify();
to_space_.Verify();
}
@@ -1384,6 +1401,17 @@ bool SemiSpace::Uncommit() {
}
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
@@ -1524,8 +1552,9 @@ void SemiSpace::set_age_mark(Address mark) {
#ifdef DEBUG
void SemiSpace::Print() { }
+#endif
-
+#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
NewSpacePage* page = anchor_.next_page();
@@ -1555,8 +1584,9 @@ void SemiSpace::Verify() {
page = page->next_page();
}
}
+#endif
-
+#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Addresses belong to same semi-space
NewSpacePage* page = NewSpacePage::FromLimit(start);
@@ -1816,6 +1846,17 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
+
+size_t NewSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ size_t size = to_space_.CommittedPhysicalMemory();
+ if (from_space_.is_committed()) {
+ size += from_space_.CommittedPhysicalMemory();
+ }
+ return size;
+}
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -2027,15 +2068,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ size_in_bytes - old_linear_size);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+ reinterpret_cast<Object**>(new_node->address())[i] =
+ Smi::FromInt(kCodeZapValue);
}
#endif
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
-
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
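
Two changes land in FreeList::Allocate here: the incremental-marking step now runs before the debug fill rather than after it, and the fill value changes from Smi::FromInt(0) to a recognizable zap constant, so a premature read from the not-yet-initialized allocation shows up as an obvious sentinel instead of a plausible-looking zero. A small sketch of filling a fresh allocation with a tagged sentinel (the constant and the tagging scheme are illustrative assumptions, not V8's):

    #include <cstdint>

    // Hypothetical sentinel; a real zap value is chosen to stand out in memory dumps.
    constexpr intptr_t kZapSentinel = 0xbadc0de;

    // Fill an allocation with a tagged small-integer sentinel, word by word.
    void FillWithSentinel(void* start, int size_in_bytes) {
      intptr_t tagged = kZapSentinel << 1;  // assumes Smi-style tagging: low bit clear
      intptr_t* words = static_cast<intptr_t*>(start);
      for (int i = 0; i < size_in_bytes / static_cast<int>(sizeof(intptr_t)); ++i) {
        words[i] = tagged;
      }
    }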
@@ -2257,11 +2299,40 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
Free(top(), old_linear_size);
SetTop(new_area->address(), new_area->address() + size_in_bytes);
- Allocate(size_in_bytes);
return true;
}
+static void RepairFreeList(Heap* heap, FreeListNode* n) {
+ while (n != NULL) {
+ Map** map_location = reinterpret_cast<Map**>(n->address());
+ if (*map_location == NULL) {
+ *map_location = heap->free_space_map();
+ } else {
+ ASSERT(*map_location == heap->free_space_map());
+ }
+ n = n->next();
+ }
+}
+
+
+void FreeList::RepairLists(Heap* heap) {
+ RepairFreeList(heap, small_list_);
+ RepairFreeList(heap, medium_list_);
+ RepairFreeList(heap, large_list_);
+ RepairFreeList(heap, huge_list_);
+}
+
+
+// After we have booted, we have created a map which represents free space
+// on the heap. If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// fix them.
+void PagedSpace::RepairFreeListsAfterBoot() {
+ free_list_.RepairLists(heap());
+}
+
+
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
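
The comment above RepairFreeListsAfterBoot() explains why the repair pass exists: free-list nodes created while bootstrapping are built before the free-space map itself exists, so their map slot is still NULL; once the map has been created, each list is walked and the missing pointer is filled in. A generic sketch of the same lazy fix-up over a singly linked list (names are illustrative, not the FreeListNode API):

    #include <cassert>
    #include <cstddef>

    struct Descriptor {};  // stands in for the "free space map" created late in boot

    struct Node {
      Descriptor* desc;  // NULL if the node was created before the descriptor existed
      Node* next;
    };

    // Walk the list and fill in the descriptor on any node that predates it.
    void RepairList(Node* n, Descriptor* desc) {
      for (; n != NULL; n = n->next) {
        if (n->desc == NULL) {
          n->desc = desc;
        } else {
          assert(n->desc == desc);  // already repaired, or created after boot
        }
      }
    }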
@@ -2520,25 +2591,27 @@ void FixedSpace::PrepareForMarkCompact() {
// -----------------------------------------------------------------------------
// MapSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap() || object->IsFreeSpace());
}
-#endif
// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
- ASSERT(object->IsJSGlobalPropertyCell() ||
+ CHECK(object->IsJSGlobalPropertyCell() ||
object->map() == heap()->two_pointer_filler_map());
}
-#endif
// -----------------------------------------------------------------------------
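
The TODO comments in the hunk above describe a real constraint: under the Itanium C++ ABI, a class's vtable is emitted in the translation unit that defines its first non-inline, non-pure virtual member function (the "key function"), so keeping an out-of-line VerifyObject definition in spaces.cc anchors the vtables of MapSpace and CellSpace even when heap verification is otherwise compiled out. A minimal illustration with a hypothetical class:

    // Single-file sketch of the "key function" effect.
    struct SpaceSketch {
      virtual ~SpaceSketch() {}               // defined inline, so not the key function
      virtual void VerifyObject(void* object); // declared here, defined out of line below
    };

    // Because this is the first non-inline virtual member, the compiler emits
    // SpaceSketch's vtable in this translation unit.
    void SpaceSketch::VerifyObject(void*) {}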
@@ -2648,18 +2721,31 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
HeapObject* object = page->GetObject();
-#ifdef DEBUG
- // Make the object consistent so the heap can be vefified in OldSpaceStep.
- reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ // Make the object consistent so the heap can be verified in OldSpaceStep.
+ // We only need to do this in debug builds or if verify_heap is on.
+ reinterpret_cast<Object**>(object->address())[0] =
+ heap()->fixed_array_map();
+ reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+ }
heap()->incremental_marking()->OldSpaceStep(object_size);
return object;
}
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ size_t size = 0;
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ size += current->CommittedPhysicalMemory();
+ current = current->next_page();
+ }
+ return size;
+}
+
+
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
@@ -2752,7 +2838,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
@@ -2763,18 +2849,18 @@ void LargeObjectSpace::Verify() {
// object area start.
HeapObject* object = chunk->GetObject();
Page* page = Page::FromAddress(object->address());
- ASSERT(object->address() == page->area_start());
+ CHECK(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, and byte arrays in large object space.
- ASSERT(object->IsCode() || object->IsSeqString() ||
+ CHECK(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
object->IsFixedDoubleArray() || object->IsByteArray());
@@ -2793,15 +2879,17 @@ void LargeObjectSpace::Verify() {
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(heap()->Contains(element_object));
- ASSERT(element_object->map()->IsMap());
+ CHECK(heap()->Contains(element_object));
+ CHECK(element_object->map()->IsMap());
}
}
}
}
}
+#endif
+#ifdef DEBUG
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {