# HG changeset patch # User Nicolas B. Pierron Bug 1520366 - Count and prepend transferred chunks when merging realms. r= diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp index 5cbf4ff5e97a7..56f5ae7d18bbb 100644 --- a/js/src/ds/LifoAlloc.cpp +++ b/js/src/ds/LifoAlloc.cpp @@ -109,16 +109,17 @@ void LifoAlloc::reset(size_t defaultChunkSize) { while (!unused_.empty()) { unused_.popFirst(); } defaultChunkSize_ = defaultChunkSize; oversizeThreshold_ = defaultChunkSize; markCount = 0; curSize_ = 0; oversizeSize_ = 0; + transferredChunksSize_ = 0; } void LifoAlloc::freeAll() { while (!chunks_.empty()) { UniqueBumpChunk bc = chunks_.popFirst(); decrementCurSize(bc->computedSizeOfIncludingThis()); } while (!oversize_.empty()) { @@ -130,16 +131,19 @@ void LifoAlloc::freeAll() { UniqueBumpChunk bc = unused_.popFirst(); decrementCurSize(bc->computedSizeOfIncludingThis()); } // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an // excellent sanity check. MOZ_ASSERT(curSize_ == 0); MOZ_ASSERT(oversizeSize_ == 0); + // We do not keep track of which chunks were transferred; therefore, we cannot + // decrement this size incrementally. + transferredChunksSize_ = 0; } // Round at the same page granularity used by malloc. static size_t MallocGoodSize(size_t aSize) { #if defined(MOZ_MEMORY) return malloc_good_size(aSize); #else return aSize; @@ -171,21 +175,27 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n, // bytes in a newly allocated chunk, or default to |defaultChunkSize_|. size_t minSize; if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) || (minSize & (size_t(1) << (BitSize::value - 1))))) { return nullptr; } + // Note: oversize and transferred sizes are non-overlapping sets of + // allocations. Therefore, we can safely add both counters and subtract them + // from curSize_. 
MOZ_ASSERT(curSize_ >= oversizeSize_); + MOZ_ASSERT(curSize_ >= transferredChunksSize_); + const size_t exceptionSize = oversizeSize_ + transferredChunksSize_; + MOZ_ASSERT(curSize_ >= exceptionSize); const size_t chunkSize = (oversize || minSize > defaultChunkSize_) ? MallocGoodSize(minSize) - : NextSize(defaultChunkSize_, curSize_ - oversizeSize_); + : NextSize(defaultChunkSize_, curSize_ - exceptionSize); // Create a new BumpChunk, and allocate space for it. UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize); if (!result) { return nullptr; } MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize); return result; @@ -349,34 +359,49 @@ void LifoAlloc::steal(LifoAlloc* other) { oversize_ = std::move(other->oversize_); unused_ = std::move(other->unused_); markCount = other->markCount; defaultChunkSize_ = other->defaultChunkSize_; oversizeThreshold_ = other->oversizeThreshold_; curSize_ = other->curSize_; peakSize_ = Max(peakSize_, other->peakSize_); oversizeSize_ = other->oversizeSize_; + transferredChunksSize_ = other->transferredChunksSize_; #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT) fallibleScope_ = other->fallibleScope_; #endif other->reset(defaultChunkSize_); } void LifoAlloc::transferFrom(LifoAlloc* other) { MOZ_ASSERT(!markCount); MOZ_ASSERT(!other->markCount); + size_t otherChunksSize = other->curSize_ - other->oversizeSize_; +#ifdef DEBUG + size_t measuredSize = 0; + for (detail::BumpChunk& bc : other->chunks_) { + measuredSize += bc.computedSizeOfIncludingThis(); + } + for (detail::BumpChunk& bc : other->unused_) { + measuredSize += bc.computedSizeOfIncludingThis(); + } + MOZ_ASSERT(otherChunksSize == measuredSize); +#endif + incrementCurSize(other->curSize_); oversizeSize_ += other->oversizeSize_; + transferredChunksSize_ += otherChunksSize; appendUnused(std::move(other->unused_)); - appendUsed(std::move(other->chunks_)); - oversize_.appendAll(std::move(other->oversize_)); + chunks_.prependAll(std::move(other->chunks_)); + 
oversize_.prependAll(std::move(other->oversize_)); other->curSize_ = 0; other->oversizeSize_ = 0; + other->transferredChunksSize_ = 0; } void LifoAlloc::transferUnusedFrom(LifoAlloc* other) { MOZ_ASSERT(!markCount); size_t size = 0; for (detail::BumpChunk& bc : other->unused_) { size += bc.computedSizeOfIncludingThis(); diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h index 5dcb162f7d5c5..d6804db7f62dc 100644 --- a/js/src/ds/LifoAlloc.h +++ b/js/src/ds/LifoAlloc.h @@ -169,16 +169,27 @@ class SingleLinkedList { } else { head_ = std::move(list.head_); } last_ = list.last_; list.last_ = nullptr; assertInvariants(); list.assertInvariants(); } + void steal(SingleLinkedList&& list) { + head_ = std::move(list.head_); + last_ = list.last_; + list.last_ = nullptr; + assertInvariants(); + list.assertInvariants(); + } + void prependAll(SingleLinkedList&& list) { + list.appendAll(std::move(*this)); + steal(std::move(list)); + } UniquePtr popFirst() { MOZ_ASSERT(head_); UniquePtr result = std::move(head_); head_ = std::move(result->next_); if (!head_) { last_ = nullptr; } assertInvariants(); @@ -511,19 +522,24 @@ class LifoAlloc { BumpChunkList oversize_; // Set of unused chunks, which can be reused for future allocations. BumpChunkList unused_; size_t markCount; size_t defaultChunkSize_; size_t oversizeThreshold_; + // Size of all chunks in both chunks_ list and oversize_ list. size_t curSize_; + // Maximum value reached by curSize_. size_t peakSize_; + // Size of all oversize_ chunks. size_t oversizeSize_; + // Size of all chunks from the chunks_ list transferred from another LifoAlloc. + size_t transferredChunksSize_; #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT) bool fallibleScope_; #endif void operator=(const LifoAlloc&) = delete; LifoAlloc(const LifoAlloc&) = delete; // Return a BumpChunk that can perform an allocation of at least size |n|.