/*
 * Copyright (C) 2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LocalAllocator.h"

#include "AllocatingScope.h"
#include "LocalAllocatorInlines.h"
#include "Options.h"

namespace JSC {

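// A LocalAllocator is one allocator's-eye view of a BlockDirectory: it caches a free-list and a
// current block so that the allocation fast path doesn't need to consult the directory. Each
// LocalAllocator registers itself with its directory, under the directory's lock, so the
// directory can enumerate its local allocators later.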
LocalAllocator::LocalAllocator(BlockDirectory* directory)
    : m_directory(directory)
    , m_cellSize(directory->m_cellSize)
    , m_freeList(m_cellSize)
{
    auto locker = holdLock(directory->m_localAllocatorsLock);
    directory->m_localAllocators.append(this);
}

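// Drops all allocation state: the cached free-list, the current and last active blocks, and
// the cursor used when searching the directory's blocks for allocation candidates.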
void LocalAllocator::reset()
{
    m_freeList.clear();
    m_currentBlock = nullptr;
    m_lastActiveBlock = nullptr;
    m_allocationCursor = 0;
}

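// By destruction time the allocator must have been stopped and reset; unlink from the
// directory's list, then report any allocation state that leaked before crashing.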
LocalAllocator::~LocalAllocator()
{
    if (isOnList()) {
        auto locker = holdLock(m_directory->m_localAllocatorsLock);
        remove();
    }

    bool ok = true;
    if (!m_freeList.allocationWillFail()) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-empty free-list.\n");
        ok = false;
    }
    if (m_currentBlock) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null current block.\n");
        ok = false;
    }
    if (m_lastActiveBlock) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null last active block.\n");
        ok = false;
    }
    RELEASE_ASSERT(ok);
}

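// Hands the in-flight free-list back to the current block so that the heap sees a consistent
// view of the block, and remembers the block so resumeAllocating() can pick back up.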
void LocalAllocator::stopAllocating()
{
    ASSERT(!m_lastActiveBlock);
    if (!m_currentBlock) {
        ASSERT(m_freeList.allocationWillFail());
        return;
    }

    m_currentBlock->stopAllocating(m_freeList);
    m_lastActiveBlock = m_currentBlock;
    m_currentBlock = nullptr;
    m_freeList.clear();
}

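// Undoes stopAllocating(): rebuilds the free-list from the last active block and makes that
// block current again.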
void LocalAllocator::resumeAllocating()
{
    if (!m_lastActiveBlock)
        return;

    m_lastActiveBlock->resumeAllocating(m_freeList);
    m_currentBlock = m_lastActiveBlock;
    m_lastActiveBlock = nullptr;
}

void LocalAllocator::prepareForAllocation()
{
    reset();
}

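// Like stopAllocating(), but also drops the last active block, presumably because this
// allocator will never resume allocating.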
void LocalAllocator::stopAllocatingForGood()
{
    stopAllocating();
    reset();
}

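// Called when the inline fast path (see LocalAllocatorInlines.h) exhausts the cached free-list.
// Credits the heap with what we allocated, retires the free-list, gives the GC a chance to run,
// and then looks for memory in existing blocks before asking the directory for a new one.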
void* LocalAllocator::allocateSlowCase(GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    SuperSamplerScope superSamplerScope(false);
    Heap& heap = *m_directory->m_heap;
    ASSERT(heap.vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded(deferralContext);

    ASSERT(!m_directory->markedSpace().isIterating());
    heap.didAllocate(m_freeList.originalSize());

    didConsumeFreeList();

    AllocatingScope helpingHeap(heap);

    heap.collectIfNecessaryOrDefer(deferralContext);

    // Goofy corner case: the GC called a callback and now this directory has a currentBlock.
    // This only happens when running WebKit tests, which inject a callback into the GC's
    // finalization.
    if (UNLIKELY(m_currentBlock))
        return allocate(deferralContext, failureMode);

    void* result = tryAllocateWithoutCollecting();

    if (LIKELY(result))
        return result;

    MarkedBlock::Handle* block = m_directory->tryAllocateBlock();
    if (!block) {
        if (failureMode == AllocationFailureMode::Assert)
            RELEASE_ASSERT_NOT_REACHED();
        else
            return nullptr;
    }
    m_directory->addBlock(block);
    result = allocateIn(block);
    ASSERT(result);
    return result;
}

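// Tells the current block, if any, that its free-list has been fully consumed, then forgets
// both the free-list and the block.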
void LocalAllocator::didConsumeFreeList()
{
    if (m_currentBlock)
        m_currentBlock->didConsumeFreeList();

    m_freeList.clear();
    m_currentBlock = nullptr;
}

void* LocalAllocator::tryAllocateWithoutCollecting()
{
    // FIXME: If we wanted this to be used for real multi-threaded allocations then we would
    // have to come up with some concurrency protocol here. That protocol would need to be able
    // to handle:
    //
    // - The basic case of multiple LocalAllocators trying to do an allocationCursor search on
    //   the same bitvector. That probably needs the bitvector lock at least.
    //
    // - The harder case of some LocalAllocator triggering a steal from a different
    //   BlockDirectory via a search in the AlignedMemoryAllocator's list. Who knows what locks
    //   that needs.
    //
    // One way to make this work is to have a single per-Heap lock that protects all mutator
    // allocation slow paths. That would probably be scalable enough for years. It would
    // certainly be enough for using TLC allocation from JIT threads.
    // https://bugs.webkit.org/show_bug.cgi?id=181635

    SuperSamplerScope superSamplerScope(false);

    ASSERT(!m_currentBlock);
    ASSERT(m_freeList.allocationWillFail());

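    // First, search this directory's existing blocks, advancing the allocation cursor until we
    // find one that sweeps into a non-empty free-list.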
    for (;;) {
        MarkedBlock::Handle* block = m_directory->findBlockForAllocation(*this);
        if (!block)
            break;

        if (void* result = tryAllocateIn(block))
            return result;
    }

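    // Failing that, optionally steal a completely empty block from another directory that
    // shares our AlignedMemoryAllocator and rehome it here.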
    if (Options::stealEmptyBlocksFromOtherAllocators()
        && (Options::tradeDestructorBlocks() || !m_directory->needsDestruction())) {
        if (MarkedBlock::Handle* block = m_directory->m_subspace->findEmptyBlockToSteal()) {
            RELEASE_ASSERT(block->alignedMemoryAllocator() == m_directory->m_subspace->alignedMemoryAllocator());

            block->sweep(nullptr);

            // It's good that this clears canAllocateButNotEmpty as well as all other bits,
            // because there is a remote chance that a block may have both canAllocateButNotEmpty
            // and empty set at the same time.
            block->removeFromDirectory();
            m_directory->addBlock(block);
            return allocateIn(block);
        }
    }

    return nullptr;
}

void* LocalAllocator::allocateIn(MarkedBlock::Handle* block)
{
    void* result = tryAllocateIn(block);
    RELEASE_ASSERT(result);
    return result;
}

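// Sweeps the block to build a free-list for it. On success the block becomes our current block;
// if the block turns out to be completely full, un-sweeps it and returns null.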
void* LocalAllocator::tryAllocateIn(MarkedBlock::Handle* block)
{
    ASSERT(block);
    ASSERT(!block->isFreeListed());

    block->sweep(&m_freeList);

    // It's possible to stumble on a completely full block. Marking tries to retire these, but
    // that algorithm is racy and may forget to do it sometimes.
    if (m_freeList.allocationWillFail()) {
        ASSERT(block->isFreeListed());
        block->unsweepWithNoNewlyAllocated();
        ASSERT(!block->isFreeListed());
        ASSERT(!m_directory->isEmpty(NoLockingNecessary, block));
        ASSERT(!m_directory->isCanAllocateButNotEmpty(NoLockingNecessary, block));
        return nullptr;
    }

    m_currentBlock = block;

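    // The allocation callback runs only if the free-list is empty, and we just checked that
    // allocation will not fail, so it must be unreachable.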
    void* result = m_freeList.allocate(
        [] () -> HeapCell* {
            RELEASE_ASSERT_NOT_REACHED();
            return nullptr;
        });
    m_directory->setIsEden(NoLockingNecessary, m_currentBlock, true);
    m_directory->markedSpace().didAllocateInBlock(m_currentBlock);
    return result;
}

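// Testing aid: when Options::slowPathAllocsBetweenGCs() is N, force a full synchronous
// collection (or request one through the deferral context) on every Nth slow-path allocation.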
void LocalAllocator::doTestCollectionsIfNeeded(GCDeferralContext* deferralContext)
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!m_directory->m_heap->isDeferred()) {
            if (deferralContext)
                deferralContext->m_shouldGC = true;
            else
                m_directory->m_heap->collectNow(Sync, CollectionScope::Full);
        }
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}

bool LocalAllocator::isFreeListedCell(const void* target) const
{
    // This abomination exists to detect when an object is in the dead-but-not-destructed state.
    // Therefore, it's not even clear that this needs to do anything beyond returning "false",
    // since if we know that the block owning the object is free-listed, then it's impossible
    // for any objects to be in the dead-but-not-destructed state.
    // FIXME: Get rid of this abomination. https://bugs.webkit.org/show_bug.cgi?id=181655
    return m_freeList.contains(bitwise_cast<HeapCell*>(target));
}

} // namespace JSC