/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
* Copyright (C) 2003-2009, 2013-2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/

#pragma once

#include "ArrayBuffer.h"
#include "CollectionScope.h"
#include "GCIncomingRefCountedSet.h"
#include "HandleSet.h"
#include "HandleStack.h"
#include "HeapObserver.h"
#include "ListableHandler.h"
#include "MachineStackMarker.h"
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include "MarkedSpace.h"
#include "MutatorState.h"
#include "Options.h"
#include "SlotVisitor.h"
#include "StructureIDTable.h"
#include "TinyBloomFilter.h"
#include "UnconditionalFinalizer.h"
#include "WeakHandleOwner.h"
#include "WeakReferenceHarvester.h"
#include "WriteBarrierBuffer.h"
#include "WriteBarrierSupport.h"
#include <wtf/AutomaticThread.h>
#include <wtf/Deque.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/ParallelHelperPool.h>

namespace JSC {

class CodeBlock;
class CodeBlockSet;
class GCDeferralContext;
class EdenGCActivityCallback;
class ExecutableBase;
class FullGCActivityCallback;
class GCActivityCallback;
class GCAwareJITStubRoutine;
class Heap;
class HeapProfiler;
class HeapRootVisitor;
class HeapVerifier;
class HelpingGCScope;
class IncrementalSweeper;
class JITStubRoutine;
class JITStubRoutineSet;
class JSCell;
class JSValue;
class LLIntOffsetsExtractor;
class MarkedArgumentBuffer;
class StopIfNecessaryTimer;
class VM;
namespace DFG {
class SpeculativeJIT;
class Worklist;
}
static void* const zombifiedBits = reinterpret_cast<void*>(static_cast<uintptr_t>(0xdeadbeef));
typedef HashCountedSet<JSCell*> ProtectCountSet;
typedef HashCountedSet<const char*> TypeCountSet;
enum HeapType { SmallHeap, LargeHeap };
class HeapUtil;

class Heap {
WTF_MAKE_NONCOPYABLE(Heap);
public:
friend class JIT;
friend class DFG::SpeculativeJIT;
static Heap* heap(const JSValue); // 0 for immediate values
static Heap* heap(const HeapCell*);
// This constant determines how many blocks we iterate between checks of our
// deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
// overstepping our deadline more quickly, while increasing it will cause
// our scan to run faster.
static const unsigned s_timeCheckResolution = 16;
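// For illustration only: a sketch of the deadline-check pattern this constant controls.
// The loop shape and helper names (blocks, scanBlock, deadline) are hypothetical, not the
// actual Heap::isPagedOut implementation.
//
//     unsigned blocksScanned = 0;
//     for (MarkedBlock* block : blocks) {
//         if (!(++blocksScanned % s_timeCheckResolution) && monotonicallyIncreasingTime() > deadline)
//             return true; // We overstepped the deadline, so treat the heap as paged out.
//         scanBlock(block);
//     }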
static bool isMarked(const void*);
static bool isMarkedConcurrently(const void*);
static bool testAndSetMarked(HeapVersion, const void*);
static size_t cellSize(const void*);
void writeBarrier(const JSCell* from);
void writeBarrier(const JSCell* from, JSValue to);
void writeBarrier(const JSCell* from, JSCell* to);
void writeBarrierWithoutFence(const JSCell* from);
// Take this if you know that from->cellState() < barrierThreshold.
JS_EXPORT_PRIVATE void writeBarrierSlowPath(const JSCell* from);
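// Sketch of how a caller can apply the threshold check described above; `heap` and `from`
// are assumed to be in scope, and the cast is illustrative:
//
//     if (static_cast<unsigned>(from->cellState()) < heap.barrierThreshold())
//         heap.writeBarrierSlowPath(from);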
WriteBarrierBuffer& writeBarrierBuffer() { return m_writeBarrierBuffer; }
void flushWriteBarrierBuffer(JSCell*);
Heap(VM*, HeapType);
~Heap();
void lastChanceToFinalize();
void releaseDelayedReleasedObjects();
VM* vm() const { return m_vm; }
MarkedSpace& objectSpace() { return m_objectSpace; }
MachineThreads& machineThreads() { return m_machineThreads; }
const SlotVisitor& slotVisitor() const { return m_slotVisitor; }
JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);
JS_EXPORT_PRIVATE IncrementalSweeper* sweeper();
void addObserver(HeapObserver* observer) { m_observers.append(observer); }
void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }
MutatorState mutatorState() const { return m_mutatorState; }
Optional<CollectionScope> collectionScope() const { return m_collectionScope; }
bool hasHeapAccess() const;
bool mutatorIsStopped() const;
bool collectorBelievesThatTheWorldIsStopped() const;
// We're always busy on the collection threads. On the main thread, this returns true if we're
// helping the heap.
JS_EXPORT_PRIVATE bool isCurrentThreadBusy();
MarkedSpace::Subspace& subspaceForObjectWithoutDestructor() { return m_objectSpace.subspaceForObjectsWithoutDestructor(); }
MarkedSpace::Subspace& subspaceForObjectDestructor() { return m_objectSpace.subspaceForObjectsWithDestructor(); }
MarkedSpace::Subspace& subspaceForAuxiliaryData() { return m_objectSpace.subspaceForAuxiliaryData(); }
template<typename ClassType> MarkedSpace::Subspace& subspaceForObjectOfType();
MarkedAllocator* allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
MarkedAllocator* allocatorForObjectWithDestructor(size_t bytes) { return m_objectSpace.destructorAllocatorFor(bytes); }
template<typename ClassType> MarkedAllocator* allocatorForObjectOfType(size_t bytes);
MarkedAllocator* allocatorForAuxiliaryData(size_t bytes) { return m_objectSpace.auxiliaryAllocatorFor(bytes); }
void* allocateAuxiliary(JSCell* intendedOwner, size_t);
void* tryAllocateAuxiliary(JSCell* intendedOwner, size_t);
void* tryAllocateAuxiliary(GCDeferralContext*, JSCell* intendedOwner, size_t);
void* tryReallocateAuxiliary(JSCell* intendedOwner, void* oldBase, size_t oldSize, size_t newSize);
void ascribeOwner(JSCell* intendedOwner, void*);
typedef void (*Finalizer)(JSCell*);
JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
void addExecutable(ExecutableBase*);
void notifyIsSafeToCollect() { m_isSafeToCollect = true; }
bool isSafeToCollect() const { return m_isSafeToCollect; }
JS_EXPORT_PRIVATE bool isHeapSnapshotting() const;
JS_EXPORT_PRIVATE void collectAllGarbageIfNotDoneRecently();
JS_EXPORT_PRIVATE void collectAllGarbage();
bool canCollect();
bool shouldCollectHeuristic();
bool shouldCollect();
// Queue up a collection. Returns immediately. This will not queue a collection if a collection
// of equal or greater strength exists. Full collections are stronger than Nullopt collections
// and Nullopt collections are stronger than Eden collections. Nullopt means that the GC can
// choose Eden or Full. This implies that if you request a GC while that GC is ongoing, nothing
// will happen.
JS_EXPORT_PRIVATE void collectAsync(Optional<CollectionScope> = Nullopt);
// Queue up a collection and wait for it to complete. This won't return until you get your own
// complete collection. For example, if there was an ongoing asynchronous collection at the time
// you called this, then this would wait for that one to complete and then trigger your
// collection and then return. In weird cases, there could be multiple GC requests in the backlog
// and this will wait for that backlog before running its GC and returning.
JS_EXPORT_PRIVATE void collectSync(Optional<CollectionScope> = Nullopt);
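// Usage sketch for the two collection-request APIs above, assuming a VM& named `vm`
// whose JSLock is held by the caller:
//
//     vm.heap.collectAsync();                       // let the GC choose Eden or Full
//     vm.heap.collectAsync(CollectionScope::Full);  // request (or upgrade to) a full collection
//     vm.heap.collectSync(CollectionScope::Eden);   // block until an Eden collection completes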
bool collectIfNecessaryOrDefer(GCDeferralContext* = nullptr); // Returns true if it did collect.
void collectAccordingToDeferGCProbability();
void completeAllJITPlans();
// Use this API to report non-GC memory referenced by GC objects. Be sure to
// call both of these functions: calling only one may trigger catastrophic
// memory growth.
void reportExtraMemoryAllocated(size_t);
JS_EXPORT_PRIVATE void reportExtraMemoryVisited(CellState oldState, size_t);
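// Paired-usage sketch for the two functions above; the owning cell, `byteSize`, and
// `oldCellState` are hypothetical:
//
//     // When the cell takes ownership of non-GC memory (e.g. in finishCreation()):
//     heap.reportExtraMemoryAllocated(byteSize);
//
//     // When the GC visits the owning cell (e.g. from its visitChildren()):
//     heap.reportExtraMemoryVisited(oldCellState, byteSize);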
#if ENABLE(RESOURCE_USAGE)
// Use this API to report the subset of extra memory that lives outside this process.
JS_EXPORT_PRIVATE void reportExternalMemoryVisited(CellState oldState, size_t);
size_t externalMemorySize() { return m_externalMemorySize; }
#endif
// Use this API to report non-GC memory if you can't use the better API above.
void deprecatedReportExtraMemory(size_t);
JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();
JS_EXPORT_PRIVATE void protect(JSValue);
JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
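// Sketch of the counted protect/unprotect semantics (`value` is a hypothetical JSValue,
// and the caller is assumed to hold the JSLock):
//
//     heap.protect(value);                  // protect count: 1
//     heap.protect(value);                  // protect count: 2
//     bool last = heap.unprotect(value);    // false: count drops to 1
//     last = heap.unprotect(value);         // true: count drops to 0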
JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
JS_EXPORT_PRIVATE size_t size();
JS_EXPORT_PRIVATE size_t capacity();
JS_EXPORT_PRIVATE size_t objectCount();
JS_EXPORT_PRIVATE size_t globalObjectCount();
JS_EXPORT_PRIVATE size_t protectedObjectCount();
JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();
HashSet<MarkedArgumentBuffer*>& markListSet();
template<typename Functor> void forEachProtectedCell(const Functor&);
template<typename Functor> void forEachCodeBlock(const Functor&);
HandleSet* handleSet() { return &m_handleSet; }
HandleStack* handleStack() { return &m_handleStack; }
void willStartIterating();
void didFinishIterating();
double lastFullGCLength() const { return m_lastFullGCLength; }
double lastEdenGCLength() const { return m_lastEdenGCLength; }
void increaseLastFullGCLength(double amount) { m_lastFullGCLength += amount; }
size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }
void deleteAllCodeBlocks();
void deleteAllUnlinkedCodeBlocks();
void didAllocate(size_t);
bool isPagedOut(double deadline);
const JITStubRoutineSet& jitStubRoutines() { return *m_jitStubRoutines; }
void addReference(JSCell*, ArrayBuffer*);
bool isDeferred() const { return !!m_deferralDepth || !Options::useGC(); }
StructureIDTable& structureIDTable() { return m_structureIDTable; }
CodeBlockSet& codeBlockSet() { return *m_codeBlocks; }
#if USE(FOUNDATION)
template<typename T> void releaseSoon(RetainPtr<T>&&);
#endif
static bool isZombified(JSCell* cell) { return *(void**)cell == zombifiedBits; }
JS_EXPORT_PRIVATE void registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback);
JS_EXPORT_PRIVATE void unregisterWeakGCMap(void* weakGCMap);
void addLogicallyEmptyWeakBlock(WeakBlock*);
#if ENABLE(RESOURCE_USAGE)
size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif
void didAllocateBlock(size_t capacity);
void didFreeBlock(size_t capacity);
bool barrierShouldBeFenced() const { return m_barrierShouldBeFenced; }
const bool* addressOfBarrierShouldBeFenced() const { return &m_barrierShouldBeFenced; }
unsigned barrierThreshold() const { return m_barrierThreshold; }
const unsigned* addressOfBarrierThreshold() const { return &m_barrierThreshold; }
// If true, the GC believes that the mutator is currently messing with the heap. We call this
// "having heap access". The GC may block if the mutator is in this state. If false, the GC may
// currently be doing things to the heap that make the heap unsafe to access for the mutator.
bool hasAccess() const;
// If the mutator does not currently have heap access, this function will acquire it. If the GC
// is currently using the lack of heap access to do dangerous things to the heap then this
// function will block, waiting for the GC to finish. It's not valid to call this if the mutator
// already has heap access. The mutator is required to precisely track whether or not it has
// heap access.
//
// It's totally fine to acquireAccess() upon VM instantiation and keep it that way. This is how
// WebCore uses us. For most other clients, JSLock does acquireAccess()/releaseAccess() for you.
void acquireAccess();
// Releases heap access. If the GC is blocked waiting to do bad things to the heap, it will be
// allowed to run now.
//
// Ordinarily, you should use the ReleaseHeapAccessScope to release and then reacquire heap
// access. You should do this any time you're about to perform a blocking operation, like waiting
// on the ParkingLot.
void releaseAccess();
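// Sketch of the recommended pattern: wrap a blocking wait in a ReleaseHeapAccessScope
// (assumed here to take the Heap by reference) so the GC can run while the mutator sleeps.
// `lock` and `condition` are hypothetical.
//
//     {
//         ReleaseHeapAccessScope releaseAccess(vm.heap);
//         condition.wait(lock); // Safe to block here; heap access is released.
//     }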
// This is like a super optimized way of saying:
//
// releaseAccess()
// acquireAccess()
//
// The fast path is an inlined relaxed load and branch. The slow path will block the mutator if
// the GC wants to do bad things to the heap.
//
// All allocations logically call this. As an optimization to improve GC progress, you can call
// this anywhere that you can afford a load-branch and where an object allocation would have been
// safe.
//
// The GC will also push a stopIfNecessary() event onto the runloop of the thread that
// instantiated the VM whenever it wants the mutator to stop. This means that if you never block
// but instead use the runloop to wait for events, then you could safely run in a mode where the
// mutator has permanent heap access (like the DOM does). If you have good event handling
// discipline (i.e. you don't block the runloop) then you can be sure that stopIfNecessary() will
// already be called for you at the right times.
void stopIfNecessary();
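// Sketch of polling stopIfNecessary() from a long-running, non-allocating loop, as the
// comment above suggests. `workItems` and `process()` are hypothetical.
//
//     for (auto& item : workItems) {
//         vm.heap.stopIfNecessary(); // Cheap load-and-branch; blocks only if the GC asked us to stop.
//         process(item);
//     }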
#if USE(CF)
CFRunLoopRef runLoop() const { return m_runLoop.get(); }
JS_EXPORT_PRIVATE void setRunLoop(CFRunLoopRef);
#endif // USE(CF)

private:
friend class AllocatingScope;
friend class CodeBlock;
friend class DeferGC;
friend class DeferGCForAWhile;
friend class GCAwareJITStubRoutine;
friend class GCLogging;
friend class GCThread;
friend class HandleSet;
friend class HeapUtil;
friend class HeapVerifier;
friend class HelpingGCScope;
friend class JITStubRoutine;
friend class LLIntOffsetsExtractor;
friend class MarkedSpace;
friend class MarkedAllocator;
friend class MarkedBlock;
friend class SlotVisitor;
friend class IncrementalSweeper;
friend class HeapStatistics;
friend class VM;
friend class WeakSet;
class Thread;
friend class Thread;
template<typename T> friend void* allocateCell(Heap&);
template<typename T> friend void* allocateCell(Heap&, size_t);
template<typename T> friend void* allocateCell(Heap&, GCDeferralContext*);
template<typename T> friend void* allocateCell(Heap&, GCDeferralContext*, size_t);
void* allocateWithDestructor(size_t); // For use with objects with destructors.
void* allocateWithoutDestructor(size_t); // For use with objects without destructors.
void* allocateWithDestructor(GCDeferralContext*, size_t);
void* allocateWithoutDestructor(GCDeferralContext*, size_t);
template<typename ClassType> void* allocateObjectOfType(size_t); // Chooses one of the methods above based on type.
template<typename ClassType> void* allocateObjectOfType(GCDeferralContext*, size_t);
static const size_t minExtraMemory = 256;
class FinalizerOwner : public WeakHandleOwner {
void finalize(Handle<Unknown>, void* context) override;
};
JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);
bool shouldCollectInThread(const LockHolder&);
void collectInThread();
void stopTheWorld();
void resumeTheWorld();
void stopTheMutator();
void resumeTheMutator();
void stopIfNecessarySlow();
bool stopIfNecessarySlow(unsigned extraStateBits);
template<typename Func>
void waitForCollector(const Func&);
JS_EXPORT_PRIVATE void acquireAccessSlow();
JS_EXPORT_PRIVATE void releaseAccessSlow();
bool handleGCDidJIT(unsigned);
bool handleNeedFinalize(unsigned);
void handleGCDidJIT();
void handleNeedFinalize();
void setGCDidJIT();
void setNeedFinalize();
void waitWhileNeedFinalize();
unsigned setMutatorWaiting();
void clearMutatorWaiting();
void notifyThreadStopping(const LockHolder&);
typedef uint64_t Ticket;
Ticket requestCollection(Optional<CollectionScope>);
void waitForCollection(Ticket);
void suspendCompilerThreads();
void willStartCollection(Optional<CollectionScope>);
void flushOldStructureIDTables();
void flushWriteBarrierBuffer();
void stopAllocation();
void prepareForMarking();
void markRoots(double gcStartTime);
void gatherStackRoots(ConservativeRoots&);
void gatherJSStackRoots(ConservativeRoots&);
void gatherScratchBufferRoots(ConservativeRoots&);
void beginMarking();
void visitExternalRememberedSet();
void visitSmallStrings();
void visitConservativeRoots(ConservativeRoots&);
void visitCompilerWorklistWeakReferences();
void removeDeadCompilerWorklistEntries();
void visitProtectedObjects(HeapRootVisitor&);
void visitArgumentBuffers(HeapRootVisitor&);
void visitException(HeapRootVisitor&);
void visitStrongHandles(HeapRootVisitor&);
void visitHandleStack(HeapRootVisitor&);
void visitSamplingProfiler();
void visitTypeProfiler();
void visitShadowChicken();
void traceCodeBlocksAndJITStubRoutines();
void visitWeakHandles(HeapRootVisitor&);
void updateObjectCounts(double gcStartTime);
void endMarking();
void reapWeakHandles();
void pruneStaleEntriesFromWeakGCMaps();
void sweepArrayBuffers();
void snapshotUnswept();
void deleteSourceProviderCaches();
void notifyIncrementalSweeper();
void prepareForAllocation();
void harvestWeakReferences();
void finalizeUnconditionalFinalizers();
void clearUnmarkedExecutables();
void deleteUnmarkedCompiledCode();
JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
void updateAllocationLimits();
void didFinishCollection(double gcStartTime);
void resumeCompilerThreads();
void zombifyDeadObjects();
void gatherExtraHeapSnapshotData(HeapProfiler&);
void removeDeadHeapSnapshotNodes(HeapProfiler&);
void finalize();
void sweepLargeAllocations();
void sweepAllLogicallyEmptyWeakBlocks();
bool sweepNextLogicallyEmptyWeakBlock();
bool shouldDoFullCollection(Optional<CollectionScope> requestedCollectionScope) const;
void incrementDeferralDepth();
void decrementDeferralDepth();
JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeeded();
size_t threadVisitCount();
size_t threadBytesVisited();
void forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>&);
const HeapType m_heapType;
const size_t m_ramSize;
const size_t m_minBytesPerCycle;
size_t m_sizeAfterLastCollect;
size_t m_sizeAfterLastFullCollect;
size_t m_sizeBeforeLastFullCollect;
size_t m_sizeAfterLastEdenCollect;
size_t m_sizeBeforeLastEdenCollect;
size_t m_bytesAllocatedThisCycle;
size_t m_bytesAbandonedSinceLastFullCollect;
size_t m_maxEdenSize;
size_t m_maxHeapSize;
bool m_shouldDoFullCollection;
size_t m_totalBytesVisited;
size_t m_totalBytesVisitedThisCycle;
Optional<CollectionScope> m_collectionScope;
Optional<CollectionScope> m_lastCollectionScope;
MutatorState m_mutatorState { MutatorState::Running };
StructureIDTable m_structureIDTable;
MarkedSpace m_objectSpace;
GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
size_t m_extraMemorySize;
size_t m_deprecatedExtraMemorySize;
HashSet<const JSCell*> m_copyingRememberedSet;
ProtectCountSet m_protectedValues;
std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;
MachineThreads m_machineThreads;
SlotVisitor m_slotVisitor;
// We pool the slot visitors used by parallel marking threads. It's useful to be able to
// enumerate over them, and it's useful to have them cache some small amount of memory from
// one GC to the next. GC marking threads claim these at the start of marking, and return
// them at the end.
Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
Vector<SlotVisitor*> m_availableParallelSlotVisitors;
Lock m_parallelSlotVisitorLock;
HandleSet m_handleSet;
HandleStack m_handleStack;
std::unique_ptr<CodeBlockSet> m_codeBlocks;
std::unique_ptr<JITStubRoutineSet> m_jitStubRoutines;
FinalizerOwner m_finalizerOwner;
bool m_isSafeToCollect;
WriteBarrierBuffer m_writeBarrierBuffer;
bool m_barrierShouldBeFenced { Options::forceFencedBarrier() };
unsigned m_barrierThreshold { Options::forceFencedBarrier() ? tautologicalThreshold : blackThreshold };
VM* m_vm;
double m_lastFullGCLength;
double m_lastEdenGCLength;
Vector<ExecutableBase*> m_executables;
Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };
#if USE(CF)
RetainPtr<CFRunLoopRef> m_runLoop;
#endif // USE(CF)
RefPtr<FullGCActivityCallback> m_fullActivityCallback;
RefPtr<GCActivityCallback> m_edenActivityCallback;
RefPtr<IncrementalSweeper> m_sweeper;
RefPtr<StopIfNecessaryTimer> m_stopIfNecessaryTimer;
Vector<HeapObserver*> m_observers;
unsigned m_deferralDepth;
std::unique_ptr<HeapVerifier> m_verifier;
#if USE(FOUNDATION)
Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
unsigned m_delayedReleaseRecursionCount;
#endif
HashMap<void*, std::function<void()>> m_weakGCMaps;
Lock m_markingMutex;
Condition m_markingConditionVariable;
MarkStackArray m_sharedMarkStack;
unsigned m_numberOfActiveParallelMarkers { 0 };
unsigned m_numberOfWaitingParallelMarkers { 0 };
bool m_parallelMarkersShouldExit { false };
Lock m_opaqueRootsMutex;
HashSet<void*> m_opaqueRoots;
static const size_t s_blockFragmentLength = 32;
ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
ParallelHelperClient m_helperClient;
#if ENABLE(RESOURCE_USAGE)
size_t m_blockBytesAllocated { 0 };
size_t m_externalMemorySize { 0 };
#endif
static const unsigned shouldStopBit = 1u << 0u;
static const unsigned stoppedBit = 1u << 1u;
static const unsigned hasAccessBit = 1u << 2u;
static const unsigned gcDidJITBit = 1u << 3u; // Set when the GC did some JITing, so on resume we need to cpuid.
static const unsigned needFinalizeBit = 1u << 4u;
static const unsigned mutatorWaitingBit = 1u << 5u; // Allows the mutator to use this as a condition variable.
Atomic<unsigned> m_worldState;
bool m_collectorBelievesThatTheWorldIsStopped { false };
Deque<Optional<CollectionScope>> m_requests;
Ticket m_lastServedTicket { 0 };
Ticket m_lastGrantedTicket { 0 };
bool m_threadShouldStop { false };
bool m_threadIsStopping { false };
Box<Lock> m_threadLock;
RefPtr<AutomaticThreadCondition> m_threadCondition; // The mutator must not wait on this. It would cause a deadlock.
RefPtr<AutomaticThread> m_thread;
};
} // namespace JSC