[bmalloc] Define alias for std::lock_guard and std::unique_lock for better readability
https://bugs.webkit.org/show_bug.cgi?id=206443

Reviewed by Yusuke Suzuki.

There are two types of lock holders in bmalloc: std::lock_guard and std::unique_lock. Their names are relatively long
and a bit hard to distinguish from each other. Define simple type aliases for them: LockHolder and UniqueLockHolder.
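
For example, the aliases live next to Mutex in bmalloc/Mutex.h (see the Mutex.h hunk below):

    using UniqueLockHolder = std::unique_lock<Mutex>;
    using LockHolder = std::lock_guard<Mutex>;

so a call site such as

    std::lock_guard<Mutex> locker(mutex());

becomes

    LockHolder locker(mutex());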

* bmalloc/AllIsoHeaps.cpp:
(bmalloc::AllIsoHeaps::AllIsoHeaps):
(bmalloc::AllIsoHeaps::add):
(bmalloc::AllIsoHeaps::head):
* bmalloc/AllIsoHeaps.h:
* bmalloc/Allocator.cpp:
(bmalloc::Allocator::reallocateImpl):
(bmalloc::Allocator::refillAllocatorSlowCase):
(bmalloc::Allocator::allocateLarge):
* bmalloc/CryptoRandom.cpp:
(bmalloc::ARC4RandomNumberGenerator::ARC4RandomNumberGenerator):
(bmalloc::ARC4RandomNumberGenerator::randomValues):
* bmalloc/Deallocator.cpp:
(bmalloc::Deallocator::scavenge):
(bmalloc::Deallocator::processObjectLog):
(bmalloc::Deallocator::deallocateSlowCase):
* bmalloc/Deallocator.h:
(bmalloc::Deallocator::lineCache):
* bmalloc/DebugHeap.cpp:
(bmalloc::DebugHeap::DebugHeap):
(bmalloc::DebugHeap::memalignLarge):
(bmalloc::DebugHeap::freeLarge):
* bmalloc/DebugHeap.h:
* bmalloc/DeferredTrigger.h:
* bmalloc/DeferredTriggerInlines.h:
(bmalloc::DeferredTrigger<trigger>::didBecome):
(bmalloc::DeferredTrigger<trigger>::handleDeferral):
* bmalloc/Environment.cpp:
(bmalloc::Environment::Environment):
* bmalloc/Environment.h:
* bmalloc/Gigacage.cpp:
(bmalloc::PrimitiveDisableCallbacks::PrimitiveDisableCallbacks):
(Gigacage::disablePrimitiveGigacage):
(Gigacage::addPrimitiveDisableCallback):
(Gigacage::removePrimitiveDisableCallback):
* bmalloc/Heap.cpp:
(bmalloc::Heap::Heap):
(bmalloc::Heap::freeableMemory):
(bmalloc::Heap::markAllLargeAsEligibile):
(bmalloc::Heap::decommitLargeRange):
(bmalloc::Heap::scavenge):
(bmalloc::Heap::scavengeToHighWatermark):
(bmalloc::Heap::deallocateLineCache):
(bmalloc::Heap::allocateSmallChunk):
(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::deallocateSmallLine):
(bmalloc::Heap::allocateSmallBumpRangesByMetadata):
(bmalloc::Heap::allocateSmallBumpRangesByObject):
(bmalloc::Heap::splitAndAllocate):
(bmalloc::Heap::allocateLarge):
(bmalloc::Heap::isLarge):
(bmalloc::Heap::largeSize):
(bmalloc::Heap::shrinkLarge):
(bmalloc::Heap::deallocateLarge):
(bmalloc::Heap::externalCommit):
(bmalloc::Heap::externalDecommit):
* bmalloc/Heap.h:
(bmalloc::Heap::allocateSmallBumpRanges):
(bmalloc::Heap::derefSmallLine):
* bmalloc/HeapConstants.cpp:
(bmalloc::HeapConstants::HeapConstants):
* bmalloc/HeapConstants.h:
* bmalloc/IsoAllocatorInlines.h:
(bmalloc::IsoAllocator<Config>::allocateSlow):
(bmalloc::IsoAllocator<Config>::scavenge):
* bmalloc/IsoDeallocatorInlines.h:
(bmalloc::IsoDeallocator<Config>::deallocate):
(bmalloc::IsoDeallocator<Config>::scavenge):
* bmalloc/IsoDirectory.h:
* bmalloc/IsoDirectoryInlines.h:
(bmalloc::passedNumPages>::takeFirstEligible):
(bmalloc::passedNumPages>::didBecome):
(bmalloc::passedNumPages>::didDecommit):
(bmalloc::passedNumPages>::scavengePage):
(bmalloc::passedNumPages>::scavenge):
(bmalloc::passedNumPages>::scavengeToHighWatermark):
(bmalloc::passedNumPages>::forEachCommittedPage):
* bmalloc/IsoHeapImpl.h:
* bmalloc/IsoHeapImplInlines.h:
(bmalloc::IsoHeapImpl<Config>::takeFirstEligible):
(bmalloc::IsoHeapImpl<Config>::didBecomeEligibleOrDecommited):
(bmalloc::IsoHeapImpl<Config>::scavenge):
(bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
(bmalloc::IsoHeapImpl<Config>::numLiveObjects):
(bmalloc::IsoHeapImpl<Config>::numCommittedPages):
(bmalloc::IsoHeapImpl<Config>::forEachDirectory):
(bmalloc::IsoHeapImpl<Config>::forEachCommittedPage):
(bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
(bmalloc::IsoHeapImpl<Config>::allocateFromShared):
* bmalloc/IsoPage.h:
* bmalloc/IsoPageInlines.h:
(bmalloc::IsoPage<Config>::free):
(bmalloc::IsoPage<Config>::startAllocating):
(bmalloc::IsoPage<Config>::stopAllocating):
(bmalloc::IsoPage<Config>::forEachLiveObject):
* bmalloc/IsoSharedHeap.h:
(bmalloc::IsoSharedHeap::IsoSharedHeap):
* bmalloc/IsoSharedHeapInlines.h:
(bmalloc::IsoSharedHeap::allocateNew):
(bmalloc::IsoSharedHeap::allocateSlow):
* bmalloc/IsoSharedPage.h:
* bmalloc/IsoSharedPageInlines.h:
(bmalloc::IsoSharedPage::free):
(bmalloc::IsoSharedPage::startAllocating):
(bmalloc::IsoSharedPage::stopAllocating):
* bmalloc/IsoTLSDeallocatorEntry.h:
* bmalloc/IsoTLSDeallocatorEntryInlines.h:
(bmalloc::IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry):
* bmalloc/IsoTLSInlines.h:
(bmalloc::IsoTLS::ensureHeap):
* bmalloc/IsoTLSLayout.cpp:
(bmalloc::IsoTLSLayout::IsoTLSLayout):
(bmalloc::IsoTLSLayout::add):
* bmalloc/IsoTLSLayout.h:
* bmalloc/Mutex.h:
(bmalloc::sleep):
(bmalloc::waitUntilFalse):
* bmalloc/ObjectType.cpp:
(bmalloc::objectType):
* bmalloc/PerProcess.cpp:
(bmalloc::getPerProcessData):
* bmalloc/PerProcess.h:
(bmalloc::PerProcess::getSlowCase):
* bmalloc/Scavenger.cpp:
(bmalloc::Scavenger::Scavenger):
(bmalloc::Scavenger::run):
(bmalloc::Scavenger::runSoon):
(bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
(bmalloc::Scavenger::schedule):
(bmalloc::Scavenger::timeSinceLastFullScavenge):
(bmalloc::Scavenger::timeSinceLastPartialScavenge):
(bmalloc::Scavenger::scavenge):
(bmalloc::Scavenger::partialScavenge):
(bmalloc::Scavenger::freeableMemory):
(bmalloc::Scavenger::threadRunLoop):
* bmalloc/Scavenger.h:
* bmalloc/SmallLine.h:
(bmalloc::SmallLine::refCount):
(bmalloc::SmallLine::ref):
(bmalloc::SmallLine::deref):
* bmalloc/SmallPage.h:
(bmalloc::SmallPage::refCount):
(bmalloc::SmallPage::hasFreeLines const):
(bmalloc::SmallPage::setHasFreeLines):
(bmalloc::SmallPage::ref):
(bmalloc::SmallPage::deref):
* bmalloc/StaticPerProcess.h:
* bmalloc/VMHeap.cpp:
(bmalloc::VMHeap::VMHeap):
* bmalloc/VMHeap.h:
* bmalloc/Zone.cpp:
(bmalloc::Zone::Zone):
* bmalloc/Zone.h:
* bmalloc/bmalloc.cpp:
(bmalloc::api::tryLargeZeroedMemalignVirtual):
(bmalloc::api::freeLargeVirtual):
(bmalloc::api::setScavengerThreadQOSClass):


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@254781 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/bmalloc/ChangeLog b/Source/bmalloc/ChangeLog
index 1ae66f7..20d79e3 100644
--- a/Source/bmalloc/ChangeLog
+++ b/Source/bmalloc/ChangeLog
@@ -1,3 +1,171 @@
+2020-01-17  Basuke Suzuki  <basuke.suzuki@sony.com>
+
+        [bmalloc] Define alias for std::lock_guard and std::unique_lock for better readability
+        https://bugs.webkit.org/show_bug.cgi?id=206443
+
+        Reviewed by Yusuke Suzuki.
+
+        There are two types of lock holders in bmalloc: std::lock_guard and std::unique_lock. Their names are relatively long
+        and a bit hard to distinguish from each other. Define simple type aliases for them: LockHolder and UniqueLockHolder.
+
+        * bmalloc/AllIsoHeaps.cpp:
+        (bmalloc::AllIsoHeaps::AllIsoHeaps):
+        (bmalloc::AllIsoHeaps::add):
+        (bmalloc::AllIsoHeaps::head):
+        * bmalloc/AllIsoHeaps.h:
+        * bmalloc/Allocator.cpp:
+        (bmalloc::Allocator::reallocateImpl):
+        (bmalloc::Allocator::refillAllocatorSlowCase):
+        (bmalloc::Allocator::allocateLarge):
+        * bmalloc/CryptoRandom.cpp:
+        (bmalloc::ARC4RandomNumberGenerator::ARC4RandomNumberGenerator):
+        (bmalloc::ARC4RandomNumberGenerator::randomValues):
+        * bmalloc/Deallocator.cpp:
+        (bmalloc::Deallocator::scavenge):
+        (bmalloc::Deallocator::processObjectLog):
+        (bmalloc::Deallocator::deallocateSlowCase):
+        * bmalloc/Deallocator.h:
+        (bmalloc::Deallocator::lineCache):
+        * bmalloc/DebugHeap.cpp:
+        (bmalloc::DebugHeap::DebugHeap):
+        (bmalloc::DebugHeap::memalignLarge):
+        (bmalloc::DebugHeap::freeLarge):
+        * bmalloc/DebugHeap.h:
+        * bmalloc/DeferredTrigger.h:
+        * bmalloc/DeferredTriggerInlines.h:
+        (bmalloc::DeferredTrigger<trigger>::didBecome):
+        (bmalloc::DeferredTrigger<trigger>::handleDeferral):
+        * bmalloc/Environment.cpp:
+        (bmalloc::Environment::Environment):
+        * bmalloc/Environment.h:
+        * bmalloc/Gigacage.cpp:
+        (bmalloc::PrimitiveDisableCallbacks::PrimitiveDisableCallbacks):
+        (Gigacage::disablePrimitiveGigacage):
+        (Gigacage::addPrimitiveDisableCallback):
+        (Gigacage::removePrimitiveDisableCallback):
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::Heap):
+        (bmalloc::Heap::freeableMemory):
+        (bmalloc::Heap::markAllLargeAsEligibile):
+        (bmalloc::Heap::decommitLargeRange):
+        (bmalloc::Heap::scavenge):
+        (bmalloc::Heap::scavengeToHighWatermark):
+        (bmalloc::Heap::deallocateLineCache):
+        (bmalloc::Heap::allocateSmallChunk):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::deallocateSmallLine):
+        (bmalloc::Heap::allocateSmallBumpRangesByMetadata):
+        (bmalloc::Heap::allocateSmallBumpRangesByObject):
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::allocateLarge):
+        (bmalloc::Heap::isLarge):
+        (bmalloc::Heap::largeSize):
+        (bmalloc::Heap::shrinkLarge):
+        (bmalloc::Heap::deallocateLarge):
+        (bmalloc::Heap::externalCommit):
+        (bmalloc::Heap::externalDecommit):
+        * bmalloc/Heap.h:
+        (bmalloc::Heap::allocateSmallBumpRanges):
+        (bmalloc::Heap::derefSmallLine):
+        * bmalloc/HeapConstants.cpp:
+        (bmalloc::HeapConstants::HeapConstants):
+        * bmalloc/HeapConstants.h:
+        * bmalloc/IsoAllocatorInlines.h:
+        (bmalloc::IsoAllocator<Config>::allocateSlow):
+        (bmalloc::IsoAllocator<Config>::scavenge):
+        * bmalloc/IsoDeallocatorInlines.h:
+        (bmalloc::IsoDeallocator<Config>::deallocate):
+        (bmalloc::IsoDeallocator<Config>::scavenge):
+        * bmalloc/IsoDirectory.h:
+        * bmalloc/IsoDirectoryInlines.h:
+        (bmalloc::passedNumPages>::takeFirstEligible):
+        (bmalloc::passedNumPages>::didBecome):
+        (bmalloc::passedNumPages>::didDecommit):
+        (bmalloc::passedNumPages>::scavengePage):
+        (bmalloc::passedNumPages>::scavenge):
+        (bmalloc::passedNumPages>::scavengeToHighWatermark):
+        (bmalloc::passedNumPages>::forEachCommittedPage):
+        * bmalloc/IsoHeapImpl.h:
+        * bmalloc/IsoHeapImplInlines.h:
+        (bmalloc::IsoHeapImpl<Config>::takeFirstEligible):
+        (bmalloc::IsoHeapImpl<Config>::didBecomeEligibleOrDecommited):
+        (bmalloc::IsoHeapImpl<Config>::scavenge):
+        (bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
+        (bmalloc::IsoHeapImpl<Config>::numLiveObjects):
+        (bmalloc::IsoHeapImpl<Config>::numCommittedPages):
+        (bmalloc::IsoHeapImpl<Config>::forEachDirectory):
+        (bmalloc::IsoHeapImpl<Config>::forEachCommittedPage):
+        (bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
+        (bmalloc::IsoHeapImpl<Config>::allocateFromShared):
+        * bmalloc/IsoPage.h:
+        * bmalloc/IsoPageInlines.h:
+        (bmalloc::IsoPage<Config>::free):
+        (bmalloc::IsoPage<Config>::startAllocating):
+        (bmalloc::IsoPage<Config>::stopAllocating):
+        (bmalloc::IsoPage<Config>::forEachLiveObject):
+        * bmalloc/IsoSharedHeap.h:
+        (bmalloc::IsoSharedHeap::IsoSharedHeap):
+        * bmalloc/IsoSharedHeapInlines.h:
+        (bmalloc::IsoSharedHeap::allocateNew):
+        (bmalloc::IsoSharedHeap::allocateSlow):
+        * bmalloc/IsoSharedPage.h:
+        * bmalloc/IsoSharedPageInlines.h:
+        (bmalloc::IsoSharedPage::free):
+        (bmalloc::IsoSharedPage::startAllocating):
+        (bmalloc::IsoSharedPage::stopAllocating):
+        * bmalloc/IsoTLSDeallocatorEntry.h:
+        * bmalloc/IsoTLSDeallocatorEntryInlines.h:
+        (bmalloc::IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry):
+        * bmalloc/IsoTLSInlines.h:
+        (bmalloc::IsoTLS::ensureHeap):
+        * bmalloc/IsoTLSLayout.cpp:
+        (bmalloc::IsoTLSLayout::IsoTLSLayout):
+        (bmalloc::IsoTLSLayout::add):
+        * bmalloc/IsoTLSLayout.h:
+        * bmalloc/Mutex.h:
+        (bmalloc::sleep):
+        (bmalloc::waitUntilFalse):
+        * bmalloc/ObjectType.cpp:
+        (bmalloc::objectType):
+        * bmalloc/PerProcess.cpp:
+        (bmalloc::getPerProcessData):
+        * bmalloc/PerProcess.h:
+        (bmalloc::PerProcess::getSlowCase):
+        * bmalloc/Scavenger.cpp:
+        (bmalloc::Scavenger::Scavenger):
+        (bmalloc::Scavenger::run):
+        (bmalloc::Scavenger::runSoon):
+        (bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
+        (bmalloc::Scavenger::schedule):
+        (bmalloc::Scavenger::timeSinceLastFullScavenge):
+        (bmalloc::Scavenger::timeSinceLastPartialScavenge):
+        (bmalloc::Scavenger::scavenge):
+        (bmalloc::Scavenger::partialScavenge):
+        (bmalloc::Scavenger::freeableMemory):
+        (bmalloc::Scavenger::threadRunLoop):
+        * bmalloc/Scavenger.h:
+        * bmalloc/SmallLine.h:
+        (bmalloc::SmallLine::refCount):
+        (bmalloc::SmallLine::ref):
+        (bmalloc::SmallLine::deref):
+        * bmalloc/SmallPage.h:
+        (bmalloc::SmallPage::refCount):
+        (bmalloc::SmallPage::hasFreeLines const):
+        (bmalloc::SmallPage::setHasFreeLines):
+        (bmalloc::SmallPage::ref):
+        (bmalloc::SmallPage::deref):
+        * bmalloc/StaticPerProcess.h:
+        * bmalloc/VMHeap.cpp:
+        (bmalloc::VMHeap::VMHeap):
+        * bmalloc/VMHeap.h:
+        * bmalloc/Zone.cpp:
+        (bmalloc::Zone::Zone):
+        * bmalloc/Zone.h:
+        * bmalloc/bmalloc.cpp:
+        (bmalloc::api::tryLargeZeroedMemalignVirtual):
+        (bmalloc::api::freeLargeVirtual):
+        (bmalloc::api::setScavengerThreadQOSClass):
+
 2020-01-17  Yusuke Suzuki  <ysuzuki@apple.com>
 
         [bmalloc] Fix IsoHeapImpl's assertion introduced in r254708
diff --git a/Source/bmalloc/bmalloc/AllIsoHeaps.cpp b/Source/bmalloc/bmalloc/AllIsoHeaps.cpp
index ca420e5..7790a1a 100644
--- a/Source/bmalloc/bmalloc/AllIsoHeaps.cpp
+++ b/Source/bmalloc/bmalloc/AllIsoHeaps.cpp
@@ -29,20 +29,20 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(AllIsoHeaps);
 
-AllIsoHeaps::AllIsoHeaps(const std::lock_guard<Mutex>&)
+AllIsoHeaps::AllIsoHeaps(const LockHolder&)
 {
 }
 
 void AllIsoHeaps::add(IsoHeapImplBase* heap)
 {
-    std::lock_guard<Mutex> locker(mutex());
+    LockHolder locker(mutex());
     heap->m_next = m_head;
     m_head = heap;
 }
 
 IsoHeapImplBase* AllIsoHeaps::head()
 {
-    std::lock_guard<Mutex> locker(mutex());
+    LockHolder locker(mutex());
     return m_head;
 }
 
diff --git a/Source/bmalloc/bmalloc/AllIsoHeaps.h b/Source/bmalloc/bmalloc/AllIsoHeaps.h
index 6fb9c06..42ddc11 100644
--- a/Source/bmalloc/bmalloc/AllIsoHeaps.h
+++ b/Source/bmalloc/bmalloc/AllIsoHeaps.h
@@ -33,7 +33,7 @@
 
 class BEXPORT AllIsoHeaps : public StaticPerProcess<AllIsoHeaps> {
 public:
-    AllIsoHeaps(const std::lock_guard<Mutex>&);
+    AllIsoHeaps(const LockHolder&);
     
     void add(IsoHeapImplBase*);
     IsoHeapImplBase* head();
diff --git a/Source/bmalloc/bmalloc/Allocator.cpp b/Source/bmalloc/bmalloc/Allocator.cpp
index 0cddd69..506d2d8 100644
--- a/Source/bmalloc/bmalloc/Allocator.cpp
+++ b/Source/bmalloc/bmalloc/Allocator.cpp
@@ -76,7 +76,7 @@
         break;
     }
     case ObjectType::Large: {
-        std::unique_lock<Mutex> lock(Heap::mutex());
+        UniqueLockHolder lock(Heap::mutex());
         oldSize = m_heap.largeSize(lock, object);
 
         if (newSize < oldSize && newSize > smallMax) {
@@ -122,7 +122,7 @@
 {
     BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
 
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     m_deallocator.processObjectLog(lock);
     m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock), action);
 }
@@ -137,7 +137,7 @@
 
 BNO_INLINE void* Allocator::allocateLarge(size_t size, FailureAction action)
 {
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     return m_heap.allocateLarge(lock, alignment, size, action);
 }
 
diff --git a/Source/bmalloc/bmalloc/CryptoRandom.cpp b/Source/bmalloc/bmalloc/CryptoRandom.cpp
index 91c35fc..dc831c8 100644
--- a/Source/bmalloc/bmalloc/CryptoRandom.cpp
+++ b/Source/bmalloc/bmalloc/CryptoRandom.cpp
@@ -61,7 +61,7 @@
 
 class ARC4RandomNumberGenerator : public StaticPerProcess<ARC4RandomNumberGenerator> {
 public:
-    ARC4RandomNumberGenerator(const std::lock_guard<Mutex>&);
+    ARC4RandomNumberGenerator(const LockHolder&);
 
     uint32_t randomNumber();
     void randomValues(void* buffer, size_t length);
@@ -86,7 +86,7 @@
     j = 0;
 }
 
-ARC4RandomNumberGenerator::ARC4RandomNumberGenerator(const std::lock_guard<Mutex>&)
+ARC4RandomNumberGenerator::ARC4RandomNumberGenerator(const LockHolder&)
     : m_count(0)
 {
 }
@@ -164,7 +164,7 @@
 
 void ARC4RandomNumberGenerator::randomValues(void* buffer, size_t length)
 {
-    std::lock_guard<Mutex> lock(mutex());
+    LockHolder lock(mutex());
 
     unsigned char* result = reinterpret_cast<unsigned char*>(buffer);
     stirIfNeeded();
diff --git a/Source/bmalloc/bmalloc/Deallocator.cpp b/Source/bmalloc/bmalloc/Deallocator.cpp
index 3772270..d141407 100644
--- a/Source/bmalloc/bmalloc/Deallocator.cpp
+++ b/Source/bmalloc/bmalloc/Deallocator.cpp
@@ -50,13 +50,13 @@
     
 void Deallocator::scavenge()
 {
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
 
     processObjectLog(lock);
     m_heap.deallocateLineCache(lock, lineCache(lock));
 }
 
-void Deallocator::processObjectLog(std::unique_lock<Mutex>& lock)
+void Deallocator::processObjectLog(UniqueLockHolder& lock)
 {
     for (Object object : m_objectLog)
         m_heap.derefSmallLine(lock, object, lineCache(lock));
@@ -68,7 +68,7 @@
     if (!object)
         return;
 
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     if (m_heap.isLarge(lock, object)) {
         m_heap.deallocateLarge(lock, object);
         return;
diff --git a/Source/bmalloc/bmalloc/Deallocator.h b/Source/bmalloc/bmalloc/Deallocator.h
index 1342c4c..f14feea 100644
--- a/Source/bmalloc/bmalloc/Deallocator.h
+++ b/Source/bmalloc/bmalloc/Deallocator.h
@@ -46,9 +46,9 @@
     void deallocate(void*);
     void scavenge();
     
-    void processObjectLog(std::unique_lock<Mutex>&);
+    void processObjectLog(UniqueLockHolder&);
     
-    LineCache& lineCache(std::unique_lock<Mutex>&) { return m_lineCache; }
+    LineCache& lineCache(UniqueLockHolder&) { return m_lineCache; }
 
 private:
     bool deallocateFastCase(void*);
diff --git a/Source/bmalloc/bmalloc/DebugHeap.cpp b/Source/bmalloc/bmalloc/DebugHeap.cpp
index 2c9f5b4..e89a427 100644
--- a/Source/bmalloc/bmalloc/DebugHeap.cpp
+++ b/Source/bmalloc/bmalloc/DebugHeap.cpp
@@ -40,7 +40,7 @@
 
 #if BOS(DARWIN)
 
-DebugHeap::DebugHeap(const std::lock_guard<Mutex>&)
+DebugHeap::DebugHeap(const LockHolder&)
     : m_zone(malloc_create_zone(0, 0))
     , m_pageSize(vmPageSize())
 {
@@ -88,7 +88,7 @@
 
 #else
 
-DebugHeap::DebugHeap(const std::lock_guard<Mutex>&)
+DebugHeap::DebugHeap(const LockHolder&)
     : m_pageSize(vmPageSize())
 {
 }
@@ -141,7 +141,7 @@
     if (!result)
         return nullptr;
     {
-        std::lock_guard<Mutex> locker(mutex());
+        LockHolder locker(mutex());
         m_sizeMap[result] = size;
     }
     return result;
@@ -154,7 +154,7 @@
     
     size_t size;
     {
-        std::lock_guard<Mutex> locker(mutex());
+        LockHolder locker(mutex());
         size = m_sizeMap[base];
         size_t numErased = m_sizeMap.erase(base);
         RELEASE_BASSERT(numErased == 1);
diff --git a/Source/bmalloc/bmalloc/DebugHeap.h b/Source/bmalloc/bmalloc/DebugHeap.h
index 9db8552..35dc534 100644
--- a/Source/bmalloc/bmalloc/DebugHeap.h
+++ b/Source/bmalloc/bmalloc/DebugHeap.h
@@ -40,7 +40,7 @@
     
 class DebugHeap : private StaticPerProcess<DebugHeap> {
 public:
-    DebugHeap(const std::lock_guard<Mutex>&);
+    DebugHeap(const LockHolder&);
     
     void* malloc(size_t, FailureAction);
     void* memalign(size_t alignment, size_t, FailureAction);
diff --git a/Source/bmalloc/bmalloc/DeferredTrigger.h b/Source/bmalloc/bmalloc/DeferredTrigger.h
index 29fb197..d337a13 100644
--- a/Source/bmalloc/bmalloc/DeferredTrigger.h
+++ b/Source/bmalloc/bmalloc/DeferredTrigger.h
@@ -39,10 +39,10 @@
     DeferredTrigger() { }
     
     template<typename Config>
-    void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>&);
+    void didBecome(const LockHolder&, IsoPage<Config>&);
     
     template<typename Config>
-    void handleDeferral(const std::lock_guard<Mutex>&, IsoPage<Config>&);
+    void handleDeferral(const LockHolder&, IsoPage<Config>&);
     
 private:
     bool m_hasBeenDeferred { false };
diff --git a/Source/bmalloc/bmalloc/DeferredTriggerInlines.h b/Source/bmalloc/bmalloc/DeferredTriggerInlines.h
index 71ea726..4272982 100644
--- a/Source/bmalloc/bmalloc/DeferredTriggerInlines.h
+++ b/Source/bmalloc/bmalloc/DeferredTriggerInlines.h
@@ -32,7 +32,7 @@
 
 template<IsoPageTrigger trigger>
 template<typename Config>
-void DeferredTrigger<trigger>::didBecome(const std::lock_guard<Mutex>& locker, IsoPage<Config>& page)
+void DeferredTrigger<trigger>::didBecome(const LockHolder& locker, IsoPage<Config>& page)
 {
     if (page.isInUseForAllocation())
         m_hasBeenDeferred = true;
@@ -42,7 +42,7 @@
 
 template<IsoPageTrigger trigger>
 template<typename Config>
-void DeferredTrigger<trigger>::handleDeferral(const std::lock_guard<Mutex>& locker, IsoPage<Config>& page)
+void DeferredTrigger<trigger>::handleDeferral(const LockHolder& locker, IsoPage<Config>& page)
 {
     RELEASE_BASSERT(!page.isInUseForAllocation());
     
diff --git a/Source/bmalloc/bmalloc/Environment.cpp b/Source/bmalloc/bmalloc/Environment.cpp
index 877a6c2..5a1d815 100644
--- a/Source/bmalloc/bmalloc/Environment.cpp
+++ b/Source/bmalloc/bmalloc/Environment.cpp
@@ -127,7 +127,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(Environment);
 
-Environment::Environment(const std::lock_guard<Mutex>&)
+Environment::Environment(const LockHolder&)
     : m_isDebugHeapEnabled(computeIsDebugHeapEnabled())
 {
 }
diff --git a/Source/bmalloc/bmalloc/Environment.h b/Source/bmalloc/bmalloc/Environment.h
index 7b28de9..37e5fb2 100644
--- a/Source/bmalloc/bmalloc/Environment.h
+++ b/Source/bmalloc/bmalloc/Environment.h
@@ -33,7 +33,7 @@
 
 class Environment : public StaticPerProcess<Environment> {
 public:
-    BEXPORT Environment(const std::lock_guard<Mutex>&);
+    BEXPORT Environment(const LockHolder&);
     
     bool isDebugHeapEnabled() { return m_isDebugHeapEnabled; }
 
diff --git a/Source/bmalloc/bmalloc/Gigacage.cpp b/Source/bmalloc/bmalloc/Gigacage.cpp
index 4201959f..f04651b 100644
--- a/Source/bmalloc/bmalloc/Gigacage.cpp
+++ b/Source/bmalloc/bmalloc/Gigacage.cpp
@@ -61,7 +61,7 @@
 namespace bmalloc {
 
 struct PrimitiveDisableCallbacks : public StaticPerProcess<PrimitiveDisableCallbacks> {
-    PrimitiveDisableCallbacks(const std::lock_guard<Mutex>&) { }
+    PrimitiveDisableCallbacks(const LockHolder&) { }
     
     Vector<Gigacage::Callback> callbacks;
 };
@@ -257,7 +257,7 @@
     }
     
     PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
-    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
+    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
     for (Callback& callback : callbacks.callbacks)
         callback.function(callback.argument);
     callbacks.callbacks.shrink(0);
@@ -275,14 +275,14 @@
     }
     
     PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
-    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
+    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
     callbacks.callbacks.push(Callback(function, argument));
 }
 
 void removePrimitiveDisableCallback(void (*function)(void*), void* argument)
 {
     PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
-    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
+    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
     for (size_t i = 0; i < callbacks.callbacks.size(); ++i) {
         if (callbacks.callbacks[i].function == function
             && callbacks.callbacks[i].argument == argument) {
diff --git a/Source/bmalloc/bmalloc/Heap.cpp b/Source/bmalloc/bmalloc/Heap.cpp
index 3aae43a..6d714f0 100644
--- a/Source/bmalloc/bmalloc/Heap.cpp
+++ b/Source/bmalloc/bmalloc/Heap.cpp
@@ -45,7 +45,7 @@
 
 namespace bmalloc {
 
-Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
+Heap::Heap(HeapKind kind, LockHolder&)
     : m_kind { kind }, m_constants { *HeapConstants::get() }
 {
     BASSERT(!Environment::get()->isDebugHeapEnabled());
@@ -81,7 +81,7 @@
     return Gigacage::size(gigacageKind(m_kind));
 }
 
-size_t Heap::freeableMemory(const std::lock_guard<Mutex>&)
+size_t Heap::freeableMemory(const LockHolder&)
 {
     return m_freeableMemory;
 }
@@ -91,14 +91,14 @@
     return m_footprint;
 }
 
-void Heap::markAllLargeAsEligibile(const std::lock_guard<Mutex>&)
+void Heap::markAllLargeAsEligibile(const LockHolder&)
 {
     m_largeFree.markAllAsEligibile();
     m_hasPendingDecommits = false;
     m_condition.notify_all();
 }
 
-void Heap::decommitLargeRange(const std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
+void Heap::decommitLargeRange(const LockHolder&, LargeRange& range, BulkDecommit& decommitter)
 {
     m_footprint -= range.totalPhysicalSize();
     m_freeableMemory -= range.totalPhysicalSize();
@@ -114,9 +114,9 @@
 }
 
 #if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavenge(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
+void Heap::scavenge(const LockHolder& lock, BulkDecommit& decommitter)
 #else
-void Heap::scavenge(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
+void Heap::scavenge(const LockHolder& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
 #endif
 {
     for (auto& list : m_freePages) {
@@ -169,7 +169,7 @@
 }
 
 #if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavengeToHighWatermark(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
+void Heap::scavengeToHighWatermark(const LockHolder& lock, BulkDecommit& decommitter)
 {
     void* newHighWaterMark = nullptr;
     for (LargeRange& range : m_largeFree) {
@@ -182,7 +182,7 @@
 }
 #endif
 
-void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
+void Heap::deallocateLineCache(UniqueLockHolder&, LineCache& lineCache)
 {
     for (auto& list : lineCache) {
         while (!list.isEmpty()) {
@@ -192,7 +192,7 @@
     }
 }
 
-void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass, FailureAction action)
+void Heap::allocateSmallChunk(UniqueLockHolder& lock, size_t pageClass, FailureAction action)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
     
@@ -267,7 +267,7 @@
     m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
 }
 
-SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache, FailureAction action)
+SmallPage* Heap::allocateSmallPage(UniqueLockHolder& lock, size_t sizeClass, LineCache& lineCache, FailureAction action)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
@@ -323,7 +323,7 @@
     return page;
 }
 
-void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
+void Heap::deallocateSmallLine(UniqueLockHolder& lock, Object object, LineCache& lineCache)
 {
     BASSERT(!object.line()->refCount(lock));
     SmallPage* page = object.page();
@@ -363,7 +363,7 @@
 }
 
 void Heap::allocateSmallBumpRangesByMetadata(
-    std::unique_lock<Mutex>& lock, size_t sizeClass,
+    UniqueLockHolder& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
     LineCache& lineCache, FailureAction action)
 {
@@ -431,7 +431,7 @@
 }
 
 void Heap::allocateSmallBumpRangesByObject(
-    std::unique_lock<Mutex>& lock, size_t sizeClass,
+    UniqueLockHolder& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
     LineCache& lineCache, FailureAction action)
 {
@@ -492,7 +492,7 @@
     }
 }
 
-LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
+LargeRange Heap::splitAndAllocate(UniqueLockHolder&, LargeRange& range, size_t alignment, size_t size)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
@@ -540,7 +540,7 @@
     return range;
 }
 
-void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size, FailureAction action)
+void* Heap::allocateLarge(UniqueLockHolder& lock, size_t alignment, size_t size, FailureAction action)
 {
 #define ASSERT_OR_RETURN_ON_FAILURE(cond) do { \
         if (action == FailureAction::Crash) \
@@ -593,17 +593,17 @@
 #undef ASSERT_OR_RETURN_ON_FAILURE
 }
 
-bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
+bool Heap::isLarge(UniqueLockHolder&, void* object)
 {
     return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
 }
 
-size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
+size_t Heap::largeSize(UniqueLockHolder&, void* object)
 {
     return m_largeAllocated.get(object);
 }
 
-void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
+void Heap::shrinkLarge(UniqueLockHolder& lock, const Range& object, size_t newSize)
 {
     BASSERT(object.size() > newSize);
 
@@ -614,7 +614,7 @@
     m_scavenger->schedule(size);
 }
 
-void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
+void Heap::deallocateLarge(UniqueLockHolder&, void* object)
 {
     size_t size = m_largeAllocated.remove(object);
     m_largeFree.add(LargeRange(object, size, size, size));
@@ -624,11 +624,11 @@
 
 void Heap::externalCommit(void* ptr, size_t size)
 {
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     externalCommit(lock, ptr, size);
 }
 
-void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
+void Heap::externalCommit(UniqueLockHolder&, void* ptr, size_t size)
 {
     BUNUSED_PARAM(ptr);
 
@@ -640,11 +640,11 @@
 
 void Heap::externalDecommit(void* ptr, size_t size)
 {
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     externalDecommit(lock, ptr, size);
 }
 
-void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
+void Heap::externalDecommit(UniqueLockHolder&, void* ptr, size_t size)
 {
     BUNUSED_PARAM(ptr);
 
diff --git a/Source/bmalloc/bmalloc/Heap.h b/Source/bmalloc/bmalloc/Heap.h
index 59c75f2..26d7743 100644
--- a/Source/bmalloc/bmalloc/Heap.h
+++ b/Source/bmalloc/bmalloc/Heap.h
@@ -57,44 +57,44 @@
 
 class Heap {
 public:
-    Heap(HeapKind, std::lock_guard<Mutex>&);
+    Heap(HeapKind, LockHolder&);
     
     static Mutex& mutex() { return PerProcess<PerHeapKind<Heap>>::mutex(); }
     
     HeapKind kind() const { return m_kind; }
     
-    void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass,
+    void allocateSmallBumpRanges(UniqueLockHolder&, size_t sizeClass,
         BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
-    void derefSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
-    void deallocateLineCache(std::unique_lock<Mutex>&, LineCache&);
+    void derefSmallLine(UniqueLockHolder&, Object, LineCache&);
+    void deallocateLineCache(UniqueLockHolder&, LineCache&);
 
-    void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t, FailureAction);
-    void deallocateLarge(std::unique_lock<Mutex>&, void*);
+    void* allocateLarge(UniqueLockHolder&, size_t alignment, size_t, FailureAction);
+    void deallocateLarge(UniqueLockHolder&, void*);
 
-    bool isLarge(std::unique_lock<Mutex>&, void*);
-    size_t largeSize(std::unique_lock<Mutex>&, void*);
-    void shrinkLarge(std::unique_lock<Mutex>&, const Range&, size_t);
+    bool isLarge(UniqueLockHolder&, void*);
+    size_t largeSize(UniqueLockHolder&, void*);
+    void shrinkLarge(UniqueLockHolder&, const Range&, size_t);
 
 #if BUSE(PARTIAL_SCAVENGE)
-    void scavengeToHighWatermark(const std::lock_guard<Mutex>&, BulkDecommit&);
-    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&);
+    void scavengeToHighWatermark(const LockHolder&, BulkDecommit&);
+    void scavenge(const LockHolder&, BulkDecommit&);
 #else
-    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits);
+    void scavenge(const LockHolder&, BulkDecommit&, size_t& deferredDecommits);
 #endif
-    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal);
+    void scavenge(const LockHolder&, BulkDecommit&, size_t& freed, size_t goal);
 
-    size_t freeableMemory(const std::lock_guard<Mutex>&);
+    size_t freeableMemory(const LockHolder&);
     size_t footprint();
 
     void externalDecommit(void* ptr, size_t);
-    void externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t);
+    void externalDecommit(UniqueLockHolder&, void* ptr, size_t);
     void externalCommit(void* ptr, size_t);
-    void externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t);
+    void externalCommit(UniqueLockHolder&, void* ptr, size_t);
 
-    void markAllLargeAsEligibile(const std::lock_guard<Mutex>&);
+    void markAllLargeAsEligibile(const LockHolder&);
 
 private:
-    void decommitLargeRange(const std::lock_guard<Mutex>&, LargeRange&, BulkDecommit&);
+    void decommitLargeRange(const LockHolder&, LargeRange&, BulkDecommit&);
 
     struct LargeObjectHash {
         static unsigned hash(void* key)
@@ -110,22 +110,22 @@
     void* gigacageBasePtr(); // May crash if !usingGigacage().
     size_t gigacageSize();
 
-    void allocateSmallBumpRangesByMetadata(std::unique_lock<Mutex>&,
+    void allocateSmallBumpRangesByMetadata(UniqueLockHolder&,
         size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
-    void allocateSmallBumpRangesByObject(std::unique_lock<Mutex>&,
+    void allocateSmallBumpRangesByObject(UniqueLockHolder&,
         size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
 
-    SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&, FailureAction);
-    void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
+    SmallPage* allocateSmallPage(UniqueLockHolder&, size_t sizeClass, LineCache&, FailureAction);
+    void deallocateSmallLine(UniqueLockHolder&, Object, LineCache&);
 
-    void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass, FailureAction);
+    void allocateSmallChunk(UniqueLockHolder&, size_t pageClass, FailureAction);
     void deallocateSmallChunk(Chunk*, size_t pageClass);
 
     void mergeLarge(BeginTag*&, EndTag*&, Range&);
     void mergeLargeLeft(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
     void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
 
-    LargeRange splitAndAllocate(std::unique_lock<Mutex>&, LargeRange&, size_t alignment, size_t);
+    LargeRange splitAndAllocate(UniqueLockHolder&, LargeRange&, size_t alignment, size_t);
 
     HeapKind m_kind;
     HeapConstants& m_constants;
@@ -157,7 +157,7 @@
 };
 
 inline void Heap::allocateSmallBumpRanges(
-    std::unique_lock<Mutex>& lock, size_t sizeClass,
+    UniqueLockHolder& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
     LineCache& lineCache, FailureAction action)
 {
@@ -166,7 +166,7 @@
     return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache, action);
 }
 
-inline void Heap::derefSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
+inline void Heap::derefSmallLine(UniqueLockHolder& lock, Object object, LineCache& lineCache)
 {
     if (!object.line()->deref(lock))
         return;
diff --git a/Source/bmalloc/bmalloc/HeapConstants.cpp b/Source/bmalloc/bmalloc/HeapConstants.cpp
index f384a54..4601d2f 100644
--- a/Source/bmalloc/bmalloc/HeapConstants.cpp
+++ b/Source/bmalloc/bmalloc/HeapConstants.cpp
@@ -30,7 +30,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(HeapConstants);
 
-HeapConstants::HeapConstants(const std::lock_guard<Mutex>&)
+HeapConstants::HeapConstants(const LockHolder&)
     : m_vmPageSizePhysical { vmPageSizePhysical() }
 {
     RELEASE_BASSERT(m_vmPageSizePhysical >= smallPageSize);
diff --git a/Source/bmalloc/bmalloc/HeapConstants.h b/Source/bmalloc/bmalloc/HeapConstants.h
index 0ebc950..fb40738 100644
--- a/Source/bmalloc/bmalloc/HeapConstants.h
+++ b/Source/bmalloc/bmalloc/HeapConstants.h
@@ -37,7 +37,7 @@
 
 class HeapConstants : public StaticPerProcess<HeapConstants> {
 public:
-    HeapConstants(const std::lock_guard<Mutex>&);
+    HeapConstants(const LockHolder&);
     ~HeapConstants() = delete;
 
     inline size_t pageClass(size_t sizeClass) const { return m_pageClasses[sizeClass]; }
diff --git a/Source/bmalloc/bmalloc/IsoAllocatorInlines.h b/Source/bmalloc/bmalloc/IsoAllocatorInlines.h
index d79f424..37f7611 100644
--- a/Source/bmalloc/bmalloc/IsoAllocatorInlines.h
+++ b/Source/bmalloc/bmalloc/IsoAllocatorInlines.h
@@ -59,7 +59,7 @@
 template<typename Config>
 BNO_INLINE void* IsoAllocator<Config>::allocateSlow(IsoHeapImpl<Config>& heap, bool abortOnFailure)
 {
-    std::lock_guard<Mutex> locker(heap.lock);
+    LockHolder locker(heap.lock);
 
     AllocationMode allocationMode = heap.updateAllocationMode();
     if (allocationMode == AllocationMode::Shared) {
@@ -93,7 +93,7 @@
 void IsoAllocator<Config>::scavenge(IsoHeapImpl<Config>& heap)
 {
     if (m_currentPage) {
-        std::lock_guard<Mutex> locker(heap.lock);
+        LockHolder locker(heap.lock);
         m_currentPage->stopAllocating(locker, m_freeList);
         m_currentPage = nullptr;
         m_freeList.clear();
diff --git a/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h b/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h
index 032fb77..3f49ee61 100644
--- a/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h
+++ b/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h
@@ -59,7 +59,7 @@
     // should be rarely taken. If we see frequent malloc-and-free pattern, we tier up the allocator from shared mode to fast mode.
     IsoPageBase* page = IsoPageBase::pageFor(ptr);
     if (page->isShared()) {
-        std::lock_guard<Mutex> locker(*m_lock);
+        LockHolder locker(*m_lock);
         static_cast<IsoSharedPage*>(page)->free<Config>(locker, handle, ptr);
         return;
     }
@@ -73,7 +73,7 @@
 template<typename Config>
 BNO_INLINE void IsoDeallocator<Config>::scavenge()
 {
-    std::lock_guard<Mutex> locker(*m_lock);
+    LockHolder locker(*m_lock);
     
     for (void* ptr : m_objectLog)
         IsoPage<Config>::pageFor(ptr)->free(locker, ptr);
diff --git a/Source/bmalloc/bmalloc/IsoDirectory.h b/Source/bmalloc/bmalloc/IsoDirectory.h
index e72b2d1..5a27111 100644
--- a/Source/bmalloc/bmalloc/IsoDirectory.h
+++ b/Source/bmalloc/bmalloc/IsoDirectory.h
@@ -50,7 +50,7 @@
     
     IsoHeapImpl<Config>& heap() { return m_heap; }
     
-    virtual void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>*, IsoPageTrigger) = 0;
+    virtual void didBecome(const LockHolder&, IsoPage<Config>*, IsoPageTrigger) = 0;
     
 protected:
     IsoHeapImpl<Config>& m_heap;
@@ -65,9 +65,9 @@
     
     // Find the first page that is eligible for allocation and return it. May return null if there is no
     // such thing. May allocate a new page if we have an uncommitted page.
-    EligibilityResult<Config> takeFirstEligible(const std::lock_guard<Mutex>&);
+    EligibilityResult<Config> takeFirstEligible(const LockHolder&);
     
-    void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>*, IsoPageTrigger) override;
+    void didBecome(const LockHolder&, IsoPage<Config>*, IsoPageTrigger) override;
     
     // This gets called from a bulk decommit function in the Scavenger, so no locks are held. This function
     // needs to get the heap lock.
@@ -75,16 +75,16 @@
     
     // Iterate over all empty and committed pages, and put them into the vector. This also records the
     // pages as being decommitted. It's the caller's job to do the actual decommitting.
-    void scavenge(const std::lock_guard<Mutex>&, Vector<DeferredDecommit>&);
+    void scavenge(const LockHolder&, Vector<DeferredDecommit>&);
 #if BUSE(PARTIAL_SCAVENGE)
-    void scavengeToHighWatermark(const std::lock_guard<Mutex>&, Vector<DeferredDecommit>&);
+    void scavengeToHighWatermark(const LockHolder&, Vector<DeferredDecommit>&);
 #endif
 
     template<typename Func>
-    void forEachCommittedPage(const std::lock_guard<Mutex>&, const Func&);
+    void forEachCommittedPage(const LockHolder&, const Func&);
     
 private:
-    void scavengePage(const std::lock_guard<Mutex>&, size_t, Vector<DeferredDecommit>&);
+    void scavengePage(const LockHolder&, size_t, Vector<DeferredDecommit>&);
 
     std::array<PackedAlignedPtr<IsoPage<Config>, IsoPage<Config>::pageSize>, numPages> m_pages { };
     // NOTE: I suppose that this could be two bitvectors. But from working on the GC, I found that the
diff --git a/Source/bmalloc/bmalloc/IsoDirectoryInlines.h b/Source/bmalloc/bmalloc/IsoDirectoryInlines.h
index 85542df..9cf5401 100644
--- a/Source/bmalloc/bmalloc/IsoDirectoryInlines.h
+++ b/Source/bmalloc/bmalloc/IsoDirectoryInlines.h
@@ -42,7 +42,7 @@
 }
 
 template<typename Config, unsigned passedNumPages>
-EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible(const std::lock_guard<Mutex>&)
+EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible(const LockHolder&)
 {
     unsigned pageIndex = (m_eligible | ~m_committed).findBit(m_firstEligibleOrDecommitted, true);
     m_firstEligibleOrDecommitted = pageIndex;
@@ -91,7 +91,7 @@
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::didBecome(const std::lock_guard<Mutex>& locker, IsoPage<Config>* page, IsoPageTrigger trigger)
+void IsoDirectory<Config, passedNumPages>::didBecome(const LockHolder& locker, IsoPage<Config>* page, IsoPageTrigger trigger)
 {
     static constexpr bool verbose = false;
     unsigned pageIndex = page->index();
@@ -121,7 +121,7 @@
     // FIXME: We could do this without grabbing the lock. I just doubt that it matters. This is not going
     // to be a frequently executed path, in the sense that decommitting perf will be dominated by the
     // syscall itself (which has to do many hard things).
-    std::lock_guard<Mutex> locker(this->m_heap.lock);
+    LockHolder locker(this->m_heap.lock);
     BASSERT(!!m_committed[index]);
     this->m_heap.isNoLongerFreeable(m_pages[index].get(), IsoPageBase::pageSize);
     m_committed[index] = false;
@@ -131,7 +131,7 @@
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavengePage(const std::lock_guard<Mutex>&, size_t index, Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavengePage(const LockHolder&, size_t index, Vector<DeferredDecommit>& decommits)
 {
     // Make sure that this page is now off limits.
     m_empty[index] = false;
@@ -140,7 +140,7 @@
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavenge(const std::lock_guard<Mutex>& locker, Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavenge(const LockHolder& locker, Vector<DeferredDecommit>& decommits)
 {
     (m_empty & m_committed).forEachSetBit(
         [&] (size_t index) {
@@ -153,7 +153,7 @@
 
 #if BUSE(PARTIAL_SCAVENGE)
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(const std::lock_guard<Mutex>& locker, Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(const LockHolder& locker, Vector<DeferredDecommit>& decommits)
 {
     (m_empty & m_committed).forEachSetBit(
         [&] (size_t index) {
@@ -166,7 +166,7 @@
 
 template<typename Config, unsigned passedNumPages>
 template<typename Func>
-void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const std::lock_guard<Mutex>&, const Func& func)
+void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const LockHolder&, const Func& func)
 {
     m_committed.forEachSetBit(
         [&] (size_t index) {
diff --git a/Source/bmalloc/bmalloc/IsoHeapImpl.h b/Source/bmalloc/bmalloc/IsoHeapImpl.h
index 0baa808..8d06b07 100644
--- a/Source/bmalloc/bmalloc/IsoHeapImpl.h
+++ b/Source/bmalloc/bmalloc/IsoHeapImpl.h
@@ -105,11 +105,11 @@
 public:
     IsoHeapImpl();
     
-    EligibilityResult<Config> takeFirstEligible(const std::lock_guard<Mutex>&);
+    EligibilityResult<Config> takeFirstEligible(const LockHolder&);
     
     // Callbacks from directory.
-    void didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, numPagesInInlineDirectory>*);
-    void didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
+    void didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, numPagesInInlineDirectory>*);
+    void didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
     
     void scavenge(Vector<DeferredDecommit>&) override;
 #if BUSE(PARTIAL_SCAVENGE)
@@ -124,17 +124,17 @@
     unsigned numCommittedPages();
     
     template<typename Func>
-    void forEachDirectory(const std::lock_guard<Mutex>&, const Func&);
+    void forEachDirectory(const LockHolder&, const Func&);
     
     template<typename Func>
-    void forEachCommittedPage(const std::lock_guard<Mutex>&, const Func&);
+    void forEachCommittedPage(const LockHolder&, const Func&);
     
     // This is only accurate when all threads are scavenged. Otherwise it will overestimate.
     template<typename Func>
-    void forEachLiveObject(const std::lock_guard<Mutex>&, const Func&);
+    void forEachLiveObject(const LockHolder&, const Func&);
 
     AllocationMode updateAllocationMode();
-    void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure);
+    void* allocateFromShared(const LockHolder&, bool abortOnFailure);
 
 private:
     PackedPtr<IsoDirectoryPage<Config>> m_headDirectory { nullptr };
diff --git a/Source/bmalloc/bmalloc/IsoHeapImplInlines.h b/Source/bmalloc/bmalloc/IsoHeapImplInlines.h
index 7f0fc31..61e9e26 100644
--- a/Source/bmalloc/bmalloc/IsoHeapImplInlines.h
+++ b/Source/bmalloc/bmalloc/IsoHeapImplInlines.h
@@ -41,7 +41,7 @@
 }
 
 template<typename Config>
-EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible(const std::lock_guard<Mutex>& locker)
+EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible(const LockHolder& locker)
 {
     if (m_isInlineDirectoryEligibleOrDecommitted) {
         EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible(locker);
@@ -94,14 +94,14 @@
 }
 
 template<typename Config>
-void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, numPagesInInlineDirectory>* directory)
+void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, numPagesInInlineDirectory>* directory)
 {
     RELEASE_BASSERT(directory == &m_inlineDirectory);
     m_isInlineDirectoryEligibleOrDecommitted = true;
 }
 
 template<typename Config>
-void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
+void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
 {
     RELEASE_BASSERT(m_firstEligibleOrDecommitedDirectory);
     auto* directoryPage = IsoDirectoryPage<Config>::pageFor(directory);
@@ -112,7 +112,7 @@
 template<typename Config>
 void IsoHeapImpl<Config>::scavenge(Vector<DeferredDecommit>& decommits)
 {
-    std::lock_guard<Mutex> locker(this->lock);
+    LockHolder locker(this->lock);
     forEachDirectory(
         locker,
         [&] (auto& directory) {
@@ -125,7 +125,7 @@
 template<typename Config>
 void IsoHeapImpl<Config>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)
 {
-    std::lock_guard<Mutex> locker(this->lock);
+    LockHolder locker(this->lock);
     if (!m_directoryHighWatermark)
         m_inlineDirectory.scavengeToHighWatermark(locker, decommits);
     for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next) {
@@ -156,7 +156,7 @@
 template<typename Config>
 unsigned IsoHeapImpl<Config>::numLiveObjects()
 {
-    std::lock_guard<Mutex> locker(this->lock);
+    LockHolder locker(this->lock);
     unsigned result = 0;
     forEachLiveObject(
         locker,
@@ -169,7 +169,7 @@
 template<typename Config>
 unsigned IsoHeapImpl<Config>::numCommittedPages()
 {
-    std::lock_guard<Mutex> locker(this->lock);
+    LockHolder locker(this->lock);
     unsigned result = 0;
     forEachCommittedPage(
         locker,
@@ -181,7 +181,7 @@
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachDirectory(const std::lock_guard<Mutex>&, const Func& func)
+void IsoHeapImpl<Config>::forEachDirectory(const LockHolder&, const Func& func)
 {
     func(m_inlineDirectory);
     for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next)
@@ -190,7 +190,7 @@
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachCommittedPage(const std::lock_guard<Mutex>& locker, const Func& func)
+void IsoHeapImpl<Config>::forEachCommittedPage(const LockHolder& locker, const Func& func)
 {
     forEachDirectory(
         locker,
@@ -201,7 +201,7 @@
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachLiveObject(const std::lock_guard<Mutex>& locker, const Func& func)
+void IsoHeapImpl<Config>::forEachLiveObject(const LockHolder& locker, const Func& func)
 {
     forEachCommittedPage(
         locker,
@@ -305,7 +305,7 @@
 }
 
 template<typename Config>
-void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure)
+void* IsoHeapImpl<Config>::allocateFromShared(const LockHolder&, bool abortOnFailure)
 {
     static constexpr bool verbose = false;
 
diff --git a/Source/bmalloc/bmalloc/IsoPage.h b/Source/bmalloc/bmalloc/IsoPage.h
index a151cf98..cf5b553 100644
--- a/Source/bmalloc/bmalloc/IsoPage.h
+++ b/Source/bmalloc/bmalloc/IsoPage.h
@@ -77,19 +77,19 @@
 
     unsigned index() const { return m_index; }
     
-    void free(const std::lock_guard<Mutex>&, void*);
+    void free(const LockHolder&, void*);
 
     // Called after this page is already selected for allocation.
-    FreeList startAllocating(const std::lock_guard<Mutex>&);
+    FreeList startAllocating(const LockHolder&);
     
     // Called after the allocator picks another page to replace this one.
-    void stopAllocating(const std::lock_guard<Mutex>&, FreeList);
+    void stopAllocating(const LockHolder&, FreeList);
 
     IsoDirectoryBase<Config>& directory() { return m_directory; }
     bool isInUseForAllocation() const { return m_isInUseForAllocation; }
     
     template<typename Func>
-    void forEachLiveObject(const std::lock_guard<Mutex>&, const Func&);
+    void forEachLiveObject(const LockHolder&, const Func&);
     
     IsoHeapImpl<Config>& heap();
     
diff --git a/Source/bmalloc/bmalloc/IsoPageInlines.h b/Source/bmalloc/bmalloc/IsoPageInlines.h
index c5ce871..e39762a 100644
--- a/Source/bmalloc/bmalloc/IsoPageInlines.h
+++ b/Source/bmalloc/bmalloc/IsoPageInlines.h
@@ -65,7 +65,7 @@
 }
 
 template<typename Config>
-void IsoPage<Config>::free(const std::lock_guard<Mutex>& locker, void* passedPtr)
+void IsoPage<Config>::free(const LockHolder& locker, void* passedPtr)
 {
     BASSERT(!m_isShared);
     unsigned offset = static_cast<char*>(passedPtr) - reinterpret_cast<char*>(this);
@@ -87,7 +87,7 @@
 }
 
 template<typename Config>
-FreeList IsoPage<Config>::startAllocating(const std::lock_guard<Mutex>&)
+FreeList IsoPage<Config>::startAllocating(const LockHolder&)
 {
     static constexpr bool verbose = false;
     
@@ -208,7 +208,7 @@
 }
 
 template<typename Config>
-void IsoPage<Config>::stopAllocating(const std::lock_guard<Mutex>& locker, FreeList freeList)
+void IsoPage<Config>::stopAllocating(const LockHolder& locker, FreeList freeList)
 {
     static constexpr bool verbose = false;
     
@@ -229,7 +229,7 @@
 
 template<typename Config>
 template<typename Func>
-void IsoPage<Config>::forEachLiveObject(const std::lock_guard<Mutex>&, const Func& func)
+void IsoPage<Config>::forEachLiveObject(const LockHolder&, const Func& func)
 {
     for (unsigned wordIndex = 0; wordIndex < bitsArrayLength(numObjects); ++wordIndex) {
         unsigned word = m_allocBits[wordIndex];
diff --git a/Source/bmalloc/bmalloc/IsoSharedHeap.h b/Source/bmalloc/bmalloc/IsoSharedHeap.h
index 5ea51e5..96c414d 100644
--- a/Source/bmalloc/bmalloc/IsoSharedHeap.h
+++ b/Source/bmalloc/bmalloc/IsoSharedHeap.h
@@ -53,7 +53,7 @@
 
 class IsoSharedHeap : public StaticPerProcess<IsoSharedHeap> {
 public:
-    IsoSharedHeap(const std::lock_guard<Mutex>&)
+    IsoSharedHeap(const LockHolder&)
     {
     }
 
@@ -62,7 +62,7 @@
 
 private:
     template<unsigned>
-    void* allocateSlow(const std::lock_guard<Mutex>&, bool abortOnFailure);
+    void* allocateSlow(const LockHolder&, bool abortOnFailure);
 
     IsoSharedPage* m_currentPage { nullptr };
     VariadicBumpAllocator m_allocator;
diff --git a/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h b/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h
index da18e6e7..7443043 100644
--- a/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h
+++ b/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h
@@ -51,7 +51,7 @@
 template<unsigned passedObjectSize>
 void* IsoSharedHeap::allocateNew(bool abortOnFailure)
 {
-    std::lock_guard<Mutex> locker(mutex());
+    LockHolder locker(mutex());
     constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
     return m_allocator.template allocate<objectSize>(
         [&] () -> void* {
@@ -60,7 +60,7 @@
 }
 
 template<unsigned passedObjectSize>
-BNO_INLINE void* IsoSharedHeap::allocateSlow(const std::lock_guard<Mutex>& locker, bool abortOnFailure)
+BNO_INLINE void* IsoSharedHeap::allocateSlow(const LockHolder& locker, bool abortOnFailure)
 {
     Scavenger& scavenger = *Scavenger::get();
     scavenger.didStartGrowing();
diff --git a/Source/bmalloc/bmalloc/IsoSharedPage.h b/Source/bmalloc/bmalloc/IsoSharedPage.h
index bff70c2..ed59481 100644
--- a/Source/bmalloc/bmalloc/IsoSharedPage.h
+++ b/Source/bmalloc/bmalloc/IsoSharedPage.h
@@ -38,9 +38,9 @@
     BEXPORT static IsoSharedPage* tryCreate();
 
     template<typename Config, typename Type>
-    void free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>&, void*);
-    VariadicBumpAllocator startAllocating(const std::lock_guard<Mutex>&);
-    void stopAllocating(const std::lock_guard<Mutex>&);
+    void free(const LockHolder&, api::IsoHeap<Type>&, void*);
+    VariadicBumpAllocator startAllocating(const LockHolder&);
+    void stopAllocating(const LockHolder&);
 
 private:
     IsoSharedPage()
diff --git a/Source/bmalloc/bmalloc/IsoSharedPageInlines.h b/Source/bmalloc/bmalloc/IsoSharedPageInlines.h
index e4410ba..15b9d0a 100644
--- a/Source/bmalloc/bmalloc/IsoSharedPageInlines.h
+++ b/Source/bmalloc/bmalloc/IsoSharedPageInlines.h
@@ -35,7 +35,7 @@
 // This is because empty IsoSharedPage is still split into various different objects that should keep some part of virtual memory region dedicated.
 // We cannot set up bump allocation for such a page. Not freeing IsoSharedPages are OK since IsoSharedPage is only used for the lower tier of IsoHeap.
 template<typename Config, typename Type>
-void IsoSharedPage::free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>& handle, void* ptr)
+void IsoSharedPage::free(const LockHolder&, api::IsoHeap<Type>& handle, void* ptr)
 {
     auto& heapImpl = handle.impl();
     uint8_t index = *indexSlotFor<Config>(ptr) & IsoHeapImplBase::maxAllocationFromSharedMask;
@@ -46,7 +46,7 @@
     heapImpl.m_availableShared |= (1U << index);
 }
 
-inline VariadicBumpAllocator IsoSharedPage::startAllocating(const std::lock_guard<Mutex>&)
+inline VariadicBumpAllocator IsoSharedPage::startAllocating(const LockHolder&)
 {
     static constexpr bool verbose = false;
 
@@ -61,7 +61,7 @@
     return VariadicBumpAllocator(payloadEnd, remaining);
 }
 
-inline void IsoSharedPage::stopAllocating(const std::lock_guard<Mutex>&)
+inline void IsoSharedPage::stopAllocating(const LockHolder&)
 {
     static constexpr bool verbose = false;
 
diff --git a/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h b/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h
index a0dbc30..658ec4e 100644
--- a/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h
+++ b/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h
@@ -43,7 +43,7 @@
     Mutex lock;
     
 private:
-    IsoTLSDeallocatorEntry(const std::lock_guard<Mutex>&);
+    IsoTLSDeallocatorEntry(const LockHolder&);
 
     void construct(void* entry) override;
     void scavenge(void* entry) override;
diff --git a/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h b/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h
index c953428..5d94ddd 100644
--- a/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h
+++ b/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h
@@ -28,7 +28,7 @@
 namespace bmalloc {
 
 template<typename Config>
-IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry(const std::lock_guard<Mutex>&)
+IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry(const LockHolder&)
 {
 }
 
diff --git a/Source/bmalloc/bmalloc/IsoTLSInlines.h b/Source/bmalloc/bmalloc/IsoTLSInlines.h
index 0d4b201..65630af 100644
--- a/Source/bmalloc/bmalloc/IsoTLSInlines.h
+++ b/Source/bmalloc/bmalloc/IsoTLSInlines.h
@@ -184,7 +184,7 @@
 void IsoTLS::ensureHeap(api::IsoHeap<Type>& handle)
 {
     if (!handle.isInitialized()) {
-        std::lock_guard<Mutex> locker(handle.m_initializationLock);
+        LockHolder locker(handle.m_initializationLock);
         if (!handle.isInitialized())
             handle.initialize();
     }
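The ensureHeap() change above is a double-checked initialization: the unlocked isInitialized() check keeps the common path lock-free, and the second check under m_initializationLock keeps two racing threads from initializing the handle twice. A minimal standalone sketch of the same pattern, with std::mutex standing in for bmalloc's Mutex and hypothetical names throughout:

    #include <atomic>
    #include <mutex>

    using LockHolder = std::lock_guard<std::mutex>; // stand-in for bmalloc's alias

    struct LazyHandle {
        std::atomic<bool> m_initialized { false };
        std::mutex m_initializationLock;

        void ensure()
        {
            if (!m_initialized.load(std::memory_order_acquire)) {
                LockHolder locker(m_initializationLock);
                if (!m_initialized.load(std::memory_order_relaxed)) {
                    // ... one-time setup would go here ...
                    m_initialized.store(true, std::memory_order_release);
                }
            }
        }
    };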
diff --git a/Source/bmalloc/bmalloc/IsoTLSLayout.cpp b/Source/bmalloc/bmalloc/IsoTLSLayout.cpp
index 0bae607..d91b818 100644
--- a/Source/bmalloc/bmalloc/IsoTLSLayout.cpp
+++ b/Source/bmalloc/bmalloc/IsoTLSLayout.cpp
@@ -31,7 +31,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(IsoTLSLayout);
 
-IsoTLSLayout::IsoTLSLayout(const std::lock_guard<Mutex>&)
+IsoTLSLayout::IsoTLSLayout(const LockHolder&)
 {
 }
 
@@ -41,7 +41,7 @@
     RELEASE_BASSERT(!entry->m_next);
     // IsoTLSLayout::head() does not take a lock, so we should emit a memory fence to make sure that a newly added entry is fully initialized before it is chained to this linked list.
     // Emitting a memory fence here is OK since this function is not called frequently.
-    std::lock_guard<Mutex> locking(addingMutex);
+    LockHolder locking(addingMutex);
     if (m_head) {
         RELEASE_BASSERT(m_tail);
         size_t offset = roundUpToMultipleOf(entry->alignment(), m_tail->extent());
diff --git a/Source/bmalloc/bmalloc/IsoTLSLayout.h b/Source/bmalloc/bmalloc/IsoTLSLayout.h
index 50c5096..1da5ad5 100644
--- a/Source/bmalloc/bmalloc/IsoTLSLayout.h
+++ b/Source/bmalloc/bmalloc/IsoTLSLayout.h
@@ -35,7 +35,7 @@
 
 class IsoTLSLayout : public StaticPerProcess<IsoTLSLayout> {
 public:
-    BEXPORT IsoTLSLayout(const std::lock_guard<Mutex>&);
+    BEXPORT IsoTLSLayout(const LockHolder&);
     
     BEXPORT void add(IsoTLSEntry*);
     
diff --git a/Source/bmalloc/bmalloc/Mutex.h b/Source/bmalloc/bmalloc/Mutex.h
index e18de84..240c85e 100644
--- a/Source/bmalloc/bmalloc/Mutex.h
+++ b/Source/bmalloc/bmalloc/Mutex.h
@@ -35,6 +35,11 @@
 
 namespace bmalloc {
 
+class Mutex;
+
+using UniqueLockHolder = std::unique_lock<Mutex>;
+using LockHolder = std::lock_guard<Mutex>;
+
 class Mutex {
 public:
     constexpr Mutex() = default;
@@ -51,7 +56,7 @@
 };
 
 static inline void sleep(
-    std::unique_lock<Mutex>& lock, std::chrono::milliseconds duration)
+    UniqueLockHolder& lock, std::chrono::milliseconds duration)
 {
     if (duration == std::chrono::milliseconds(0))
         return;
@@ -62,7 +67,7 @@
 }
 
 static inline void waitUntilFalse(
-    std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration,
+    UniqueLockHolder& lock, std::chrono::milliseconds sleepDuration,
     bool& condition)
 {
     while (condition) {
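The two aliases introduced above are plain renames of the standard lock types, so call sites keep their exact semantics: LockHolder is a scoped guard that cannot be released early, while UniqueLockHolder can drop and reacquire the mutex mid-scope, which helpers such as sleep() and waitUntilFalse() need. A minimal sketch of the difference, using std::mutex as a stand-in for bmalloc's Mutex and hypothetical names:

    #include <chrono>
    #include <mutex>
    #include <thread>

    using UniqueLockHolder = std::unique_lock<std::mutex>; // stand-ins for the
    using LockHolder = std::lock_guard<std::mutex>;        // bmalloc aliases

    static std::mutex s_mutex;
    static unsigned s_counter;

    void bump()
    {
        LockHolder lock(s_mutex); // held for the rest of the scope
        ++s_counter;
    }

    void bumpThenSleepUnlocked(std::chrono::milliseconds duration)
    {
        UniqueLockHolder lock(s_mutex);
        ++s_counter;
        lock.unlock(); // a UniqueLockHolder can release the mutex mid-scope...
        std::this_thread::sleep_for(duration);
        lock.lock();   // ...and reacquire it before the scope ends
    }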
diff --git a/Source/bmalloc/bmalloc/ObjectType.cpp b/Source/bmalloc/bmalloc/ObjectType.cpp
index 1687619..1e2fd53 100644
--- a/Source/bmalloc/bmalloc/ObjectType.cpp
+++ b/Source/bmalloc/bmalloc/ObjectType.cpp
@@ -38,7 +38,7 @@
         if (!object)
             return ObjectType::Small;
 
-        std::unique_lock<Mutex> lock(Heap::mutex());
+        UniqueLockHolder lock(Heap::mutex());
         if (heap.isLarge(lock, object))
             return ObjectType::Large;
     }
diff --git a/Source/bmalloc/bmalloc/PerProcess.cpp b/Source/bmalloc/bmalloc/PerProcess.cpp
index 292b9f5..d82f25b 100644
--- a/Source/bmalloc/bmalloc/PerProcess.cpp
+++ b/Source/bmalloc/bmalloc/PerProcess.cpp
@@ -60,7 +60,7 @@
 
 PerProcessData* getPerProcessData(unsigned hash, const char* disambiguator, size_t size, size_t alignment)
 {
-    std::lock_guard<Mutex> lock(s_mutex);
+    LockHolder lock(s_mutex);
 
     PerProcessData*& bucket = s_table[hash % tableSize];
     
diff --git a/Source/bmalloc/bmalloc/PerProcess.h b/Source/bmalloc/bmalloc/PerProcess.h
index d263801..1a1f7a9 100644
--- a/Source/bmalloc/bmalloc/PerProcess.h
+++ b/Source/bmalloc/bmalloc/PerProcess.h
@@ -46,7 +46,7 @@
 // x = object->m_field; // OK
 // if (globalFlag) { ... } // Undefined behavior.
 //
-// std::lock_guard<Mutex> lock(PerProcess<Object>::mutex());
+// LockHolder lock(PerProcess<Object>::mutex());
 // Object* object = PerProcess<Object>::get(lock);
 // if (globalFlag) { ... } // OK.
 
@@ -105,7 +105,7 @@
     
     BNO_INLINE static T* getSlowCase()
     {
-        std::lock_guard<Mutex> lock(mutex());
+        LockHolder lock(mutex());
         if (!s_object.load()) {
             if (s_data->isInitialized)
                 s_object.store(static_cast<T*>(s_data->memory));
diff --git a/Source/bmalloc/bmalloc/Scavenger.cpp b/Source/bmalloc/bmalloc/Scavenger.cpp
index 60f0676..d5757e2 100644
--- a/Source/bmalloc/bmalloc/Scavenger.cpp
+++ b/Source/bmalloc/bmalloc/Scavenger.cpp
@@ -68,7 +68,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(Scavenger);
 
-Scavenger::Scavenger(const std::lock_guard<Mutex>&)
+Scavenger::Scavenger(const LockHolder&)
 {
     BASSERT(!Environment::get()->isDebugHeapEnabled());
 
@@ -92,7 +92,7 @@
 
 void Scavenger::run()
 {
-    std::lock_guard<Mutex> lock(mutex());
+    LockHolder lock(mutex());
     runHoldingLock();
 }
 
@@ -104,7 +104,7 @@
 
 void Scavenger::runSoon()
 {
-    std::lock_guard<Mutex> lock(mutex());
+    LockHolder lock(mutex());
     runSoonHoldingLock();
 }
 
@@ -124,7 +124,7 @@
 
 void Scavenger::scheduleIfUnderMemoryPressure(size_t bytes)
 {
-    std::lock_guard<Mutex> lock(mutex());
+    LockHolder lock(mutex());
     scheduleIfUnderMemoryPressureHoldingLock(bytes);
 }
 
@@ -148,7 +148,7 @@
 
 void Scavenger::schedule(size_t bytes)
 {
-    std::lock_guard<Mutex> lock(mutex());
+    LockHolder lock(mutex());
     scheduleIfUnderMemoryPressureHoldingLock(bytes);
     
     if (willRunSoon())
@@ -179,14 +179,14 @@
 
 std::chrono::milliseconds Scavenger::timeSinceLastFullScavenge()
 {
-    std::unique_lock<Mutex> lock(mutex());
+    UniqueLockHolder lock(mutex());
     return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastFullScavengeTime);
 }
 
 #if BUSE(PARTIAL_SCAVENGE)
 std::chrono::milliseconds Scavenger::timeSinceLastPartialScavenge()
 {
-    std::unique_lock<Mutex> lock(mutex());
+    UniqueLockHolder lock(mutex());
     return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastPartialScavengeTime);
 }
 #endif
@@ -200,7 +200,7 @@
 
 void Scavenger::scavenge()
 {
-    std::unique_lock<Mutex> lock(m_scavengingMutex);
+    UniqueLockHolder lock(m_scavengingMutex);
 
     if (verbose) {
         fprintf(stderr, "--------------------------------\n");
@@ -216,7 +216,7 @@
 #if !BUSE(PARTIAL_SCAVENGE)
             size_t deferredDecommits = 0;
 #endif
-            std::lock_guard<Mutex> lock(Heap::mutex());
+            LockHolder lock(Heap::mutex());
             for (unsigned i = numHeaps; i--;) {
                 if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                     continue;
@@ -241,7 +241,7 @@
 
         {
             PrintTime printTime("full scavenge mark all as eligible time");
-            std::lock_guard<Mutex> lock(Heap::mutex());
+            LockHolder lock(Heap::mutex());
             for (unsigned i = numHeaps; i--;) {
                 if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                     continue;
@@ -267,7 +267,7 @@
     }
 
     {
-        std::unique_lock<Mutex> lock(mutex());
+        UniqueLockHolder lock(mutex());
         m_lastFullScavengeTime = std::chrono::steady_clock::now();
     }
 }
@@ -275,7 +275,7 @@
 #if BUSE(PARTIAL_SCAVENGE)
 void Scavenger::partialScavenge()
 {
-    std::unique_lock<Mutex> lock(m_scavengingMutex);
+    UniqueLockHolder lock(m_scavengingMutex);
 
     if (verbose) {
         fprintf(stderr, "--------------------------------\n");
@@ -287,7 +287,7 @@
         BulkDecommit decommitter;
         {
             PrintTime printTime("\npartialScavenge under lock time");
-            std::lock_guard<Mutex> lock(Heap::mutex());
+            LockHolder lock(Heap::mutex());
             for (unsigned i = numHeaps; i--;) {
                 if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                     continue;
@@ -308,7 +308,7 @@
 
         {
             PrintTime printTime("partialScavenge mark all as eligible time");
-            std::lock_guard<Mutex> lock(Heap::mutex());
+            LockHolder lock(Heap::mutex());
             for (unsigned i = numHeaps; i--;) {
                 if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                     continue;
@@ -335,7 +335,7 @@
     }
 
     {
-        std::unique_lock<Mutex> lock(mutex());
+        UniqueLockHolder lock(mutex());
         m_lastPartialScavengeTime = std::chrono::steady_clock::now();
     }
 }
@@ -345,7 +345,7 @@
 {
     size_t result = 0;
     {
-        std::lock_guard<Mutex> lock(Heap::mutex());
+        LockHolder lock(Heap::mutex());
         for (unsigned i = numHeaps; i--;) {
             if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                 continue;
@@ -402,12 +402,12 @@
     
     while (true) {
         if (m_state == State::Sleep) {
-            std::unique_lock<Mutex> lock(mutex());
+            UniqueLockHolder lock(mutex());
             m_condition.wait(lock, [&]() { return m_state != State::Sleep; });
         }
         
         if (m_state == State::RunSoon) {
-            std::unique_lock<Mutex> lock(mutex());
+            UniqueLockHolder lock(mutex());
             m_condition.wait_for(lock, m_waitTime, [&]() { return m_state != State::RunSoon; });
         }
         
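The thread-loop hunk above is also why both aliases are needed: m_condition.wait() must be able to drop and reacquire the lock, so it has to be handed a UniqueLockHolder, whereas the simple critical sections earlier in the file get by with LockHolder. A minimal sketch of that wait/notify shape, using std::mutex and std::condition_variable as stand-ins and hypothetical names:

    #include <condition_variable>
    #include <mutex>

    using UniqueLockHolder = std::unique_lock<std::mutex>;
    using LockHolder = std::lock_guard<std::mutex>;

    enum class State { Sleep, RunSoon, Run };

    struct Loop {
        std::mutex m_mutex;
        std::condition_variable m_condition;
        State m_state { State::Sleep };

        void wake()
        {
            {
                LockHolder lock(m_mutex); // short critical section to update state
                m_state = State::Run;
            }
            m_condition.notify_one();
        }

        void waitForWork()
        {
            UniqueLockHolder lock(m_mutex); // wait() needs a lock it can release
            m_condition.wait(lock, [&] { return m_state != State::Sleep; });
        }
    };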
diff --git a/Source/bmalloc/bmalloc/Scavenger.h b/Source/bmalloc/bmalloc/Scavenger.h
index 728a75e..e897546 100644
--- a/Source/bmalloc/bmalloc/Scavenger.h
+++ b/Source/bmalloc/bmalloc/Scavenger.h
@@ -42,7 +42,7 @@
 
 class Scavenger : public StaticPerProcess<Scavenger> {
 public:
-    BEXPORT Scavenger(const std::lock_guard<Mutex>&);
+    BEXPORT Scavenger(const LockHolder&);
     
     ~Scavenger() = delete;
     
diff --git a/Source/bmalloc/bmalloc/SmallLine.h b/Source/bmalloc/bmalloc/SmallLine.h
index 6be85d3..d9b74ab 100644
--- a/Source/bmalloc/bmalloc/SmallLine.h
+++ b/Source/bmalloc/bmalloc/SmallLine.h
@@ -35,9 +35,9 @@
 
 class SmallLine {
 public:
-    void ref(std::unique_lock<Mutex>&, unsigned char = 1);
-    bool deref(std::unique_lock<Mutex>&);
-    unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; }
+    void ref(UniqueLockHolder&, unsigned char = 1);
+    bool deref(UniqueLockHolder&);
+    unsigned refCount(UniqueLockHolder&) { return m_refCount; }
     
     char* begin();
     char* end();
@@ -51,13 +51,13 @@
 
 };
 
-inline void SmallLine::ref(std::unique_lock<Mutex>&, unsigned char refCount)
+inline void SmallLine::ref(UniqueLockHolder&, unsigned char refCount)
 {
     BASSERT(!m_refCount);
     m_refCount = refCount;
 }
 
-inline bool SmallLine::deref(std::unique_lock<Mutex>&)
+inline bool SmallLine::deref(UniqueLockHolder&)
 {
     BASSERT(m_refCount);
     --m_refCount;
diff --git a/Source/bmalloc/bmalloc/SmallPage.h b/Source/bmalloc/bmalloc/SmallPage.h
index 6eab433..435e5f7 100644
--- a/Source/bmalloc/bmalloc/SmallPage.h
+++ b/Source/bmalloc/bmalloc/SmallPage.h
@@ -38,15 +38,15 @@
 
 class SmallPage : public ListNode<SmallPage> {
 public:
-    void ref(std::unique_lock<Mutex>&);
-    bool deref(std::unique_lock<Mutex>&);
-    unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; }
+    void ref(UniqueLockHolder&);
+    bool deref(UniqueLockHolder&);
+    unsigned refCount(UniqueLockHolder&) { return m_refCount; }
     
     size_t sizeClass() { return m_sizeClass; }
     void setSizeClass(size_t sizeClass) { m_sizeClass = sizeClass; }
     
-    bool hasFreeLines(std::unique_lock<Mutex>&) const { return m_hasFreeLines; }
-    void setHasFreeLines(std::unique_lock<Mutex>&, bool hasFreeLines) { m_hasFreeLines = hasFreeLines; }
+    bool hasFreeLines(UniqueLockHolder&) const { return m_hasFreeLines; }
+    void setHasFreeLines(UniqueLockHolder&, bool hasFreeLines) { m_hasFreeLines = hasFreeLines; }
     
     bool hasPhysicalPages() { return m_hasPhysicalPages; }
     void setHasPhysicalPages(bool hasPhysicalPages) { m_hasPhysicalPages = hasPhysicalPages; }
@@ -79,14 +79,14 @@
 
 using LineCache = std::array<List<SmallPage>, sizeClassCount>;
 
-inline void SmallPage::ref(std::unique_lock<Mutex>&)
+inline void SmallPage::ref(UniqueLockHolder&)
 {
     BASSERT(!m_slide);
     ++m_refCount;
     BASSERT(m_refCount);
 }
 
-inline bool SmallPage::deref(std::unique_lock<Mutex>&)
+inline bool SmallPage::deref(UniqueLockHolder&)
 {
     BASSERT(!m_slide);
     BASSERT(m_refCount);
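SmallLine and SmallPage never lock anything themselves; the UniqueLockHolder& parameter is an unused "lock witness" that forces every caller of ref()/deref() to prove it already holds Heap::mutex(). A minimal sketch of the idiom, with std::mutex standing in for bmalloc's Mutex and hypothetical names:

    #include <mutex>

    using UniqueLockHolder = std::unique_lock<std::mutex>;

    class RefCounted {
    public:
        // The holder is taken by reference and never used: it exists only so
        // the compiler rejects callers that do not hold the lock.
        void ref(UniqueLockHolder&) { ++m_refCount; }
        bool deref(UniqueLockHolder&) { return !--m_refCount; }

    private:
        unsigned m_refCount { 0 };
    };

    static std::mutex s_heapMutex;

    bool release(RefCounted& object)
    {
        UniqueLockHolder lock(s_heapMutex);
        return object.deref(lock); // compiles only because a holder is in scope
    }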
diff --git a/Source/bmalloc/bmalloc/StaticPerProcess.h b/Source/bmalloc/bmalloc/StaticPerProcess.h
index 794b069..b805a74 100644
--- a/Source/bmalloc/bmalloc/StaticPerProcess.h
+++ b/Source/bmalloc/bmalloc/StaticPerProcess.h
@@ -79,7 +79,7 @@
     BNO_INLINE static T* getSlowCase()
     {
         using Storage = typename StaticPerProcessStorageTraits<T>::Storage;
-        std::lock_guard<Mutex> lock(Storage::s_mutex);
+        LockHolder lock(Storage::s_mutex);
         if (!Storage::s_object.load(std::memory_order_consume)) {
             T* t = new (&Storage::s_memory) T(lock);
             Storage::s_object.store(t, std::memory_order_release);
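getSlowCase() above also explains why so many constructors in this patch (Scavenger, VMHeap, Zone, IsoTLSLayout, and others) now take a const LockHolder&: the singleton is constructed exactly once while the storage mutex is held, and the holder is passed through as proof of that. A simplified sketch of the shape, with std::mutex as a stand-in and hypothetical names (the real code fast-paths the lookup with an atomic load, omitted here for brevity):

    #include <mutex>

    using LockHolder = std::lock_guard<std::mutex>;

    class Singleton {
    public:
        explicit Singleton(const LockHolder&) { } // witness-only constructor argument

        static Singleton* get()
        {
            static std::mutex s_mutex;
            static Singleton* s_object;
            LockHolder lock(s_mutex);
            if (!s_object)
                s_object = new Singleton(lock); // forward the witness to the constructor
            return s_object;
        }
    };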
diff --git a/Source/bmalloc/bmalloc/VMHeap.cpp b/Source/bmalloc/bmalloc/VMHeap.cpp
index adf2167..df605ff 100644
--- a/Source/bmalloc/bmalloc/VMHeap.cpp
+++ b/Source/bmalloc/bmalloc/VMHeap.cpp
@@ -31,7 +31,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(VMHeap);
 
-VMHeap::VMHeap(const std::lock_guard<Mutex>&)
+VMHeap::VMHeap(const LockHolder&)
 {
 }
 
diff --git a/Source/bmalloc/bmalloc/VMHeap.h b/Source/bmalloc/bmalloc/VMHeap.h
index 68f0abe..11f10e3 100644
--- a/Source/bmalloc/bmalloc/VMHeap.h
+++ b/Source/bmalloc/bmalloc/VMHeap.h
@@ -47,7 +47,7 @@
 
 class VMHeap : public StaticPerProcess<VMHeap> {
 public:
-    VMHeap(const std::lock_guard<Mutex>&);
+    VMHeap(const LockHolder&);
     
     LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
 };
diff --git a/Source/bmalloc/bmalloc/Zone.cpp b/Source/bmalloc/bmalloc/Zone.cpp
index 0bb8296..37edd24 100644
--- a/Source/bmalloc/bmalloc/Zone.cpp
+++ b/Source/bmalloc/bmalloc/Zone.cpp
@@ -115,7 +115,7 @@
     .statistics = bmalloc::statistics
 };
 
-Zone::Zone(const std::lock_guard<Mutex>&)
+Zone::Zone(const LockHolder&)
 {
     malloc_zone_t::size = &bmalloc::zoneSize;
     malloc_zone_t::zone_name = "WebKit Malloc";
diff --git a/Source/bmalloc/bmalloc/Zone.h b/Source/bmalloc/bmalloc/Zone.h
index f5d6495..65d8eae 100644
--- a/Source/bmalloc/bmalloc/Zone.h
+++ b/Source/bmalloc/bmalloc/Zone.h
@@ -42,7 +42,7 @@
     // Enough capacity to track a 64GB heap, so probably enough for anything.
     static constexpr size_t capacity = 2048;
 
-    Zone(const std::lock_guard<Mutex>&);
+    Zone(const LockHolder&);
     Zone(task_t, memory_reader_t, vm_address_t);
 
     void addRange(Range);
diff --git a/Source/bmalloc/bmalloc/bmalloc.cpp b/Source/bmalloc/bmalloc/bmalloc.cpp
index 16be358..a120d6e 100644
--- a/Source/bmalloc/bmalloc/bmalloc.cpp
+++ b/Source/bmalloc/bmalloc/bmalloc.cpp
@@ -58,7 +58,7 @@
         kind = mapToActiveHeapKind(kind);
         Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
 
-        std::unique_lock<Mutex> lock(Heap::mutex());
+        UniqueLockHolder lock(Heap::mutex());
         result = heap.allocateLarge(lock, alignment, size, FailureAction::ReturnNull);
         if (result) {
             // Don't track this as dirty memory that dictates how we drive the scavenger.
@@ -82,7 +82,7 @@
     }
     kind = mapToActiveHeapKind(kind);
     Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     // Balance out the externalDecommit when we allocated the zeroed virtual memory.
     heap.externalCommit(lock, object, size);
     heap.deallocateLarge(lock, object);
@@ -108,7 +108,7 @@
 {
     if (DebugHeap::tryGet())
         return;
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     Scavenger::get()->setScavengerThreadQOSClass(overrideClass);
 }
 #endif