Merge TCMalloc r38
Reviewed by Mark Rowe and Geoff Garen.
It also results in a performance progression of between 0.5% and
0.9%, depending on the test; however, most if not all of this
gain will be consumed by the overhead involved in the later
change to release memory to the system.
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@28434 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog
index 2ae3831..f554a23 100644
--- a/JavaScriptCore/ChangeLog
+++ b/JavaScriptCore/ChangeLog
@@ -1,3 +1,119 @@
+2007-12-03 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Mark Rowe and Geoff Garen.
+
+ Merge TCMalloc r38
+
+ It also results in a performance progression of between 0.5% and
+ 0.9%, depending on the test; however, most if not all of this
+ gain will be consumed by the overhead involved in the later
+ change to release memory to the system.
+
+ * JavaScriptCore.vcproj/WTF/WTF.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/FastMalloc.cpp:
+ (WTF::KernelSupportsTLS):
+ (WTF::CheckIfKernelSupportsTLS):
+ (WTF::):
+ (WTF::ClassIndex):
+ (WTF::SLL_Next):
+ (WTF::SLL_SetNext):
+ (WTF::SLL_Push):
+ (WTF::SLL_Pop):
+ (WTF::SLL_PopRange):
+ (WTF::SLL_PushRange):
+ (WTF::SLL_Size):
+ (WTF::SizeClass):
+ (WTF::ByteSizeForClass):
+ (WTF::NumMoveSize):
+ (WTF::InitSizeClasses):
+ (WTF::AllocationSize):
+ (WTF::TCMalloc_PageHeap::GetSizeClassIfCached):
+ (WTF::TCMalloc_PageHeap::CacheSizeClass):
+ (WTF::TCMalloc_PageHeap::init):
+ (WTF::TCMalloc_PageHeap::New):
+ (WTF::TCMalloc_PageHeap::AllocLarge):
+ (WTF::TCMalloc_PageHeap::Carve):
+ (WTF::TCMalloc_PageHeap::Delete):
+ (WTF::TCMalloc_PageHeap::IncrementalScavenge):
+ (WTF::PagesToMB):
+ (WTF::TCMalloc_PageHeap::Dump):
+ (WTF::TCMalloc_PageHeap::GrowHeap):
+ (WTF::TCMalloc_PageHeap::Check):
+ (WTF::ReleaseFreeList):
+ (WTF::TCMalloc_PageHeap::ReleaseFreePages):
+ (WTF::TCMalloc_ThreadCache_FreeList::Push):
+ (WTF::TCMalloc_ThreadCache_FreeList::PushRange):
+ (WTF::TCMalloc_ThreadCache_FreeList::PopRange):
+ (WTF::TCMalloc_ThreadCache_FreeList::Pop):
+ (WTF::TCMalloc_Central_FreeList::length):
+ (WTF::TCMalloc_Central_FreeList::tc_length):
+ (WTF::TCMalloc_Central_FreeList::Init):
+ (WTF::TCMalloc_Central_FreeList::ReleaseListToSpans):
+ (WTF::TCMalloc_Central_FreeList::EvictRandomSizeClass):
+ (WTF::TCMalloc_Central_FreeList::MakeCacheSpace):
+ (WTF::TCMalloc_Central_FreeList::ShrinkCache):
+ (WTF::TCMalloc_Central_FreeList::InsertRange):
+ (WTF::TCMalloc_Central_FreeList::RemoveRange):
+ (WTF::TCMalloc_Central_FreeList::FetchFromSpansSafe):
+ (WTF::TCMalloc_Central_FreeList::Populate):
+ (WTF::TCMalloc_ThreadCache::Init):
+ (WTF::TCMalloc_ThreadCache::Cleanup):
+ (WTF::TCMalloc_ThreadCache::Allocate):
+ (WTF::TCMalloc_ThreadCache::Deallocate):
+ (WTF::TCMalloc_ThreadCache::FetchFromCentralCache):
+ (WTF::TCMalloc_ThreadCache::ReleaseToCentralCache):
+ (WTF::TCMalloc_ThreadCache::Scavenge):
+ (WTF::TCMalloc_ThreadCache::PickNextSample):
+ (WTF::TCMalloc_ThreadCache::NewHeap):
+ (WTF::TCMalloc_ThreadCache::GetThreadHeap):
+ (WTF::TCMalloc_ThreadCache::GetCache):
+ (WTF::TCMalloc_ThreadCache::GetCacheIfPresent):
+ (WTF::TCMalloc_ThreadCache::InitTSD):
+ (WTF::TCMalloc_ThreadCache::CreateCacheIfNecessary):
+ (WTF::TCMallocStats::ExtractStats):
+ (WTF::TCMallocStats::DumpStats):
+ (WTF::TCMallocStats::DumpStackTraces):
+ (WTF::TCMallocStats::TCMallocImplementation::MarkThreadIdle):
+ (WTF::TCMallocStats::TCMallocImplementation::ReleaseFreeMemory):
+ (WTF::TCMallocStats::TCMallocGuard::TCMallocGuard):
+ (WTF::TCMallocStats::TCMallocGuard::~TCMallocGuard):
+ (WTF::TCMallocStats::DoSampledAllocation):
+ (WTF::TCMallocStats::CheckCachedSizeClass):
+ (WTF::TCMallocStats::CheckedMallocResult):
+ (WTF::TCMallocStats::SpanToMallocResult):
+ (WTF::TCMallocStats::do_malloc):
+ (WTF::TCMallocStats::do_free):
+ (WTF::TCMallocStats::do_memalign):
+ (WTF::TCMallocStats::do_malloc_stats):
+ (WTF::TCMallocStats::do_mallopt):
+ (WTF::TCMallocStats::do_mallinfo):
+ (WTF::TCMallocStats::realloc):
+ (WTF::TCMallocStats::cpp_alloc):
+ (WTF::TCMallocStats::operator new):
+ (WTF::TCMallocStats::):
+ (WTF::TCMallocStats::operator new[]):
+ (WTF::TCMallocStats::malloc_stats):
+ (WTF::TCMallocStats::mallopt):
+ (WTF::TCMallocStats::mallinfo):
+ * wtf/TCPackedCache.h: Added.
+ (PackedCache::PackedCache):
+ (PackedCache::Put):
+ (PackedCache::Has):
+ (PackedCache::GetOrDefault):
+ (PackedCache::Clear):
+ (PackedCache::EntryToValue):
+ (PackedCache::EntryToUpper):
+ (PackedCache::KeyToUpper):
+ (PackedCache::UpperToPartialKey):
+ (PackedCache::Hash):
+ (PackedCache::KeyMatch):
+ * wtf/TCPageMap.h:
+ (TCMalloc_PageMap2::PreallocateMoreMemory):
+ * wtf/TCSystemAlloc.cpp:
+ (TCMalloc_SystemRelease):
+ * wtf/TCSystemAlloc.h:
+
2007-12-04 Anders Carlsson <andersca@apple.com>
Reviewed by Sam.
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
index ba049ae..0d2ad76 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
+++ b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
@@ -280,6 +280,10 @@
>
</File>
<File
+ RelativePath="..\..\wtf\TCPackedCache.h"
+ >
+ </File>
+ <File
RelativePath="..\..\wtf\TCPageMap.h"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index cbecbc9..429e00c 100644
--- a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -84,6 +84,7 @@
1CAF34890A6C421700ABE06E /* WebScriptObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 1CAF34880A6C421700ABE06E /* WebScriptObject.h */; };
5186111E0CC824900081412B /* Deque.h in Headers */ = {isa = PBXBuildFile; fileRef = 5186111D0CC824830081412B /* Deque.h */; settings = {ATTRIBUTES = (Private, ); }; };
51F648D70BB4E2CA0033D760 /* RetainPtr.h in Headers */ = {isa = PBXBuildFile; fileRef = 51F648D60BB4E2CA0033D760 /* RetainPtr.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 5DA479660CFBCF56009328A0 /* TCPackedCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 5DA479650CFBCF56009328A0 /* TCPackedCache.h */; };
5DBD18AC0C54018700C15EAE /* CollectorHeapIntrospector.h in Headers */ = {isa = PBXBuildFile; fileRef = 5DBD18AA0C54018700C15EAE /* CollectorHeapIntrospector.h */; };
5DBD18B00C5401A700C15EAE /* MallocZoneSupport.h in Headers */ = {isa = PBXBuildFile; fileRef = 5DBD18AF0C5401A700C15EAE /* MallocZoneSupport.h */; };
652246A50C8D7A0E007BDAF7 /* HashIterators.h in Headers */ = {isa = PBXBuildFile; fileRef = 652246A40C8D7A0E007BDAF7 /* HashIterators.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -487,6 +488,7 @@
51F0EC9605C88DC700E6DF1B /* objc_utility.h */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.c.h; name = objc_utility.h; path = bindings/objc/objc_utility.h; sourceTree = "<group>"; tabWidth = 8; };
51F0EC9705C88DC700E6DF1B /* objc_utility.mm */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = objc_utility.mm; path = bindings/objc/objc_utility.mm; sourceTree = "<group>"; tabWidth = 8; };
51F648D60BB4E2CA0033D760 /* RetainPtr.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = RetainPtr.h; sourceTree = "<group>"; };
+ 5DA479650CFBCF56009328A0 /* TCPackedCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TCPackedCache.h; sourceTree = "<group>"; };
5DBD18A90C54018700C15EAE /* CollectorHeapIntrospector.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CollectorHeapIntrospector.cpp; sourceTree = "<group>"; };
5DBD18AA0C54018700C15EAE /* CollectorHeapIntrospector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CollectorHeapIntrospector.h; sourceTree = "<group>"; };
5DBD18AF0C5401A700C15EAE /* MallocZoneSupport.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MallocZoneSupport.h; sourceTree = "<group>"; };
@@ -935,6 +937,7 @@
E11D51750B2E798D0056C188 /* StringExtras.h */,
6541BD6E08E80A17002CBEE7 /* TCPageMap.h */,
6541BD6F08E80A17002CBEE7 /* TCSpinLock.h */,
+ 5DA479650CFBCF56009328A0 /* TCPackedCache.h */,
6541BD7008E80A17002CBEE7 /* TCSystemAlloc.cpp */,
6541BD7108E80A17002CBEE7 /* TCSystemAlloc.h */,
935AF46B09E9D9DB00ACD1D8 /* UnusedParam.h */,
@@ -1264,6 +1267,7 @@
14ABB36F099C076400E2A24F /* value.h in Headers */,
E1EF79AB0CE97BA60088D500 /* UTF8.h in Headers */,
1419D32D0CEA7CDE00FF507A /* RefCounted.h in Headers */,
+ 5DA479660CFBCF56009328A0 /* TCPackedCache.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp
index 5d2f3ae..21ac44d 100644
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2005, Google Inc.
+// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -45,17 +45,28 @@
// 4. The pagemap (which maps from page-number to descriptor),
// can be read without holding any locks, and written while holding
// the "pageheap_lock".
+// 5. To improve performance, a subset of the information one can get
+// from the pagemap is cached in a data structure, pagemap_cache_,
+// that atomically reads and writes its entries. This cache can be
+// read and written without locking.
//
// This multi-threaded access to the pagemap is safe for fairly
// subtle reasons. We basically assume that when an object X is
// allocated by thread A and deallocated by thread B, there must
// have been appropriate synchronization in the handoff of object
-// X from thread A to thread B.
+// X from thread A to thread B. The same logic applies to pagemap_cache_.
+//
+// THE PAGEID-TO-SIZECLASS CACHE
+// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
+// returns 0 for a particular PageID then that means "no information," not that
+// the sizeclass is 0. The cache may have stale information for pages that do
+// not hold the beginning of any free()'able object. Staleness is eliminated
+// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
+// do_memalign() for all other relevant pages.
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
-// TODO: Return memory to system
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
@@ -70,6 +81,12 @@
#include <pthread.h>
#endif
+#ifndef NO_TCMALLOC_SAMPLES
+#ifdef WTF_CHANGES
+#define NO_TCMALLOC_SAMPLES
+#endif
+#endif
+
#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
@@ -197,6 +214,7 @@
#include "AlwaysInline.h"
#include "Assertions.h"
+#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
@@ -226,6 +244,19 @@
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#endif
+#define DEFINE_VARIABLE(type, name, value, meaning) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
+ type FLAGS_##name(value); \
+ char FLAGS_no##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
+
+#define DEFINE_int64(name, value, meaning) \
+ DEFINE_VARIABLE(int64_t, name, value, meaning)
+
+#define DEFINE_double(name, value, meaning) \
+ DEFINE_VARIABLE(double, name, value, meaning)
+
namespace WTF {
#define malloc fastMalloc
@@ -274,12 +305,59 @@
#endif
-#if HAVE(INTTYPES_H)
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-#define LLU PRIu64
+#ifndef WTF_CHANGES
+// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
+// you're porting to a system where you really can't get a stacktrace.
+#ifdef NO_TCMALLOC_SAMPLES
+// We use #define so code compiles even if you #include stacktrace.h somehow.
+# define GetStackTrace(stack, depth, skip) (0)
#else
-#define LLU "llu" // hope for the best
+# include <google/stacktrace.h>
+#endif
+#endif
+
+// Even if we have support for thread-local storage in the compiler
+// and linker, the OS may not support it. We need to check that at
+// runtime. Right now, we have to keep a manual set of "bad" OSes.
+#if defined(HAVE_TLS)
+ static bool kernel_supports_tls = false; // be conservative
+ static inline bool KernelSupportsTLS() {
+ return kernel_supports_tls;
+ }
+# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
+ static void CheckIfKernelSupportsTLS() {
+ kernel_supports_tls = false;
+ }
+# else
+# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
+ static void CheckIfKernelSupportsTLS() {
+ struct utsname buf;
+ if (uname(&buf) != 0) { // should be impossible
+ MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
+ kernel_supports_tls = false;
+ } else if (strcasecmp(buf.sysname, "linux") == 0) {
+ // The linux case: the first kernel to support TLS was 2.6.0
+ if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
+ kernel_supports_tls = false;
+ else if (buf.release[0] == '2' && buf.release[1] == '.' &&
+ buf.release[2] >= '0' && buf.release[2] < '6' &&
+ buf.release[3] == '.') // 2.0 - 2.5
+ kernel_supports_tls = false;
+ else
+ kernel_supports_tls = true;
+ } else { // some other kernel, we'll be optimistic
+ kernel_supports_tls = true;
+ }
+ // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
+ }
+# endif // HAVE_DECL_UNAME
+#endif // HAVE_TLS
+
+// __THROW is defined in glibc systems. It means, counter-intuitively,
+// "This function will never throw an exception." It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW // I guess we're not on a glibc system
+# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif
//-------------------------------------------------------------------
@@ -294,8 +372,11 @@
static const size_t kMaxSize = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment = 1 << kAlignShift;
-static const size_t kNumClasses = 170;
-static const size_t kMaxTinySize = 1 << 8;
+static const size_t kNumClasses = 68;
+
+// Allocates a big block of memory for the pagemap once we reach more than
+// 128MB
+static const size_t kPageMapBigAllocationThreshold = 128 << 20;
// Minimum number of pages to fetch from system at a time. Must be
// significantly bigger than kBlockSize to amortize system-call
@@ -310,7 +391,7 @@
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
-static const int kNumObjectsToMove = 32;
+static int num_objects_to_move[kNumClasses];
// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
@@ -330,57 +411,97 @@
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;
+/* The smallest prime > 2^n */
+static int primes_list[] = {
+ // Small values might cause high rates of sampling
+ // and hence commented out.
+ // 2, 5, 11, 17, 37, 67, 131, 257,
+ // 521, 1031, 2053, 4099, 8209, 16411,
+ 32771, 65537, 131101, 262147, 524309, 1048583,
+ 2097169, 4194319, 8388617, 16777259, 33554467 };
+
// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
-// kSampleParameter/2
+// tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
-static const size_t kSampleParameter = 266053;
+#ifdef NO_TCMALLOC_SAMPLES
+DEFINE_int64(tcmalloc_sample_parameter, 0,
+ "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
+static size_t sample_period = 0;
+#else
+DEFINE_int64(tcmalloc_sample_parameter, 262147,
+ "Twice the approximate gap between sampling actions."
+ " Must be a prime number. Otherwise will be rounded up to a "
+ " larger prime number");
+static size_t sample_period = 262147;
+#endif
+
+// Protects sample_period above
+static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
+
+// Parameters for controlling how fast memory is returned to the OS.
+
+DEFINE_double(tcmalloc_release_rate, 1,
+ "Rate at which we release unused memory to the system. "
+ "Zero means we never release memory back to the system. "
+ "Increase this flag to return memory faster; decrease it "
+ "to return memory slower. Reasonable rates are in the "
+ "range [0,10]");
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------
-// A pair of arrays we use for implementing the mapping from a size to
-// its size class. Indexed by "floor(lg(size))".
-static const int kSizeBits = 8 * sizeof(size_t);
-static unsigned char size_base[kSizeBits];
-static unsigned char size_shift[kSizeBits];
+// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
+// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
+// So for these larger sizes we have an array indexed by ceil(size/128).
+//
+// We flatten both logical arrays into one physical array and use
+// arithmetic to compute an appropriate index. The constants used by
+// ClassIndex() were selected to make the flattening work.
+//
+// Examples:
+// Size Expression Index
+// -------------------------------------------------------
+// 0 (0 + 7) / 8 0
+// 1 (1 + 7) / 8 1
+// ...
+// 1024 (1024 + 7) / 8 128
+// 1025 (1025 + 127 + (120<<7)) / 128 129
+// ...
+// 32768 (32768 + 127 + (120<<7)) / 128 376
+static const size_t kMaxSmallSize = 1024;
+static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
+static const int add_amount[2] = { 7, 127 + (120 << 7) };
+static unsigned char class_array[377];
-// Mapping from size class to size
+// Compute index of the class_array[] entry for a given size
+static inline int ClassIndex(size_t s) {
+ const int i = (s > kMaxSmallSize);
+ return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
+}
+
+// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];
// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];
-// Return floor(log2(n)) for n > 0.
-#if PLATFORM(X86) && COMPILER(GCC)
-static ALWAYS_INLINE int LgFloor(size_t n) {
- // "ro" for the input spec means the input can come from either a
- // register ("r") or offsetable memory ("o").
- int result;
- __asm__("bsrl %1, %0"
- : "=r" (result) // Output spec
- : "ro" (n) // Input spec
- : "cc" // Clobbers condition-codes
- );
- return result;
-}
+// TransferCache is used to cache transfers of num_objects_to_move[size_class]
+// back and forth between thread caches and the central cache for a given size
+// class.
+struct TCEntry {
+ void *head; // Head of chain of objects.
+ void *tail; // Tail of chain of objects.
+};
+// A central cache freelist can have anywhere from 0 to kNumTransferEntries
+// slots to put link list chains into. To keep memory usage bounded the total
+// number of TCEntries across size classes is fixed. Currently each size
+// class is initially given one TCEntry which also means that the maximum any
+// one class can have is kNumClasses.
+static const int kNumTransferEntries = kNumClasses;
-#elif PLATFORM(PPC) && COMPILER(GCC)
-static ALWAYS_INLINE int LgFloor(size_t n) {
- // "r" for the input spec means the input must come from a
- // register ("r")
- int result;
-
- __asm__ ("{cntlz|cntlzw} %0,%1"
- : "=r" (result) // Output spec
- : "r" (n)); // Input spec
-
- return 31 - result;
-}
-
-#else
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
@@ -396,29 +517,115 @@
ASSERT(n == 1);
return log;
}
-#endif
+
+// Some very basic linked list functions for dealing with using void * as
+// storage.
+
+static inline void *SLL_Next(void *t) {
+ return *(reinterpret_cast<void**>(t));
+}
+
+static inline void SLL_SetNext(void *t, void *n) {
+ *(reinterpret_cast<void**>(t)) = n;
+}
+
+static inline void SLL_Push(void **list, void *element) {
+ SLL_SetNext(element, *list);
+ *list = element;
+}
+
+static inline void *SLL_Pop(void **list) {
+ void *result = *list;
+ *list = SLL_Next(*list);
+ return result;
+}
+
+
+// Remove N elements from a linked list to which head points. head will be
+// modified to point to the new head. start and end will point to the first
+// and last nodes of the range. Note that end will point to NULL after this
+// function is called.
+static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
+ if (N == 0) {
+ *start = NULL;
+ *end = NULL;
+ return;
+ }
+
+ void *tmp = *head;
+ for (int i = 1; i < N; ++i) {
+ tmp = SLL_Next(tmp);
+ }
+
+ *start = *head;
+ *end = tmp;
+ *head = SLL_Next(tmp);
+ // Unlink range from list.
+ SLL_SetNext(tmp, NULL);
+}
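+// A worked example of the semantics above (illustrative names only): with
+// *head pointing at the chain a -> b -> c and N == 2, the loop stops with
+// tmp == b, so *start == a, *end == b, *head is advanced to c, and b's
+// next pointer is cleared, leaving the detached range a -> b -> NULL.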
+
+static inline void SLL_PushRange(void **head, void *start, void *end) {
+ if (!start) return;
+ SLL_SetNext(end, *head);
+ *head = start;
+}
+
+static inline size_t SLL_Size(void *head) {
+ int count = 0;
+ while (head) {
+ count++;
+ head = SLL_Next(head);
+ }
+ return count;
+}
+
+// Setup helper functions.
static ALWAYS_INLINE size_t SizeClass(size_t size) {
- size += !size; // change 0 to 1 (with no branches)
- const int lg = LgFloor(size);
- const int align = size_shift[lg];
- return size_base[lg] + ((size-1) >> align);
+ return class_array[ClassIndex(size)];
}
// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
return class_to_size[cl];
}
+static int NumMoveSize(size_t size) {
+ if (size == 0) return 0;
+ // Use approx 64k transfers between thread and central caches.
+ int num = static_cast<int>(64.0 * 1024.0 / size);
+ if (num < 2) num = 2;
+ // Clamp well below kMaxFreeListLength to avoid ping pong between central
+ // and thread caches.
+ if (num > static_cast<int>(0.8 * kMaxFreeListLength))
+ num = static_cast<int>(0.8 * kMaxFreeListLength);
+
+ // Also, avoid bringing in too many objects into small object free
+ // lists. There are lots of such lists, and if we allow each one to
+ // fetch too many at a time, we end up having to scavenge too often
+ // (especially when there are lots of threads and each thread gets a
+ // small allowance for its thread cache).
+ //
+ // TODO: Make thread cache free list sizes dynamic so that we do not
+ // have to equally divide a fixed resource amongst lots of threads.
+ if (num > 32) num = 32;
+
+ return num;
+}
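+// Rough numbers implied by the clamps above (illustrative; assumes the
+// usual 4KB kPageSize): a 256-byte class gets 64*1024/256 = 256 objects
+// per transfer, which the final clamp cuts to 32, while the largest
+// class (kMaxSize = 32KB) gets the minimum batch of 2.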
// Initialize the mapping arrays
static void InitSizeClasses() {
- // Special initialization for small sizes
- for (size_t lg = 0; lg < kAlignShift; lg++) {
- size_base[lg] = 1;
- size_shift[lg] = kAlignShift;
+ // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
+ if (ClassIndex(0) < 0) {
+ MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
+ abort();
+ }
+ if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
+ MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
+ abort();
}
- size_t next_class = 1;
+ // Compute the size classes we want to use
+ size_t sc = 1; // Next size class to assign
unsigned char alignshift = kAlignShift;
int last_lg = -1;
for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
@@ -427,38 +634,56 @@
// Increase alignment every so often.
//
// Since we double the alignment every time size doubles and
- // size >= 256, this means that space wasted due to alignment is
- // at most 16/256 i.e., 6.25%. Plus we cap the alignment at 512
+ // size >= 128, this means that space wasted due to alignment is
+ // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
// bytes, so the space wasted as a percentage starts falling for
- // sizes > 4K.
- if ((lg >= 8) && (alignshift < 9)) {
+ // sizes > 2K.
+ if ((lg >= 7) && (alignshift < 8)) {
alignshift++;
}
- size_base[lg] = static_cast<unsigned char>(next_class - ((size-1) >> alignshift));
- size_shift[lg] = alignshift;
+ last_lg = lg;
}
- class_to_size[next_class] = size;
- last_lg = lg;
+ // Allocate enough pages so leftover is less than 1/8 of total.
+ // This bounds wasted space to at most 12.5%.
+ size_t psize = kPageSize;
+ while ((psize % size) > (psize >> 3)) {
+ psize += kPageSize;
+ }
+ const size_t my_pages = psize >> kPageShift;
- next_class++;
+ if (sc > 1 && my_pages == class_to_pages[sc-1]) {
+ // See if we can merge this into the previous class without
+ // increasing the fragmentation of the previous class.
+ const size_t my_objects = (my_pages << kPageShift) / size;
+ const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
+ / class_to_size[sc-1];
+ if (my_objects == prev_objects) {
+ // Adjust last class to include this size
+ class_to_size[sc-1] = size;
+ continue;
+ }
+ }
+
+ // Add new class
+ class_to_pages[sc] = my_pages;
+ class_to_size[sc] = size;
+ sc++;
}
- if (next_class >= kNumClasses) {
- MESSAGE("used up too many size classes: %d\n", next_class);
+ if (sc != kNumClasses) {
+ MESSAGE("wrong number of size classes: found %d instead of %d\n",
+ sc, int(kNumClasses));
abort();
}
- // Initialize the number of pages we should allocate to split into
- // small objects for a given class.
- for (size_t cl = 1; cl < next_class; cl++) {
- // Allocate enough pages so leftover is less than 1/16 of total.
- // This bounds wasted space to at most 6.25%.
- size_t psize = kPageSize;
- const size_t s = class_to_size[cl];
- while ((psize % s) > (psize >> 4)) {
- psize += kPageSize;
+ // Initialize the mapping arrays
+ int next_size = 0;
+ for (unsigned char c = 1; c < kNumClasses; c++) {
+ const size_t max_size_in_class = class_to_size[c];
+ for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
+ class_array[ClassIndex(s)] = c;
}
- class_to_pages[cl] = psize >> kPageShift;
+ next_size = static_cast<int>(max_size_in_class + kAlignment);
}
// Double-check sizes just to be safe
@@ -487,6 +712,30 @@
abort();
}
}
+
+ // Initialize the num_objects_to_move array.
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
+ }
+
+#ifndef WTF_CHANGES
+ if (false) {
+ // Dump class sizes and maximum external wastage per size class
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ const int alloc_size = class_to_pages[cl] << kPageShift;
+ const int alloc_objs = alloc_size / class_to_size[cl];
+ const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
+ const int max_waste = alloc_size - min_used;
+ MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
+ int(cl),
+ int(class_to_size[cl-1] + 1),
+ int(class_to_size[cl]),
+ int(class_to_pages[cl] << kPageShift),
+ max_waste * 100.0 / alloc_size
+ );
+ }
+ }
+#endif
}
// -------------------------------------------------------------------------
@@ -573,9 +822,13 @@
// Type that can hold the length of a run of pages
typedef uintptr_t Length;
-// Convert byte size into pages
+static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
+
+// Convert byte size into pages. This won't overflow, but may return
+// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
- return ((bytes + kPageSize - 1) >> kPageShift);
+ return (bytes >> kPageShift) +
+ ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
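// For example (illustrative, assuming 4KB pages): pages(4096) == 1 while
// pages(4097) == 1 + 1 == 2; unlike the old rounded-up expression, this
// form cannot overflow when bytes is close to the maximum size_t, it just
// returns a very large Length.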
// Convert a user size into the number of bytes that will actually be
@@ -583,6 +836,7 @@
static size_t AllocationSize(size_t bytes) {
if (bytes > kMaxSize) {
// Large object: we allocate an integral number of pages
+ ASSERT(bytes <= (kMaxValidPages << kPageShift));
return pages(bytes) << kPageShift;
} else {
// Small object: find the size class to which it belongs
@@ -692,29 +946,17 @@
list->next = span;
}
-static void DLL_InsertOrdered(Span* list, Span* span) {
- ASSERT(span->next == NULL);
- ASSERT(span->prev == NULL);
- // Look for appropriate place to insert
- Span* x = list;
- while ((x->next != list) && (x->next->start < span->start)) {
- x = x->next;
- }
- span->next = x->next;
- span->prev = x;
- x->next->prev = span;
- x->next = span;
-}
-
// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
// The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------
+// size/depth are made the same size as a pointer so that some generic
+// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
uintptr_t size; // Size of object
- int depth; // Number of PC values stored in array below
+ uintptr_t depth; // Number of PC values stored in array below
void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
@@ -725,17 +967,21 @@
// -------------------------------------------------------------------------
// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
+// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
+// because sometimes the sizeclass is all the information we need.
// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
public:
typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+ typedef PackedCache<BITS, uint64_t> CacheType;
};
// A two-level map for 32-bit machines
template <> class MapSelector<32> {
public:
typedef TCMalloc_PageMap2<32-kPageShift> Type;
+ typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};
// -------------------------------------------------------------------------
@@ -803,16 +1049,39 @@
bool Check();
bool CheckList(Span* list, Length min_pages, Length max_pages);
+ // Release all pages on the free list for reuse by the OS:
+ void ReleaseFreePages();
+
+ // Return 0 if we have no information, or else the correct sizeclass for p.
+ // Reads and writes to pagemap_cache_ do not require locking.
+ // The entries are 64 bits on 64-bit hardware and 16 bits on
+ // 32-bit hardware, and we don't mind raciness as long as each read of
+ // an entry yields a valid entry, not a partially updated entry.
+ size_t GetSizeClassIfCached(PageID p) const {
+ return pagemap_cache_.GetOrDefault(p, 0);
+ }
+ void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
+
private:
- // Pick the appropriate map type based on pointer size
+ // Pick the appropriate map and cache types based on pointer size
typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
+ typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
PageMap pagemap_;
+ mutable PageMapCache pagemap_cache_;
+
+ // We segregate spans of a given size into two circular linked
+ // lists: one for normal spans, and one for spans whose memory
+ // has been returned to the system.
+ struct SpanList {
+ Span normal;
+ Span returned;
+ };
// List of free spans of length >= kMaxPages
- Span large_;
+ SpanList large_;
// Array mapping from span length to a doubly linked list of free spans
- Span free_[kMaxPages];
+ SpanList free_[kMaxPages];
// Number of pages kept in free lists
uintptr_t free_pages_;
@@ -827,7 +1096,9 @@
// span into appropriate free lists. Also update "span" to have
// length exactly "n" and mark it as non-free so it can be returned
// to the client.
- void Carve(Span* span, Length n);
+ //
+ // "released" is true iff "span" was found on a "returned" list.
+ void Carve(Span* span, Length n, bool released);
void RecordSpan(Span* span) {
pagemap_.set(span->start, span);
@@ -835,6 +1106,21 @@
pagemap_.set(span->start + span->length - 1, span);
}
}
+
+ // Allocate a large span of length == n. If successful, returns a
+ // span of exactly the specified length. Else, returns NULL.
+ Span* AllocLarge(Length n);
+
+ // Incrementally release some memory to the system.
+ // IncrementalScavenge(n) is called whenever n pages are freed.
+ void IncrementalScavenge(Length n);
+
+ // Number of pages to deallocate before doing more scavenging
+ int64_t scavenge_counter_;
+
+ // Index of last free list we scavenged
+ size_t scavenge_index_;
+
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
friend class FastMallocZone;
#endif
@@ -843,54 +1129,99 @@
void TCMalloc_PageHeap::init()
{
pagemap_.init(MetaDataAlloc);
+ pagemap_cache_ = PageMapCache(0);
free_pages_ = 0;
system_bytes_ = 0;
-
- DLL_Init(&large_);
+ scavenge_counter_ = 0;
+ // Start scavenging at kMaxPages list
+ scavenge_index_ = kMaxPages-1;
+ COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
+ DLL_Init(&large_.normal);
+ DLL_Init(&large_.returned);
for (size_t i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i]);
+ DLL_Init(&free_[i].normal);
+ DLL_Init(&free_[i].returned);
}
}
inline Span* TCMalloc_PageHeap::New(Length n) {
ASSERT(Check());
- if (n == 0) n = 1;
+ ASSERT(n > 0);
// Find first size >= n that has a non-empty list
- for (size_t s = n; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) {
- Span* result = free_[s].next;
- Carve(result, n);
- ASSERT(Check());
- free_pages_ -= n;
- return result;
+ for (Length s = n; s < kMaxPages; s++) {
+ Span* ll = NULL;
+ bool released = false;
+ if (!DLL_IsEmpty(&free_[s].normal)) {
+ // Found normal span
+ ll = &free_[s].normal;
+ } else if (!DLL_IsEmpty(&free_[s].returned)) {
+ // Found returned span; reallocate it
+ ll = &free_[s].returned;
+ released = true;
+ } else {
+ // Keep looking in larger classes
+ continue;
+ }
+
+ Span* result = ll->next;
+ Carve(result, n, released);
+ ASSERT(Check());
+ free_pages_ -= n;
+ return result;
+ }
+
+ Span* result = AllocLarge(n);
+ if (result != NULL) return result;
+
+ // Grow the heap and try again
+ if (!GrowHeap(n)) {
+ ASSERT(Check());
+ return NULL;
+ }
+
+ return AllocLarge(n);
+}
+
+Span* TCMalloc_PageHeap::AllocLarge(Length n) {
+ // find the best span (closest to n in size).
+ // The following loops implement address-ordered best-fit.
+ bool from_released = false;
+ Span *best = NULL;
+
+ // Search through normal list
+ for (Span* span = large_.normal.next;
+ span != &large_.normal;
+ span = span->next) {
+ if (span->length >= n) {
+ if ((best == NULL)
+ || (span->length < best->length)
+ || ((span->length == best->length) && (span->start < best->start))) {
+ best = span;
+ from_released = false;
+ }
}
}
- // Look in large list. If we first do not find something, we try to
- // grow the heap and try again.
- for (int i = 0; i < 2; i++) {
- // find the best span (closest to n in size)
- Span *best = NULL;
- for (Span* span = large_.next; span != &large_; span = span->next) {
- if (span->length >= n &&
- (best == NULL || span->length < best->length)) {
+ // Search through released list in case it has a better fit
+ for (Span* span = large_.returned.next;
+ span != &large_.returned;
+ span = span->next) {
+ if (span->length >= n) {
+ if ((best == NULL)
+ || (span->length < best->length)
+ || ((span->length == best->length) && (span->start < best->start))) {
best = span;
+ from_released = true;
}
}
- if (best != NULL) {
- Carve(best, n);
- ASSERT(Check());
- free_pages_ -= n;
- return best;
- }
- if (i == 0) {
- // Nothing suitable in large list. Grow the heap and look again.
- if (!GrowHeap(n)) {
- ASSERT(Check());
- return NULL;
- }
- }
+ }
+
+ if (best != NULL) {
+ Carve(best, n, from_released);
+ ASSERT(Check());
+ free_pages_ -= n;
+ return best;
}
return NULL;
}
@@ -912,24 +1243,25 @@
return leftover;
}
-inline void TCMalloc_PageHeap::Carve(Span* span, Length n) {
+inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
ASSERT(n > 0);
DLL_Remove(span);
span->free = 0;
Event(span, 'A', n);
- const size_t extra = span->length - n;
+ const int extra = static_cast<int>(span->length - n);
ASSERT(extra >= 0);
if (extra > 0) {
Span* leftover = NewSpan(span->start + n, extra);
leftover->free = 1;
Event(leftover, 'S', extra);
RecordSpan(leftover);
- if (extra < kMaxPages) {
- DLL_Prepend(&free_[extra], leftover);
- } else {
- DLL_InsertOrdered(&large_, leftover);
- }
+
+ // Place leftover span on appropriate free list
+ SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
+ Span* dst = released ? &listpair->returned : &listpair->normal;
+ DLL_Prepend(dst, leftover);
+
span->length = n;
pagemap_.set(span->start + n - 1, span);
}
@@ -948,6 +1280,10 @@
// necessary. We do not bother resetting the stale pagemap
// entries for the pieces we are merging together because we only
// care about the pagemap entries for the boundaries.
+ //
+ // Note that the spans we merge into "span" may come out of
+ // a "returned" list. For simplicity, we move these into the
+ // "normal" list of the appropriate size class.
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p-1);
@@ -977,15 +1313,71 @@
Event(span, 'D', span->length);
span->free = 1;
if (span->length < kMaxPages) {
- DLL_Prepend(&free_[span->length], span);
+ DLL_Prepend(&free_[span->length].normal, span);
} else {
- DLL_InsertOrdered(&large_, span);
+ DLL_Prepend(&large_.normal, span);
}
free_pages_ += n;
+ IncrementalScavenge(n);
ASSERT(Check());
}
+void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
+ // Fast path; not yet time to release memory
+ scavenge_counter_ -= n;
+ if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
+
+ // Never delay scavenging for more than the following number of
+ // deallocated pages. With 4K pages, this comes to 4GB of
+ // deallocation.
+ static const int kMaxReleaseDelay = 1 << 20;
+
+ // If there is nothing to release, wait for so many pages before
+ // scavenging again. With 4K pages, this comes to 1GB of memory.
+ static const int kDefaultReleaseDelay = 1 << 18;
+
+ const double rate = FLAGS_tcmalloc_release_rate;
+ if (rate <= 1e-6) {
+ // Tiny release rate means that releasing is disabled.
+ scavenge_counter_ = kDefaultReleaseDelay;
+ return;
+ }
+
+ // Find index of free list to scavenge
+ size_t index = scavenge_index_ + 1;
+ for (size_t i = 0; i < kMaxPages+1; i++) {
+ if (index > kMaxPages) index = 0;
+ SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
+ if (!DLL_IsEmpty(&slist->normal)) {
+ // Release the last span on the normal portion of this list
+ Span* s = slist->normal.prev;
+ DLL_Remove(s);
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ DLL_Prepend(&slist->returned, s);
+
+ // Compute how long to wait until we return memory.
+ // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
+ // after releasing one page.
+ const double mult = 1000.0 / rate;
+ double wait = mult * static_cast<double>(s->length);
+ if (wait > kMaxReleaseDelay) {
+ // Avoid overflow and bound to reasonable range
+ wait = kMaxReleaseDelay;
+ }
+ scavenge_counter_ = static_cast<int64_t>(wait);
+
+ scavenge_index_ = index; // Scavenge at index+1 next time
+ return;
+ }
+ index++;
+ }
+
+ // Nothing to scavenge, delay for a while
+ scavenge_counter_ = kDefaultReleaseDelay;
+}
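+// Illustrative pacing under the defaults above (arithmetic only, values
+// hypothetical): with FLAGS_tcmalloc_release_rate == 1, releasing an
+// 8-page span sets scavenge_counter_ to (1000.0 / 1) * 8 = 8000, so the
+// next release is deferred until roughly 8000 further pages are freed.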
+
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
// Associate span object with all interior pages as well
ASSERT(!span->free);
@@ -999,59 +1391,102 @@
}
#ifndef WTF_CHANGES
+static double PagesToMB(uint64_t pages) {
+ return (pages << kPageShift) / 1048576.0;
+}
+
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
int nonempty_sizes = 0;
for (int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) nonempty_sizes++;
+ if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
+ nonempty_sizes++;
+ }
}
out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n", nonempty_sizes,
- (static_cast<double>(free_pages_) * kPageSize) / 1048576.0);
+ out->printf("PageHeap: %d sizes; %6.1f MB free\n",
+ nonempty_sizes, PagesToMB(free_pages_));
out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
+ uint64_t total_normal = 0;
+ uint64_t total_returned = 0;
for (int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) {
- const int list_length = DLL_Length(&free_[s]);
- uint64_t s_pages = s * list_length;
- cumulative += s_pages;
- out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
- s, list_length,
- (s_pages << kPageShift) / 1048576.0,
- (cumulative << kPageShift) / 1048576.0);
+ const int n_length = DLL_Length(&free_[s].normal);
+ const int r_length = DLL_Length(&free_[s].returned);
+ if (n_length + r_length > 0) {
+ uint64_t n_pages = s * n_length;
+ uint64_t r_pages = s * r_length;
+ total_normal += n_pages;
+ total_returned += r_pages;
+ out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
+ "; unmapped: %6.1f MB; %6.1f MB cum\n",
+ s,
+ (n_length + r_length),
+ PagesToMB(n_pages + r_pages),
+ PagesToMB(total_normal + total_returned),
+ PagesToMB(r_pages),
+ PagesToMB(total_returned));
}
}
- uint64_t large_pages = 0;
- int large_spans = 0;
- for (Span* s = large_.next; s != &large_; s = s->next) {
- out->printf(" [ %6" PRIuS " spans ]\n", s->length);
- large_pages += s->length;
- large_spans++;
+ uint64_t n_pages = 0;
+ uint64_t r_pages = 0;
+ int n_spans = 0;
+ int r_spans = 0;
+ out->printf("Normal large spans:\n");
+ for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
+ out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
+ s->length, PagesToMB(s->length));
+ n_pages += s->length;
+ n_spans++;
}
- cumulative += large_pages;
- out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
- large_spans,
- (large_pages << kPageShift) / 1048576.0,
- (cumulative << kPageShift) / 1048576.0);
+ out->printf("Unmapped large spans:\n");
+ for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
+ out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
+ s->length, PagesToMB(s->length));
+ r_pages += s->length;
+ r_spans++;
+ }
+ total_normal += n_pages;
+ total_returned += r_pages;
+ out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
+ "; unmapped: %6.1f MB; %6.1f MB cum\n",
+ (n_spans + r_spans),
+ PagesToMB(n_pages + r_pages),
+ PagesToMB(total_normal + total_returned),
+ PagesToMB(r_pages),
+ PagesToMB(total_returned));
}
#endif
bool TCMalloc_PageHeap::GrowHeap(Length n) {
ASSERT(kMaxPages >= kMinSystemAlloc);
+ if (n > kMaxValidPages) return false;
Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, 0, kPageSize);
+ size_t actual_size;
+ void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
if (ptr == NULL) {
if (n < ask) {
// Try growing just "n" pages
ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, 0, kPageSize);
+ ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
}
if (ptr == NULL) return false;
}
+ ask = actual_size >> kPageShift;
+
+ uint64_t old_system_bytes = system_bytes_;
system_bytes_ += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
ASSERT(p > 0);
+ // If we already have a lot of pages allocated, just preallocate a bunch of
+ // memory for the page map. This prevents fragmentation by pagemap metadata
+ // when a program keeps allocating and freeing large blocks.
+
+ if (old_system_bytes < kPageMapBigAllocationThreshold
+ && system_bytes_ >= kPageMapBigAllocationThreshold) {
+ pagemap_.PreallocateMoreMemory();
+ }
+
// Make sure pagemap_ has entries for all of the new pages.
// Plus ensure one before and one after so coalescing code
// does not need bounds-checking.
@@ -1073,10 +1508,13 @@
}
bool TCMalloc_PageHeap::Check() {
- ASSERT(free_[0].next == &free_[0]);
- CheckList(&large_, kMaxPages, 1000000000);
+ ASSERT(free_[0].normal.next == &free_[0].normal);
+ ASSERT(free_[0].returned.next == &free_[0].returned);
+ CheckList(&large_.normal, kMaxPages, 1000000000);
+ CheckList(&large_.returned, kMaxPages, 1000000000);
for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s], s, s);
+ CheckList(&free_[s].normal, s, s);
+ CheckList(&free_[s].returned, s, s);
}
return true;
}
@@ -1098,6 +1536,26 @@
}
#endif
+static void ReleaseFreeList(Span* list, Span* returned) {
+ // Walk backwards through list so that when we push these
+ // spans on the "returned" list, we preserve the order.
+ while (!DLL_IsEmpty(list)) {
+ Span* s = list->prev;
+ DLL_Remove(s);
+ DLL_Prepend(returned, s);
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ }
+}
+
+void TCMalloc_PageHeap::ReleaseFreePages() {
+ for (Length s = 0; s < kMaxPages; s++) {
+ ReleaseFreeList(&free_[s].normal, &free_[s].returned);
+ }
+ ReleaseFreeList(&large_.normal, &large_.returned);
+ ASSERT(Check());
+}
+
//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------
@@ -1130,18 +1588,27 @@
void clear_lowwatermark() { lowater_ = length_; }
ALWAYS_INLINE void Push(void* ptr) {
- *(reinterpret_cast<void**>(ptr)) = list_;
- list_ = ptr;
+ SLL_Push(&list_, ptr);
length_++;
}
+ void PushRange(int N, void *start, void *end) {
+ SLL_PushRange(&list_, start, end);
+ length_ = length_ + static_cast<uint16_t>(N);
+ }
+
+ void PopRange(int N, void **start, void **end) {
+ SLL_PopRange(&list_, N, start, end);
+ ASSERT(length_ >= N);
+ length_ = length_ - static_cast<uint16_t>(N);
+ if (length_ < lowater_) lowater_ = length_;
+ }
+
ALWAYS_INLINE void* Pop() {
ASSERT(list_ != NULL);
- void* result = list_;
- list_ = *(reinterpret_cast<void**>(result));
length_--;
if (length_ < lowater_) lowater_ = length_;
- return result;
+ return SLL_Pop(&list_);
}
#ifdef WTF_CHANGES
@@ -1169,13 +1636,18 @@
size_t size_; // Combined size of data
ThreadIdentifier tid_; // Which thread owns it
- bool setspecific_; // Called pthread_setspecific?
+ bool in_setspecific_; // Called pthread_setspecific?
FreeList list_[kNumClasses]; // Array indexed by size-class
// We sample allocations, biased by the size of the allocation
uint32_t rnd_; // Cheap random number generator
size_t bytes_until_sample_; // Bytes until we sample next
+ // Allocate a new heap. REQUIRES: pageheap_lock is held.
+ static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
+
+ // Use only as pthread thread-specific destructor function.
+ static void DestroyThreadCache(void* ptr);
public:
// All ThreadCache objects are kept in a linked list (for stats collection)
TCMalloc_ThreadCache* next_;
@@ -1203,14 +1675,16 @@
bool SampleAllocation(size_t k);
// Pick next sampling point
- void PickNextSample();
+ void PickNextSample(size_t k);
static void InitModule();
static void InitTSD();
+ static TCMalloc_ThreadCache* GetThreadHeap();
static TCMalloc_ThreadCache* GetCache();
static TCMalloc_ThreadCache* GetCacheIfPresent();
static TCMalloc_ThreadCache* CreateCacheIfNecessary();
- static void DeleteCache(void* ptr);
+ static void DeleteCache(TCMalloc_ThreadCache* heap);
+ static void BecomeIdle();
static void RecomputeThreadCacheSize();
#ifdef WTF_CHANGES
@@ -1231,27 +1705,26 @@
public:
void Init(size_t cl);
- // REQUIRES: lock_ is held
- // Insert object.
- // May temporarily release lock_.
- void Insert(void* object);
+ // These methods all do internal locking.
- // REQUIRES: lock_ is held
- // Remove object from cache and return.
- // Return NULL if no free entries in cache.
- void* Remove();
+ // Insert the specified range into the central freelist. N is the number of
+ // elements in the range.
+ void InsertRange(void *start, void *end, int N);
- // REQUIRES: lock_ is held
- // Populate cache by fetching from the page heap.
- // May temporarily release lock_.
- void Populate();
+ // Returns the actual number of fetched elements into N.
+ void RemoveRange(void **start, void **end, int *N);
- // REQUIRES: lock_ is held
- // Number of free objects in cache
- size_t length() const { return counter_; }
+ // Returns the number of free objects in cache.
+ size_t length() {
+ SpinLockHolder h(&lock_);
+ return counter_;
+ }
- // Lock -- exposed because caller grabs it before touching this object
- SpinLock lock_;
+ // Returns the number of free objects in the transfer cache.
+ int tc_length() {
+ SpinLockHolder h(&lock_);
+ return used_slots_ * num_objects_to_move[size_class_];
+ }
#ifdef WTF_CHANGES
template <class Finder, class Reader>
@@ -1269,11 +1742,76 @@
#endif
private:
- // We keep linked lists of empty and non-emoty spans.
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return.
+ // Return NULL if no free entries in cache.
+ void* FetchFromSpans();
+
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return. Fetches
+ // from pageheap if cache is empty. Only returns
+ // NULL on allocation failure.
+ void* FetchFromSpansSafe();
+
+ // REQUIRES: lock_ is held
+ // Release a linked list of objects to spans.
+ // May temporarily release lock_.
+ void ReleaseListToSpans(void *start);
+
+ // REQUIRES: lock_ is held
+ // Release an object to spans.
+ // May temporarily release lock_.
+ void ReleaseToSpans(void* object);
+
+ // REQUIRES: lock_ is held
+ // Populate cache by fetching from the page heap.
+ // May temporarily release lock_.
+ void Populate();
+
+ // REQUIRES: lock is held.
+ // Tries to make room for a TCEntry. If the cache is full it will try to
+ // expand it at the cost of some other cache size. Return false if there is
+ // no space.
+ bool MakeCacheSpace();
+
+ // REQUIRES: lock_ for locked_size_class is held.
+ // Picks a "random" size class to steal TCEntry slot from. In reality it
+ // just iterates over the sizeclasses but does so without taking a lock.
+ // Returns true on success.
+ // May temporarily lock a "random" size class.
+ static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
+
+ // REQUIRES: lock_ is *not* held.
+ // Tries to shrink the cache. If force is true it will release objects to
+ // spans if that allows it to shrink the cache. Returns false if it failed
+ // to shrink the cache. Decrements cache_size_ on success.
+ // May temporarily take lock_. If it takes lock_, the locked_size_class
+ // lock is released to keep the thread from holding two size class locks
+ // concurrently, which could lead to a deadlock.
+ bool ShrinkCache(int locked_size_class, bool force);
+
+ // This lock protects all the data members. cached_entries and cache_size_
+ // may be looked at without holding the lock.
+ SpinLock lock_;
+
+ // We keep linked lists of empty and non-empty spans.
size_t size_class_; // My size class
Span empty_; // Dummy header for list of empty spans
Span nonempty_; // Dummy header for list of non-empty spans
size_t counter_; // Number of free objects in cache entry
+
+ // Here we reserve space for TCEntry cache slots. Since one size class can
+ // end up getting all the TCEntries quota in the system we just preallocate
+ // sufficient number of entries here.
+ TCEntry tc_slots_[kNumTransferEntries];
+
+ // Number of currently used cached entries in tc_slots_. This variable is
+ // updated under a lock but can be read without one.
+ int32_t used_slots_;
+ // The current number of slots for this size class. This is an
+ // adaptive value that is increased if there is lots of traffic
+ // on a given size class.
+ int32_t cache_size_;
};
// Pad each CentralCache object to multiple of 64 bytes
@@ -1297,19 +1835,18 @@
// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
+#define pageheap ((TCMalloc_PageHeap*) pageheap_memory)
-typedef union {
- void* m_memory;
- TCMalloc_PageHeap m_pageHeap;
-} PageHeapUnion;
-
-static inline TCMalloc_PageHeap* getPageHeap()
-{
- return &reinterpret_cast<PageHeapUnion*>(&pageheap_memory[0])->m_pageHeap;
-}
-
-#define pageheap getPageHeap()
-
+// If TLS is available, we also store a copy
+// of the per-thread object in a __thread variable
+// since __thread variables are faster to read
+// than pthread_getspecific(). We still need
+// pthread_setspecific() because __thread
+// variables provide no way to run cleanup
+// code when a thread is destroyed.
+#ifdef HAVE_TLS
+static __thread TCMalloc_ThreadCache *threadlocal_heap;
+#endif
// Thread-specific key. Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_keycreate().
@@ -1321,15 +1858,6 @@
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
#endif
-static ALWAYS_INLINE TCMalloc_ThreadCache* getThreadHeap()
-{
-#if COMPILER(MSVC)
- return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
-#else
- return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
-#endif
-}
-
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
// still do pthread_setspecific when using MSVC fast TLS to
@@ -1365,9 +1893,21 @@
DLL_Init(&empty_);
DLL_Init(&nonempty_);
counter_ = 0;
+
+ cache_size_ = 1;
+ used_slots_ = 0;
+ ASSERT(cache_size_ <= kNumTransferEntries);
}
-ALWAYS_INLINE void TCMalloc_Central_FreeList::Insert(void* object) {
+void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
+ while (start) {
+ void *next = SLL_Next(start);
+ ReleaseToSpans(start);
+ start = next;
+ }
+}
+
+ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
Span* span = pageheap->GetDescriptor(p);
ASSERT(span != NULL);
@@ -1412,7 +1952,139 @@
}
}
-ALWAYS_INLINE void* TCMalloc_Central_FreeList::Remove() {
+ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
+ size_t locked_size_class, bool force) {
+ static int race_counter = 0;
+ int t = race_counter++; // Updated without a lock, but who cares.
+ if (t >= static_cast<int>(kNumClasses)) {
+ while (t >= static_cast<int>(kNumClasses)) {
+ t -= kNumClasses;
+ }
+ race_counter = t;
+ }
+ ASSERT(t >= 0);
+ ASSERT(t < static_cast<int>(kNumClasses));
+ if (t == static_cast<int>(locked_size_class)) return false;
+ return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
+}
+
+bool TCMalloc_Central_FreeList::MakeCacheSpace() {
+ // Is there room in the cache?
+ if (used_slots_ < cache_size_) return true;
+ // Check whether we can expand this cache.
+ if (cache_size_ == kNumTransferEntries) return false;
+ // Ok, we'll try to grab an entry from some other size class.
+ if (EvictRandomSizeClass(size_class_, false) ||
+ EvictRandomSizeClass(size_class_, true)) {
+ // Succeeded in evicting, we're going to make our cache larger.
+ cache_size_++;
+ return true;
+ }
+ return false;
+}
+
+
+namespace {
+class LockInverter {
+ private:
+ SpinLock *held_, *temp_;
+ public:
+ inline explicit LockInverter(SpinLock* held, SpinLock *temp)
+ : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
+ inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
+};
+}
+
+bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
+ // Start with a quick check without taking a lock.
+ if (cache_size_ == 0) return false;
+ // We don't evict from a full cache unless we are 'forcing'.
+ if (force == false && used_slots_ == cache_size_) return false;
+
+ // Grab lock, but first release the other lock held by this thread. We use
+ // the lock inverter to ensure that we never hold two size class locks
+ // concurrently. That can create a deadlock because there is no well
+ // defined nesting order.
+ LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
+ ASSERT(used_slots_ <= cache_size_);
+ ASSERT(0 <= cache_size_);
+ if (cache_size_ == 0) return false;
+ if (used_slots_ == cache_size_) {
+ if (force == false) return false;
+ // ReleaseListToSpans releases the lock, so we have to make all the
+ // updates to the central list before calling it.
+ cache_size_--;
+ used_slots_--;
+ ReleaseListToSpans(tc_slots_[used_slots_].head);
+ return true;
+ }
+ cache_size_--;
+ return true;
+}
+
+void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
+ SpinLockHolder h(&lock_);
+ if (N == num_objects_to_move[size_class_] &&
+ MakeCacheSpace()) {
+ int slot = used_slots_++;
+ ASSERT(slot >=0);
+ ASSERT(slot < kNumTransferEntries);
+ TCEntry *entry = &tc_slots_[slot];
+ entry->head = start;
+ entry->tail = end;
+ return;
+ }
+ ReleaseListToSpans(start);
+}
+
+void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
+ int num = *N;
+ ASSERT(num > 0);
+
+ SpinLockHolder h(&lock_);
+ if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
+ int slot = --used_slots_;
+ ASSERT(slot >= 0);
+ TCEntry *entry = &tc_slots_[slot];
+ *start = entry->head;
+ *end = entry->tail;
+ return;
+ }
+
+ // TODO: Prefetch multiple TCEntries?
+ void *tail = FetchFromSpansSafe();
+ if (!tail) {
+ // We are completely out of memory.
+ *start = *end = NULL;
+ *N = 0;
+ return;
+ }
+
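+  // Chain up to "num" objects into a singly-linked list. The object
+  // fetched first stays at the tail; later objects are pushed onto the head.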
+ SLL_SetNext(tail, NULL);
+ void *head = tail;
+ int count = 1;
+ while (count < num) {
+ void *t = FetchFromSpans();
+ if (!t) break;
+ SLL_Push(&head, t);
+ count++;
+ }
+ *start = head;
+ *end = tail;
+ *N = count;
+}
+
+
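+// Like FetchFromSpans(), but when no span has free objects it first
+// replenishes the span list via Populate() (which may temporarily release
+// lock_) and then retries.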
+void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
+ void *t = FetchFromSpans();
+ if (!t) {
+ Populate();
+ t = FetchFromSpans();
+ }
+ return t;
+}
+
+void* TCMalloc_Central_FreeList::FetchFromSpans() {
if (DLL_IsEmpty(&nonempty_)) return NULL;
Span* span = nonempty_.next;
@@ -1447,6 +2119,13 @@
lock_.Lock();
return;
}
+ ASSERT(span->length == npages);
+ // Cache sizeclass info eagerly. Locking is not necessary.
+ // (Instead of being eager, we could just replace any stale info
+ // about this span, but that seems to be no better in practice.)
+ for (size_t i = 0; i < npages; i++) {
+ pageheap->CacheSizeClass(span->start + i, size_class_);
+ }
// Split the block into pieces and add to the free-list
// TODO: coloring of objects to avoid cache conflicts?
@@ -1478,7 +2157,7 @@
inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
if (bytes_until_sample_ < k) {
- PickNextSample();
+ PickNextSample(k);
return true;
} else {
bytes_until_sample_ -= k;
@@ -1491,26 +2170,24 @@
next_ = NULL;
prev_ = NULL;
tid_ = tid;
- setspecific_ = false;
+ in_setspecific_ = false;
for (size_t cl = 0; cl < kNumClasses; ++cl) {
list_[cl].Init();
}
// Initialize RNG -- run it for a bit to get to good values
+ bytes_until_sample_ = 0;
rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
for (int i = 0; i < 100; i++) {
- PickNextSample();
+ PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
}
}
void TCMalloc_ThreadCache::Cleanup() {
// Put unused memory back into central cache
for (size_t cl = 0; cl < kNumClasses; ++cl) {
- FreeList* src = &list_[cl];
-    TCMalloc_Central_FreeList* dst = &central_cache[cl];
- SpinLockHolder h(&dst->lock_);
- while (!src->empty()) {
- dst->Insert(src->Pop());
+ if (list_[cl].length() > 0) {
+ ReleaseToCentralCache(cl, list_[cl].length());
}
}
}
@@ -1519,7 +2196,7 @@
ASSERT(size <= kMaxSize);
const size_t cl = SizeClass(size);
FreeList* list = &list_[cl];
- size_t allocationSize = (size <= kMaxTinySize) ? (size + 7) & ~0x7 : ByteSizeForClass(cl);
+ size_t allocationSize = ByteSizeForClass(cl);
if (list->empty()) {
FetchFromCentralCache(cl, allocationSize);
if (list->empty()) return NULL;
@@ -1534,43 +2211,39 @@
list->Push(ptr);
// If enough data is free, put back into central cache
if (list->length() > kMaxFreeListLength) {
- ReleaseToCentralCache(cl, kNumObjectsToMove);
+ ReleaseToCentralCache(cl, num_objects_to_move[cl]);
}
if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
-ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t byteSize) {
-  TCMalloc_Central_FreeList* src = &central_cache[cl];
- FreeList* dst = &list_[cl];
- SpinLockHolder h(&src->lock_);
- for (int i = 0; i < kNumObjectsToMove; i++) {
- void* object = src->Remove();
- if (object == NULL) {
- if (i == 0) {
- src->Populate(); // Temporarily releases src->lock_
- object = src->Remove();
- }
- if (object == NULL) {
- break;
- }
- }
- dst->Push(object);
- size_ += byteSize;
- }
+ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
+ int fetch_count = num_objects_to_move[cl];
+ void *start, *end;
+ central_cache[cl].RemoveRange(&start, &end, &fetch_count);
+ list_[cl].PushRange(fetch_count, start, end);
+ size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
+ ASSERT(N > 0);
FreeList* src = &list_[cl];
-  TCMalloc_Central_FreeList* dst = &central_cache[cl];
- SpinLockHolder h(&dst->lock_);
if (N > src->length()) N = src->length();
size_ -= N*ByteSizeForClass(cl);
- while (N-- > 0) {
- void* ptr = src->Pop();
- dst->Insert(ptr);
+
+ // We return prepackaged chains of the correct size to the central cache.
+ // TODO: Use the same format internally in the thread caches?
+ int batch_size = num_objects_to_move[cl];
+ while (N > batch_size) {
+ void *tail, *head;
+ src->PopRange(batch_size, &head, &tail);
+ central_cache[cl].InsertRange(head, tail, batch_size);
+ N -= batch_size;
}
+ void *tail, *head;
+ src->PopRange(N, &head, &tail);
+ central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
@@ -1581,9 +2254,7 @@
// that situation by dropping L/2 nodes from the free list. This
// may not release much memory, but if so we will call scavenge again
// pretty soon and the low-water marks will be high on that call.
-#ifndef WTF_CHANGES
- int64 start = CycleClock::Now();
-#endif
+ //int64 start = CycleClock::Now();
for (size_t cl = 0; cl < kNumClasses; cl++) {
FreeList* list = &list_[cl];
@@ -1595,42 +2266,55 @@
list->clear_lowwatermark();
}
-#ifndef WTF_CHANGES
- int64 finish = CycleClock::Now();
- CycleTimer ct;
- MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
-#endif
+ //int64 finish = CycleClock::Now();
+ //CycleTimer ct;
+ //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
-ALWAYS_INLINE TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
- TCMalloc_ThreadCache* ptr = NULL;
- if (!tsd_inited) {
- InitModule();
- } else {
- ptr = getThreadHeap();
- }
- if (ptr == NULL) ptr = CreateCacheIfNecessary();
- return ptr;
-}
-
-// In deletion paths, we do not try to create a thread-cache. This is
-// because we may be in the thread destruction code and may have
-// already cleaned up the cache for this thread.
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
- if (!tsd_inited) return NULL;
- return getThreadHeap();
-}
-
-void TCMalloc_ThreadCache::PickNextSample() {
+void TCMalloc_ThreadCache::PickNextSample(size_t k) {
// Make next "random" number
// x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
uint32_t r = rnd_;
rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
- // Next point is "rnd_ % (2*sample_period)". I.e., average
- // increment is "sample_period".
- bytes_until_sample_ = rnd_ % kSampleParameter;
+ // Next point is "rnd_ % (sample_period)". I.e., average
+ // increment is "sample_period/2".
+ const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
+ static int last_flag_value = -1;
+
+ if (flag_value != last_flag_value) {
+ SpinLockHolder h(&sample_period_lock);
+ int i;
+ for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
+ if (primes_list[i] >= flag_value) {
+ break;
+ }
+ }
+ sample_period = primes_list[i];
+ last_flag_value = flag_value;
+ }
+
+ bytes_until_sample_ += rnd_ % sample_period;
+
+ if (k > (static_cast<size_t>(-1) >> 2)) {
+ // If the user has asked for a huge allocation then it is possible
+ // for the code below to loop infinitely. Just return (note that
+ // this throws off the sampling accuracy somewhat, but a user who
+ // is allocating more than 1G of memory at a time can live with a
+ // minor inaccuracy in profiling of small allocations, and also
+ // would rather not wait for the loop below to terminate).
+ return;
+ }
+
+ while (bytes_until_sample_ < k) {
+ // Increase bytes_until_sample_ by enough average sampling periods
+ // (sample_period >> 1) to allow us to sample past the current
+ // allocation.
+ bytes_until_sample_ += (sample_period >> 1);
+ }
+
+ bytes_until_sample_ -= k;
}
void TCMalloc_ThreadCache::InitModule() {
@@ -1663,9 +2347,54 @@
}
}
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
+ // Create the heap and add it to the linked list
+ TCMalloc_ThreadCache *heap = threadheap_allocator.New();
+ heap->Init(tid);
+ heap->next_ = thread_heaps;
+ heap->prev_ = NULL;
+ if (thread_heaps != NULL) thread_heaps->prev_ = heap;
+ thread_heaps = heap;
+ thread_heap_count++;
+ RecomputeThreadCacheSize();
+ return heap;
+}
+
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
+#ifdef HAVE_TLS
+ // __thread is faster, but only when the kernel supports it
+ if (KernelSupportsTLS())
+ return threadlocal_heap;
+#elif COMPILER(MSVC)
+ return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
+#else
+ return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
+#endif
+}
+
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
+ TCMalloc_ThreadCache* ptr = NULL;
+ if (!tsd_inited) {
+ InitModule();
+ } else {
+ ptr = GetThreadHeap();
+ }
+ if (ptr == NULL) ptr = CreateCacheIfNecessary();
+ return ptr;
+}
+
+// In deletion paths, we do not try to create a thread-cache. This is
+// because we may be in the thread destruction code and may have
+// already cleaned up the cache for this thread.
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
+ if (!tsd_inited) return NULL;
+ void* const p = GetThreadHeap();
+ return reinterpret_cast<TCMalloc_ThreadCache*>(p);
+}
+
void TCMalloc_ThreadCache::InitTSD() {
ASSERT(!tsd_inited);
- pthread_key_create(&heap_key, DeleteCache);
+ pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
tlsIndex = TlsAlloc();
#endif
@@ -1679,7 +2408,7 @@
#ifndef WTF_CHANGES
SpinLockHolder h(&pageheap_lock);
#else
- ASSERT(pageheap_lock.IsLocked());
+ ASSERT(pageheap_lock.IsHeld());
#endif
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
@@ -1731,34 +2460,57 @@
}
}
- if (heap == NULL) {
- // Create the heap and add it to the linked list
- heap = threadheap_allocator.New();
- heap->Init(me);
- heap->next_ = thread_heaps;
- heap->prev_ = NULL;
- if (thread_heaps != NULL) thread_heaps->prev_ = heap;
- thread_heaps = heap;
- thread_heap_count++;
- RecomputeThreadCacheSize();
- }
+ if (heap == NULL) heap = NewHeap(me);
}
// We call pthread_setspecific() outside the lock because it may
// call malloc() recursively. The recursive call will never get
// here again because it will find the already allocated heap in the
// linked list of heaps.
- if (!heap->setspecific_ && tsd_inited) {
- heap->setspecific_ = true;
+ if (!heap->in_setspecific_ && tsd_inited) {
+ heap->in_setspecific_ = true;
setThreadHeap(heap);
}
return heap;
}
-void TCMalloc_ThreadCache::DeleteCache(void* ptr) {
+void TCMalloc_ThreadCache::BecomeIdle() {
+ if (!tsd_inited) return; // No caches yet
+ TCMalloc_ThreadCache* heap = GetThreadHeap();
+ if (heap == NULL) return; // No thread cache to remove
+ if (heap->in_setspecific_) return; // Do not disturb the active caller
+
+ heap->in_setspecific_ = true;
+ pthread_setspecific(heap_key, NULL);
+#ifdef HAVE_TLS
+ // Also update the copy in __thread
+ threadlocal_heap = NULL;
+#endif
+ heap->in_setspecific_ = false;
+ if (GetThreadHeap() == heap) {
+ // Somehow heap got reinstated by a recursive call to malloc
+ // from pthread_setspecific. We give up in this case.
+ return;
+ }
+
+ // We can now get rid of the heap
+ DeleteCache(heap);
+}
+
+void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
+ // Note that "ptr" cannot be NULL since pthread promises not
+ // to invoke the destructor on NULL values, but for safety,
+ // we check anyway.
+ if (ptr == NULL) return;
+#ifdef HAVE_TLS
+ // Prevent fast path of GetThreadHeap() from returning heap.
+ threadlocal_heap = NULL;
+#endif
+ DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
+}
+
+void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
// Remove all memory from heap
- TCMalloc_ThreadCache* heap;
- heap = static_cast<TCMalloc_ThreadCache*>(ptr);
heap->Cleanup();
// Remove from linked list
@@ -1798,6 +2550,7 @@
uint64_t system_bytes; // Bytes alloced from system
uint64_t thread_bytes; // Bytes in thread caches
uint64_t central_bytes; // Bytes in central cache
+ uint64_t transfer_bytes; // Bytes in central transfer cache
uint64_t pageheap_bytes; // Bytes in page heap
uint64_t metadata_bytes; // Bytes alloced for metadata
};
@@ -1806,11 +2559,14 @@
// Get stats into "r". Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
r->central_bytes = 0;
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
-    SpinLockHolder h(&central_cache[cl].lock_);
+ r->transfer_bytes = 0;
+ for (int cl = 0; cl < kNumClasses; ++cl) {
const int length = central_cache[cl].length();
+ const int tc_length = central_cache[cl].tc_length();
r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
- if (class_count) class_count[cl] = length;
+ r->transfer_bytes +=
+ static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
+ if (class_count) class_count[cl] = length + tc_length;
}
// Add stats from per-thread heaps
@@ -1851,7 +2607,7 @@
uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
cumulative += class_bytes;
out->printf("class %3d [ %8" PRIuS " bytes ] : "
- "%8" LLU " objs; %5.1f MB; %5.1f cum MB\n",
+ "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
cl, ByteSizeForClass(cl),
class_count[cl],
class_bytes / 1048576.0,
@@ -1866,22 +2622,25 @@
const uint64_t bytes_in_use = stats.system_bytes
- stats.pageheap_bytes
- stats.central_bytes
+ - stats.transfer_bytes
- stats.thread_bytes;
out->printf("------------------------------------------------\n"
- "MALLOC: %12" LLU " Heap size\n"
- "MALLOC: %12" LLU " Bytes in use by application\n"
- "MALLOC: %12" LLU " Bytes free in page heap\n"
- "MALLOC: %12" LLU " Bytes free in central cache\n"
- "MALLOC: %12" LLU " Bytes free in thread caches\n"
- "MALLOC: %12" LLU " Spans in use\n"
- "MALLOC: %12" LLU " Thread heaps in use\n"
- "MALLOC: %12" LLU " Metadata allocated\n"
+ "MALLOC: %12" PRIu64 " Heap size\n"
+ "MALLOC: %12" PRIu64 " Bytes in use by application\n"
+ "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
+ "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
+ "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
+ "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
+ "MALLOC: %12" PRIu64 " Spans in use\n"
+ "MALLOC: %12" PRIu64 " Thread heaps in use\n"
+ "MALLOC: %12" PRIu64 " Metadata allocated\n"
"------------------------------------------------\n",
stats.system_bytes,
bytes_in_use,
stats.pageheap_bytes,
stats.central_bytes,
+ stats.transfer_bytes,
stats.thread_bytes,
uint64_t(span_allocator.inuse()),
uint64_t(threadheap_allocator.inuse()),
@@ -1927,7 +2686,7 @@
break;
}
- result[used_slots+0] = reinterpret_cast<void*>(1);
+ result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
result[used_slots+1] = reinterpret_cast<void*>(stack->size);
result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
for (int d = 0; d < stack->depth; d++) {
@@ -1935,7 +2694,7 @@
}
used_slots += 3 + stack->depth;
}
- result[used_slots] = reinterpret_cast<void*>(0);
+ result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
return result;
}
#endif
@@ -2021,19 +2780,70 @@
return false;
}
+
+ virtual void MarkThreadIdle() {
+ TCMalloc_ThreadCache::BecomeIdle();
+ }
+
+ virtual void ReleaseFreeMemory() {
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->ReleaseFreePages();
+ }
};
#endif
-// RedHat 9's pthread manager allocates an object directly by calling
-// a __libc_XXX() routine. This memory block is not known to tcmalloc.
-// At cleanup time, the pthread manager calls free() on this
-// pointer, which then crashes.
+// The constructor allocates an object to ensure that initialization
+// runs before main(), and therefore we do not have a chance to become
+// multi-threaded before initialization. We also create the TSD key
+// here. Presumably by the time this constructor runs, glibc is in
+// good enough shape to handle pthread_key_create().
//
-// We hack around this problem by disabling all deallocations
-// after a global object destructor in this module has been called.
-#ifndef WTF_CHANGES
-static bool tcmalloc_is_destroyed = false;
+// The constructor also takes the opportunity to tell STL to use
+// tcmalloc. We want to do this early, before construct time, so
+// all user STL allocations go through tcmalloc (which works really
+// well for STL).
+//
+// The destructor prints stats when the program exits.
+class TCMallocGuard {
+ public:
+
+ TCMallocGuard() {
+#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
+ // Check whether the kernel also supports TLS (needs to happen at runtime)
+ CheckIfKernelSupportsTLS();
#endif
+#ifndef WTF_CHANGES
+#ifdef WIN32 // patch the windows VirtualAlloc, etc.
+ PatchWindowsFunctions(); // defined in windows/patch_functions.cc
+#endif
+#endif
+ free(malloc(1));
+ TCMalloc_ThreadCache::InitTSD();
+ free(malloc(1));
+#ifndef WTF_CHANGES
+ MallocExtension::Register(new TCMallocImplementation);
+#endif
+ }
+
+#ifndef WTF_CHANGES
+ ~TCMallocGuard() {
+ const char* env = getenv("MALLOCSTATS");
+ if (env != NULL) {
+ int level = atoi(env);
+ if (level < 1) level = 1;
+ PrintStats(level);
+ }
+#ifdef WIN32
+ UnpatchWindowsFunctions();
+#endif
+ }
+#endif
+};
+
+#ifndef WTF_CHANGES
+static TCMallocGuard module_enter_exit_hook;
+#endif
+
//-------------------------------------------------------------------
// Helpers for the exported routines below
@@ -2042,24 +2852,27 @@
#ifndef WTF_CHANGES
static Span* DoSampledAllocation(size_t size) {
- SpinLockHolder h(&pageheap_lock);
+ // Grab the stack trace outside the heap lock
+ StackTrace tmp;
+ tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
+ tmp.size = size;
+
+ SpinLockHolder h(&pageheap_lock);
// Allocate span
- Span* span = pageheap->New(pages(size == 0 ? 1 : size));
+ Span *span = pageheap->New(pages(size == 0 ? 1 : size));
if (span == NULL) {
return NULL;
}
// Allocate stack trace
- StackTrace* stack = stacktrace_allocator.New();
+ StackTrace *stack = stacktrace_allocator.New();
if (stack == NULL) {
// Sampling failed because of lack of memory
return span;
}
- // Fill stack trace and record properly
- stack->depth = GetStackTrace(stack->stack, kMaxStackDepth, 2);
- stack->size = size;
+ *stack = tmp;
span->sample = 1;
span->objects = stack;
DLL_Prepend(&sampled_objects, span);
@@ -2068,81 +2881,84 @@
}
#endif
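+
+// Helpers for the allocation paths below. SpanToMallocResult() records
+// sizeclass 0 ("not a sized-class object") for the span's first page in the
+// pagemap cache, while CheckedMallocResult() asserts, in debug builds only,
+// that any cached sizeclass agrees with the span's descriptor.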
+static inline bool CheckCachedSizeClass(void *ptr) {
+ PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+ size_t cached_value = pageheap->GetSizeClassIfCached(p);
+ return cached_value == 0 ||
+ cached_value == pageheap->GetDescriptor(p)->sizeclass;
+}
+
+static inline void* CheckedMallocResult(void *result)
+{
+ ASSERT(result == 0 || CheckCachedSizeClass(result));
+ return result;
+}
+
+static inline void* SpanToMallocResult(Span *span) {
+ pageheap->CacheSizeClass(span->start, 0);
+ return
+ CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
+}
+
static ALWAYS_INLINE void* do_malloc(size_t size) {
+ void* ret = NULL;
#ifdef WTF_CHANGES
ASSERT(!isForbidden());
#endif
-#ifndef WTF_CHANGES
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
- MESSAGE("In tcmalloc do_malloc(%" PRIuS")\n", size);
-#endif
// The following call forces module initialization
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
#ifndef WTF_CHANGES
- if (heap->SampleAllocation(size)) {
+ if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
Span* span = DoSampledAllocation(size);
- if (span == NULL) return NULL;
- return reinterpret_cast<void*>(span->start << kPageShift);
+ if (span != NULL) {
+ ret = SpanToMallocResult(span);
+ }
} else
#endif
if (size > kMaxSize) {
// Use page-level allocator
SpinLockHolder h(&pageheap_lock);
Span* span = pageheap->New(pages(size));
- if (span == NULL) return NULL;
- return reinterpret_cast<void*>(span->start << kPageShift);
+ if (span != NULL) {
+ ret = SpanToMallocResult(span);
+ }
} else {
- return heap->Allocate(size);
+ // The common case, and also the simplest. This just pops the
+    // size-appropriate freelist, after replenishing it if it's empty.
+ ret = CheckedMallocResult(heap->Allocate(size));
}
+ if (ret == NULL) errno = ENOMEM;
+ return ret;
}
static ALWAYS_INLINE void do_free(void* ptr) {
-#ifndef WTF_CHANGES
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
- MESSAGE("In tcmalloc do_free(%p)\n", ptr);
-#endif
-#if WTF_CHANGES
if (ptr == NULL) return;
-#else
- if (ptr == NULL || tcmalloc_is_destroyed) return;
-#endif
-
ASSERT(pageheap != NULL); // Should not call free() before malloc()
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
+ Span* span = NULL;
+ size_t cl = pageheap->GetSizeClassIfCached(p);
-#ifndef WTF_CHANGES
- if (span == NULL) {
- // We've seen systems where a piece of memory allocated using the
- // allocator built in to libc is deallocated using free() and
- // therefore ends up inside tcmalloc which can't find the
- // corresponding span. We silently throw this object on the floor
- // instead of crashing.
- MESSAGE("tcmalloc: ignoring potential glibc-2.3.5 induced free "
- "of an unknown object %p\n", ptr);
- return;
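+  // A cached sizeclass of 0 means "unknown"; fall back to the span and
+  // prime the cache so later frees of this page take the fast path.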
+ if (cl == 0) {
+ span = pageheap->GetDescriptor(p);
+ cl = span->sizeclass;
+ pageheap->CacheSizeClass(p, cl);
}
-#endif
-
- ASSERT(span != NULL);
- ASSERT(!span->free);
- const size_t cl = span->sizeclass;
if (cl != 0) {
- ASSERT(!span->sample);
+ ASSERT(!pageheap->GetDescriptor(p)->sample);
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
if (heap != NULL) {
heap->Deallocate(ptr, cl);
} else {
// Delete directly into central cache
-      SpinLockHolder h(&central_cache[cl].lock_);
- central_cache[cl].Insert(ptr);
+ SLL_SetNext(ptr, NULL);
+ central_cache[cl].InsertRange(ptr, ptr, 1);
}
} else {
SpinLockHolder h(&pageheap_lock);
ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- ASSERT(span->start == p);
+ ASSERT(span != NULL && span->start == p);
if (span->sample) {
DLL_Remove(span);
stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
@@ -2181,7 +2997,7 @@
}
if (cl < kNumClasses) {
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
- return heap->Allocate(class_to_size[cl]);
+ return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
}
}
@@ -2193,17 +3009,16 @@
// TODO: We could put the rest of this page in the appropriate
// TODO: cache but it does not seem worth it.
Span* span = pageheap->New(pages(size));
- if (span == NULL) return NULL;
- return reinterpret_cast<void*>(span->start << kPageShift);
+ return span == NULL ? NULL : SpanToMallocResult(span);
}
// Allocate extra pages and carve off an aligned portion
- const int alloc = pages(size + align);
+ const Length alloc = pages(size + align);
Span* span = pageheap->New(alloc);
if (span == NULL) return NULL;
// Skip starting portion so that we end up aligned
- int skip = 0;
+ Length skip = 0;
while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
skip++;
}
@@ -2215,62 +3030,52 @@
}
// Skip trailing portion that we do not need to return
- const size_t needed = pages(size);
+ const Length needed = pages(size);
ASSERT(span->length >= needed);
if (span->length > needed) {
Span* trailer = pageheap->Split(span, needed);
pageheap->Delete(trailer);
}
- return reinterpret_cast<void*>(span->start << kPageShift);
+ return SpanToMallocResult(span);
}
#endif
+// Helpers for use by exported routines below:
-// The constructor allocates an object to ensure that initialization
-// runs before main(), and therefore we do not have a chance to become
-// multi-threaded before initialization. We also create the TSD key
-// here. Presumably by the time this constructor runs, glibc is in
-// good enough shape to handle pthread_key_create().
-//
-// The constructor also takes the opportunity to tell STL to use
-// tcmalloc. We want to do this early, before construct time, so
-// all user STL allocations go through tcmalloc (which works really
-// well for STL).
-//
-// The destructor prints stats when the program exits.
-
-class TCMallocGuard {
- public:
- TCMallocGuard() {
#ifndef WTF_CHANGES
- char *envval;
- if ((envval = getenv("TCMALLOC_DEBUG"))) {
- TCMallocDebug::level = atoi(envval);
- MESSAGE("Set tcmalloc debugging level to %d\n", TCMallocDebug::level);
- }
+static inline void do_malloc_stats() {
+ PrintStats(1);
+}
#endif
- do_free(do_malloc(1));
- TCMalloc_ThreadCache::InitTSD();
- do_free(do_malloc(1));
-#ifndef WTF_CHANGES
- MallocExtension::Register(new TCMallocImplementation);
-#endif
- }
-#ifndef WTF_CHANGES
- ~TCMallocGuard() {
- const char* env = getenv("MALLOCSTATS");
- if (env != NULL) {
- int level = atoi(env);
- if (level < 1) level = 1;
- PrintStats(level);
- }
- }
-#endif
-};
+static inline int do_mallopt(int, int) {
+ return 1; // Indicates error
+}
-#ifndef WTF_CHANGES
-static TCMallocGuard module_enter_exit_hook;
+#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
+static inline struct mallinfo do_mallinfo() {
+ TCMallocStats stats;
+ ExtractStats(&stats, NULL);
+
+ // Just some of the fields are filled in.
+ struct mallinfo info;
+ memset(&info, 0, sizeof(info));
+
+  // Unfortunately, the struct contains "int" fields, so some of the
+ // size values will be truncated.
+ info.arena = static_cast<int>(stats.system_bytes);
+ info.fsmblks = static_cast<int>(stats.thread_bytes
+ + stats.central_bytes
+ + stats.transfer_bytes);
+ info.fordblks = static_cast<int>(stats.pageheap_bytes);
+ info.uordblks = static_cast<int>(stats.system_bytes
+ - stats.thread_bytes
+ - stats.central_bytes
+ - stats.transfer_bytes
+ - stats.pageheap_bytes);
+
+ return info;
+}
#endif
//-------------------------------------------------------------------
@@ -2354,11 +3159,18 @@
// Get the size of the old entry
const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
+ size_t cl = pageheap->GetSizeClassIfCached(p);
+ Span *span = NULL;
size_t old_size;
- if (span->sizeclass != 0) {
- old_size = ByteSizeForClass(span->sizeclass);
+ if (cl == 0) {
+ span = pageheap->GetDescriptor(p);
+ cl = span->sizeclass;
+ pageheap->CacheSizeClass(p, cl);
+ }
+ if (cl != 0) {
+ old_size = ByteSizeForClass(cl);
} else {
+ ASSERT(span != NULL);
old_size = span->length << kPageShift;
}
@@ -2377,60 +3189,120 @@
#ifndef WTF_CHANGES
MallocHook::InvokeDeleteHook(old_ptr);
#endif
- free(old_ptr);
+ // We could use a variant of do_free() that leverages the fact
+ // that we already know the sizeclass of old_ptr. The benefit
+ // would be small, so don't bother.
+ do_free(old_ptr);
return new_ptr;
} else {
return old_ptr;
}
}
-#ifndef COMPILER_INTEL
-#define OPNEW_THROW
-#define OPDELETE_THROW
-#else
-#define OPNEW_THROW throw(std::bad_alloc)
-#define OPDELETE_THROW throw()
-#endif
-
#ifndef WTF_CHANGES
-void* operator new(size_t size) OPNEW_THROW {
- void* p = do_malloc(size);
- if (p == NULL) {
- MESSAGE("Unable to allocate %" PRIuS " bytes: new failed\n", size);
- abort();
+static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
+
+static inline void* cpp_alloc(size_t size, bool nothrow) {
+ for (;;) {
+ void* p = do_malloc(size);
+#ifdef PREANSINEW
+ return p;
+#else
+ if (p == NULL) { // allocation failed
+ // Get the current new handler. NB: this function is not
+ // thread-safe. We make a feeble stab at making it so here, but
+ // this lock only protects against tcmalloc interfering with
+ // itself, not with other libraries calling set_new_handler.
+ std::new_handler nh;
+ {
+ SpinLockHolder h(&set_new_handler_lock);
+ nh = std::set_new_handler(0);
+ (void) std::set_new_handler(nh);
+ }
+ // If no new_handler is established, the allocation failed.
+ if (!nh) {
+ if (nothrow) return 0;
+ throw std::bad_alloc();
+ }
+ // Otherwise, try the new_handler. If it returns, retry the
+ // allocation. If it throws std::bad_alloc, fail the allocation.
+      // If it throws something else, don't interfere.
+ try {
+ (*nh)();
+ } catch (const std::bad_alloc&) {
+ if (!nothrow) throw;
+ return p;
+ }
+ } else { // allocation success
+ return p;
+ }
+#endif
}
+}
+
+void* operator new(size_t size) {
+ void* p = cpp_alloc(size, false);
+ // We keep this next instruction out of cpp_alloc for a reason: when
+ // it's in, and new just calls cpp_alloc, the optimizer may fold the
+ // new call into cpp_alloc, which messes up our whole section-based
+ // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
+ // isn't the last thing this fn calls, and prevents the folding.
MallocHook::InvokeNewHook(p, size);
return p;
}
-void operator delete(void* p) OPDELETE_THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void* operator new[](size_t size) OPNEW_THROW {
- void* p = do_malloc(size);
- if (p == NULL) {
- MESSAGE("Unable to allocate %" PRIuS " bytes: new failed\n", size);
- abort();
- }
+void* operator new(size_t size, const std::nothrow_t&) __THROW {
+ void* p = cpp_alloc(size, true);
MallocHook::InvokeNewHook(p, size);
return p;
}
-void operator delete[](void* p) OPDELETE_THROW {
+void operator delete(void* p) __THROW {
MallocHook::InvokeDeleteHook(p);
do_free(p);
}
-extern "C" void* memalign(size_t align, size_t size) {
+void operator delete(void* p, const std::nothrow_t&) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+void* operator new[](size_t size) {
+ void* p = cpp_alloc(size, false);
+ // We keep this next instruction out of cpp_alloc for a reason: when
+ // it's in, and new just calls cpp_alloc, the optimizer may fold the
+ // new call into cpp_alloc, which messes up our whole section-based
+ // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
+ // isn't the last thing this fn calls, and prevents the folding.
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void* operator new[](size_t size, const std::nothrow_t&) __THROW {
+ void* p = cpp_alloc(size, true);
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void operator delete[](void* p) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+void operator delete[](void* p, const std::nothrow_t&) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+extern "C" void* memalign(size_t align, size_t size) __THROW {
void* result = do_memalign(align, size);
MallocHook::InvokeNewHook(result, size);
return result;
}
-extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) {
+extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
+ __THROW {
if (((align % sizeof(void*)) != 0) ||
((align & (align - 1)) != 0) ||
(align == 0)) {
@@ -2449,7 +3321,7 @@
static size_t pagesize = 0;
-extern "C" void* valloc(size_t size) {
+extern "C" void* valloc(size_t size) __THROW {
// Allocate page-aligned object of length >= size bytes
if (pagesize == 0) pagesize = getpagesize();
void* result = do_memalign(pagesize, size);
@@ -2457,7 +3329,7 @@
return result;
}
-extern "C" void* pvalloc(size_t size) {
+extern "C" void* pvalloc(size_t size) __THROW {
// Round up size to a multiple of pagesize
if (pagesize == 0) pagesize = getpagesize();
size = (size + pagesize - 1) & ~(pagesize - 1);
@@ -2467,45 +3339,33 @@
}
extern "C" void malloc_stats(void) {
- PrintStats(1);
+ do_malloc_stats();
}
extern "C" int mallopt(int cmd, int value) {
- return 1; // Indicates error
+ return do_mallopt(cmd, value);
}
+#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
-
- // Just some of the fields are filled in.
- struct mallinfo info;
- memset(&info, 0, sizeof(info));
-
- // Unfortunately, the struct contains "int" field, so some of the
- // size values will be truncated.
- info.arena = static_cast<int>(stats.system_bytes);
- info.fsmblks = static_cast<int>(stats.thread_bytes + stats.central_bytes);
- info.fordblks = static_cast<int>(stats.pageheap_bytes);
- info.uordblks = static_cast<int>(stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.pageheap_bytes);
-
- return info;
+ return do_mallinfo();
}
+#endif
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa). Since we provide
// our own implementations of malloc/free, we need to make sure that
-// the __libc_XXX variants also point to the same implementations.
+// the __libc_XXX variants (defined as part of glibc) also point to
+// the same implementations.
//-------------------------------------------------------------------
+#if defined(__GLIBC__)
extern "C" {
-#if COMPILER(GCC) && HAVE(__ATTRIBUTE__)
- // Potentially faster variants that use the gcc alias extension
-#define ALIAS(x) __attribute__ ((weak, alias (x)))
+# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
+ // Potentially faster variants that use the gcc alias extension.
+ // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
+# define ALIAS(x) __attribute__ ((weak, alias (x)))
void* __libc_malloc(size_t size) ALIAS("malloc");
void __libc_free(void* ptr) ALIAS("free");
void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
@@ -2515,8 +3375,8 @@
void* __libc_valloc(size_t size) ALIAS("valloc");
void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
-#undef ALIAS
-#else
+# undef ALIAS
+# else /* not __GNUC__ */
// Portable wrappers
void* __libc_malloc(size_t size) { return malloc(size); }
void __libc_free(void* ptr) { free(ptr); }
@@ -2529,9 +3389,24 @@
int __posix_memalign(void** r, size_t a, size_t s) {
return posix_memalign(r, a, s);
}
-#endif
-
+# endif /* __GNUC__ */
}
+#endif /* __GLIBC__ */
+
+// Override __libc_memalign in libc on linux boxes specially.
+// They have a bug in libc that causes them to (very rarely) allocate
+// with __libc_memalign() yet deallocate with free() and the
+// definitions above don't catch it.
+// This function is an exception to the rule of calling MallocHook method
+// from the stack frame of the allocation function;
+// heap-checker handles this special case explicitly.
+static void *MemalignOverride(size_t align, size_t size, const void *caller)
+ __THROW {
+ void* result = do_memalign(align, size);
+ MallocHook::InvokeNewHook(result, size);
+ return result;
+}
+void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#endif
@@ -2740,7 +3615,7 @@
void FastMallocZone::init()
{
- static FastMallocZone zone(getPageHeap(), &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
+ static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
}
#endif
diff --git a/JavaScriptCore/wtf/TCPackedCache.h b/JavaScriptCore/wtf/TCPackedCache.h
new file mode 100644
index 0000000..0464f8f
--- /dev/null
+++ b/JavaScriptCore/wtf/TCPackedCache.h
@@ -0,0 +1,234 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Geoff Pike
+//
+// This file provides a minimal cache that can hold a <key, value> pair
+// with little if any wasted space. The types of the key and value
+// must be unsigned integral types or at least have unsigned semantics
+// for >>, casting, and similar operations.
+//
+// Synchronization is not provided. However, the cache is implemented
+// as an array of cache entries whose type is chosen at compile time.
+// If a[i] is atomic on your hardware for the chosen array type then
+// raciness will not necessarily lead to bugginess. The cache entries
+// must be large enough to hold a partial key and a value packed
+// together. The partial keys are bit strings of length
+// kKeybits - kHashbits, and the values are bit strings of length kValuebits.
+//
+// In an effort to use minimal space, every cache entry represents
+// some <key, value> pair; the class provides no way to mark a cache
+// entry as empty or uninitialized. In practice, you may want to have
+// reserved keys or values to get around this limitation. For example, in
+// tcmalloc's PageID-to-sizeclass cache, a value of 0 is used as
+// "unknown sizeclass."
+//
+// Usage Considerations
+// --------------------
+//
+// kHashbits controls the size of the cache. The best value for
+// kHashbits will of course depend on the application. Perhaps try
+// tuning the value of kHashbits by measuring different values on your
+// favorite benchmark. Also remember not to be a pig; other
+// programs that need resources may suffer if you are.
+//
+// The main uses for this class will be when performance is
+// critical and there's a convenient type to hold the cache's
+// entries. As described above, the number of bits required
+// for a cache entry is (kKeybits - kHashbits) + kValuebits. Suppose
+// kKeybits + kValuebits is 43. Then it probably makes sense to
+// choose kHashbits >= 11 so that cache entries fit in a uint32.
+//
+// On the other hand, suppose kKeybits = kValuebits = 64. Then
+// using this class may be less worthwhile. You'll probably
+// be using 128 bits for each entry anyway, so maybe just pick
+// a hash function, H, and use an array indexed by H(key):
+// void Put(K key, V value) { a_[H(key)] = pair<K, V>(key, value); }
+// V GetOrDefault(K key, V default) { const pair<K, V> &p = a_[H(key)]; ... }
+// etc.
+//
+// Further Details
+// ---------------
+//
+// For caches used only by one thread, the following is true:
+// 1. For a cache c,
+// (c.Put(key, value), c.GetOrDefault(key, 0)) == value
+// and
+// (c.Put(key, value), <...>, c.GetOrDefault(key, 0)) == value
+// if the elided code contains no c.Put calls.
+//
+// 2. Has(key) will return false if no <key, value> pair with that key
+// has ever been Put. However, a newly initialized cache will have
+// some <key, value> pairs already present. When you create a new
+// cache, you must specify an "initial value." The initialization
+// procedure is equivalent to Clear(initial_value), which is
+// equivalent to Put(k, initial_value) for all keys k from 0 to
+// 2^kHashbits - 1.
+//
+// 3. If key and key' differ then the only way Put(key, value) may
+// cause Has(key') to change is that Has(key') may change from true to
+// false. Furthermore, a Put() call that doesn't change Has(key')
+// doesn't change GetOrDefault(key', ...) either.
+//
+// Implementation details:
+//
+// This is a direct-mapped cache with 2^kHashbits entries;
+// the hash function simply takes the low bits of the key.
+// So, we don't have to store the low bits of the key in the entries.
+// Instead, an entry is the high bits of a key and a value, packed
+// together. E.g., a 20 bit key and a 7 bit value only require
+// a uint16 for each entry if kHashbits >= 11.
+//
+// Alternatives to this scheme will be added as needed.
+
+#ifndef TCMALLOC_PACKED_CACHE_INL_H__
+#define TCMALLOC_PACKED_CACHE_INL_H__
+
+#ifndef WTF_CHANGES
+#include "base/basictypes.h" // for COMPILE_ASSERT
+#include "base/logging.h" // for DCHECK
+#endif
+
+#ifndef DCHECK_EQ
+#define DCHECK_EQ(val1, val2) ASSERT((val1) == (val2))
+#endif
+
+// A safe way of doing "(1 << n) - 1" -- without worrying about overflow
+// Note this will all be resolved to a constant expression at compile-time
+#define N_ONES_(IntType, N) \
+ ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \
+ (static_cast<IntType>(1) << ((N)-1))) )
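+// For example, N_ONES_(uint32, 8) == 0xFF and N_ONES_(uint32, 0) == 0.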
+
+// The types K and V provide upper bounds on the number of valid keys
+// and values, but we explicitly require the keys to be less than
+// 2^kKeybits and the values to be less than 2^kValuebits. The size of
+// the table is controlled by kHashbits, and the type of each entry in
+// the cache is T. See also the big comment at the top of the file.
+template <int kKeybits, typename T>
+class PackedCache {
+ public:
+ typedef uintptr_t K;
+ typedef size_t V;
+ static const size_t kHashbits = 12;
+ static const size_t kValuebits = 8;
+
+ explicit PackedCache(V initial_value) {
+ COMPILE_ASSERT(kKeybits <= sizeof(K) * 8, key_size);
+ COMPILE_ASSERT(kValuebits <= sizeof(V) * 8, value_size);
+ COMPILE_ASSERT(kHashbits <= kKeybits, hash_function);
+ COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits,
+ entry_size_must_be_big_enough);
+ Clear(initial_value);
+ }
+
+ void Put(K key, V value) {
+ DCHECK_EQ(key, key & kKeyMask);
+ DCHECK_EQ(value, value & kValueMask);
+ array_[Hash(key)] = static_cast<T>(KeyToUpper(key) | value);
+ }
+
+ bool Has(K key) const {
+ DCHECK_EQ(key, key & kKeyMask);
+ return KeyMatch(array_[Hash(key)], key);
+ }
+
+ V GetOrDefault(K key, V default_value) const {
+ // As with other code in this class, we touch array_ as few times
+ // as we can. Assuming entries are read atomically (e.g., their
+ // type is uintptr_t on most hardware) then certain races are
+ // harmless.
+ DCHECK_EQ(key, key & kKeyMask);
+ T entry = array_[Hash(key)];
+ return KeyMatch(entry, key) ? EntryToValue(entry) : default_value;
+ }
+
+ void Clear(V value) {
+ DCHECK_EQ(value, value & kValueMask);
+ for (int i = 0; i < 1 << kHashbits; i++) {
+ array_[i] = static_cast<T>(value);
+ }
+ }
+
+ private:
+ // We are going to pack a value and the upper part of a key into
+ // an entry of type T. The UPPER type is for the upper part of a key,
+ // after the key has been masked and shifted for inclusion in an entry.
+ typedef T UPPER;
+
+ static V EntryToValue(T t) { return t & kValueMask; }
+
+ static UPPER EntryToUpper(T t) { return t & kUpperMask; }
+
+ // If v is a V and u is an UPPER then you can create an entry by
+ // doing u | v. kHashbits determines where in a K to find the upper
+ // part of the key, and kValuebits determines where in the entry to put
+ // it.
+ static UPPER KeyToUpper(K k) {
+ const int shift = kHashbits - kValuebits;
+ // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
+ return static_cast<T>(k >> shift) & kUpperMask;
+ }
+
+ // This is roughly the inverse of KeyToUpper(). Some of the key has been
+ // thrown away, since KeyToUpper() masks off the low bits of the key.
+ static K UpperToPartialKey(UPPER u) {
+ DCHECK_EQ(u, u & kUpperMask);
+ const int shift = kHashbits - kValuebits;
+ // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
+ return static_cast<K>(u) << shift;
+ }
+
+ static size_t Hash(K key) {
+ return static_cast<size_t>(key) & N_ONES_(size_t, kHashbits);
+ }
+
+ // Does the entry's partial key match the relevant part of the given key?
+ static bool KeyMatch(T entry, K key) {
+ return ((KeyToUpper(key) ^ entry) & kUpperMask) == 0;
+ }
+
+ static const size_t kTbits = 8 * sizeof(T);
+ static const int kUpperbits = kKeybits - kHashbits;
+
+ // For masking a K.
+ static const K kKeyMask = N_ONES_(K, kKeybits);
+
+ // For masking a T.
+ static const T kUpperMask = N_ONES_(T, kUpperbits) << kValuebits;
+
+ // For masking a V or a T.
+ static const V kValueMask = N_ONES_(V, kValuebits);
+
+ T array_[1 << kHashbits];
+};
+
+#undef N_ONES_
+
+#endif // TCMALLOC_PACKED_CACHE_INL_H__
diff --git a/JavaScriptCore/wtf/TCPageMap.h b/JavaScriptCore/wtf/TCPageMap.h
index 5c45139..21a87e4 100644
--- a/JavaScriptCore/wtf/TCPageMap.h
+++ b/JavaScriptCore/wtf/TCPageMap.h
@@ -78,6 +78,8 @@
return true;
}
+ void PreallocateMoreMemory() {}
+
// REQUIRES "k" is in range "[0,2^BITS-1]".
// REQUIRES "k" has been ensured before.
//
@@ -155,6 +157,11 @@
return true;
}
+ void PreallocateMoreMemory() {
+ // Allocate enough to keep track of all possible pages
+ Ensure(0, 1 << BITS);
+ }
+
#ifdef WTF_CHANGES
template<class Visitor, class MemoryReader>
void visit(const Visitor& visitor, const MemoryReader& reader)
@@ -254,6 +261,9 @@
return true;
}
+ void PreallocateMoreMemory() {
+ }
+
#ifdef WTF_CHANGES
template<class Visitor, class MemoryReader>
void visit(const Visitor& visitor, const MemoryReader& reader) {
diff --git a/JavaScriptCore/wtf/TCSystemAlloc.cpp b/JavaScriptCore/wtf/TCSystemAlloc.cpp
index 9d4d8da..a5bf9ac 100644
--- a/JavaScriptCore/wtf/TCSystemAlloc.cpp
+++ b/JavaScriptCore/wtf/TCSystemAlloc.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2005, Google Inc.
+// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -370,3 +370,41 @@
}
return NULL;
}
+
+#ifndef MADV_DONTNEED
+void TCMalloc_SystemRelease(void*, size_t) {}
+#else
+void TCMalloc_SystemRelease(void* start, size_t length) {
+ if (FLAGS_malloc_devmem_start) {
+ // It's not safe to use MADV_DONTNEED if we've been mapping
+ // /dev/mem for heap memory
+ return;
+ }
+ if (pagesize == 0) pagesize = getpagesize();
+ const size_t pagemask = pagesize - 1;
+
+ size_t new_start = reinterpret_cast<size_t>(start);
+ size_t end = new_start + length;
+ size_t new_end = end;
+
+ // Round up the starting address and round down the ending address
+ // to be page aligned:
+ new_start = (new_start + pagesize - 1) & ~pagemask;
+ new_end = new_end & ~pagemask;
+
+ ASSERT((new_start & pagemask) == 0);
+ ASSERT((new_end & pagemask) == 0);
+ ASSERT(new_start >= reinterpret_cast<size_t>(start));
+ ASSERT(new_end <= end);
+
+ if (new_end > new_start) {
+ // Note -- ignoring most return codes, because if this fails it
+ // doesn't matter...
+ while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
+ MADV_DONTNEED) == -1 &&
+ errno == EAGAIN) {
+ // NOP
+ }
+ }
+}
+#endif
diff --git a/JavaScriptCore/wtf/TCSystemAlloc.h b/JavaScriptCore/wtf/TCSystemAlloc.h
index 462f3927e..a4d14ed 100644
--- a/JavaScriptCore/wtf/TCSystemAlloc.h
+++ b/JavaScriptCore/wtf/TCSystemAlloc.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2005, Google Inc.
+// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without