namespace __sanitizer {
-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Memory allocator statistics
enum AllocatorStat {
- AllocatorStatMalloced,
- AllocatorStatFreed,
- AllocatorStatMmapped,
- AllocatorStatUnmapped,
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
AllocatorStatCount
};
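// Both remaining stats are net byte counters: callers Add() on
// allocation/mapping and Sub() on deallocation/unmapping, so Get() reports
// the bytes currently allocated and currently mapped.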
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
void Init() {
internal_memset(this, 0, sizeof(*this));
}
+ void InitLinkerInitialized() {}
- void Add(AllocatorStat i, u64 v) {
+ void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- void Set(AllocatorStat i, u64 v) {
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- u64 Get(AllocatorStat i) const {
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
return atomic_load(&stats_[i], memory_order_relaxed);
}
friend class AllocatorGlobalStats;
AllocatorStats *next_;
AllocatorStats *prev_;
- atomic_uint64_t stats_[AllocatorStatCount];
+ atomic_uintptr_t stats_[AllocatorStatCount];
};
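// Usage sketch (illustrative only, assuming the members above are public as
// in the full header; the function name is hypothetical): per-thread stats
// are net counters, so a cache adds on allocation and subtracts on
// deallocation, and Get() reports what is currently live in that cache.
inline void ExampleThreadStatsUsage() {
  AllocatorStats stats;
  stats.Init();
  stats.Add(AllocatorStatAllocated, 128);  // an allocation of 128 bytes
  stats.Sub(AllocatorStatAllocated, 128);  // its matching deallocation
  CHECK_EQ(stats.Get(AllocatorStatAllocated), 0);  // nothing left live
}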
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized() {
next_ = this;
prev_ = this;
}
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
}
void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
  for (int i = 0; i < AllocatorStatCount; i++)
    s[i] += stats->Get(AllocatorStat(i));
  stats = stats->next_;
  if (stats == this)
    break;
}
+ // All stats must be non-negative: cross-thread frees leave negative net
+ // values in individual caches, so a total can transiently appear negative
+ // while threads keep updating their counters; clamp such totals to zero.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
}
private:
void Init() {
CHECK_EQ(kSpaceBeg,
- reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+ reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
MapWithCallback(kSpaceEnd, AdditionalSize());
}
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
Batch *b = region->free_list.Pop();
- if (b == 0)
+ if (!b)
b = PopulateFreeList(stat, c, class_id, region);
region->n_allocated += b->count;
return b;
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
- if (!size) return 0;
+ if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return 0;
+ if (class_id >= kNumClasses) return nullptr;
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
- return 0;
+ return nullptr;
}
static uptr GetActuallyAllocatedSize(void *p) {
}
}
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
RegionInfo *GetRegionInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}
+
void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
void PrintStats() {
}
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMmapped, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = 0;
+ Batch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (b == 0) {
+ if (!b) {
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
if (b->count == max_count) {
CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
- b = 0;
+ b = nullptr;
}
}
if (b) {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized(bool may_return_null) {
page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
- if (map_size < size) return AllocatorReturnNull(); // Overflow.
+ // Overflow.
+ if (map_size < size)
+ return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
- CHECK_EQ(0, res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
CHECK_LE(res + size, map_end);
Header *h = GetHeader(res);
h->size = size;
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatMalloced, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
}
return reinterpret_cast<void*>(res);
}
+ void *ReturnNullOrDie() {
+ if (atomic_load(&may_return_null_, memory_order_acquire))
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
- stat->Add(AllocatorStatFreed, h->map_size);
- stat->Add(AllocatorStatUnmapped, h->map_size);
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
}
bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != 0;
+ return GetBlockBegin(p) != nullptr;
}
uptr GetActuallyAllocatedSize(void *p) {
nearest_chunk = ch;
}
if (!nearest_chunk)
- return 0;
+ return nullptr;
Header *h = reinterpret_cast<Header *>(nearest_chunk);
CHECK_GE(nearest_chunk, h->map_beg);
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
CHECK_LE(nearest_chunk, p);
if (h->map_beg + h->map_size <= p)
- return 0;
+ return nullptr;
return GetUser(h);
}
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
- if (!n) return 0;
+ if (!n) return nullptr;
if (!chunks_sorted_) {
// Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
SortArray(reinterpret_cast<uptr*>(chunks_), n);
chunks_[n - 1]->map_size;
}
if (p < min_mmap_ || p >= max_mmap_)
- return 0;
+ return nullptr;
uptr beg = 0, end = n - 1;
// This loop is a log(n) lower_bound. It does not check for the exact match
// to avoid expensive cache-thrashing loads.
Header *h = chunks_[beg];
if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return 0;
+ return nullptr;
return GetUser(h);
}
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
+ atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void Init() {
+ void InitCommon(bool may_return_null) {
primary_.Init();
- secondary_.Init();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(bool may_return_null) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null);
+ }
+
+ void Init(bool may_return_null) {
+ secondary_.Init(may_return_null);
stats_.Init();
+ InitCommon(may_return_null);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false) {
+ bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
- return AllocatorReturnNull();
+ return ReturnNullOrDie();
+ if (check_rss_limit && RssLimitIsExceeded())
+ return ReturnNullOrDie();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
return res;
}
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDie() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
return Allocate(cache, new_size, alignment);
if (!new_size) {
Deallocate(cache, p);
- return 0;
+ return nullptr;
}
CHECK(PointerIsMine(p));
uptr old_size = GetActuallyAllocatedSize(p);
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
};
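// Usage sketch (illustrative only; the allocator/cache types and the
// function name are hypothetical stand-ins for a tool's concrete
// instantiations): how the new may_return_null and RSS-limit knobs combine.
template <class Allocator, class Cache>
inline void *ExampleAllocateWithRssCheck(Allocator *a, Cache *c,
                                         uptr size, uptr alignment) {
  // Assumes the tool called a->Init(may_return_null) at startup and that a
  // watcher thread calls a->SetRssLimitIsExceeded(true) when the process
  // grows past its soft RSS limit.
  void *p = a->Allocate(c, size, alignment, /*cleared=*/false,
                        /*check_rss_limit=*/true);
  // p is null only when may_return_null is set; otherwise the allocator has
  // already terminated via ReportAllocatorCannotReturnNull().
  return p;
}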
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
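// Usage sketch (illustrative only; types and names are hypothetical): a
// calloc-style wrapper combining the overflow check with the null/die
// policy exposed by the combined allocator.
template <class Allocator, class Cache>
inline void *ExampleCalloc(Allocator *a, Cache *c, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return a->ReturnNullOrDie();  // nullptr or a fatal report, per the flag
  return a->Allocate(c, nmemb * size, /*alignment=*/8, /*cleared=*/true);
}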
-} // namespace __sanitizer
-
-#endif // SANITIZER_ALLOCATOR_H
+} // namespace __sanitizer
+#endif // SANITIZER_ALLOCATOR_H