//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//
17 #include "scudo_allocator.h"
18 #include "scudo_crc32.h"
19 #include "scudo_flags.h"
20 #include "scudo_tsd.h"
21 #include "scudo_utils.h"
23 #include "sanitizer_common/sanitizer_allocator_checks.h"
24 #include "sanitizer_common/sanitizer_allocator_interface.h"
25 #include "sanitizer_common/sanitizer_quarantine.h"

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
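
// Note: when the CRC32 intrinsics are available at compile time (first branch
// above), the runtime HashAlgorithm value is effectively ignored. The dispatch
// on HashAlgorithm only matters for the generic build, where hardware support
// is detected at start-up in init() and CRC32Hardware is selected if possible.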

static ScudoBackendAllocator &getBackendAllocator();

namespace Chunk {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any warranty that it wouldn't have been tampered. To
  // prevent this, we work with a local copy of the header.
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                    AlignedChunkHeaderSize -
                                    (Header->Offset << MinAlignmentLog));
  }

  // Returns the packed header of a chunk, as an atomic type.
  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  AlignedChunkHeaderSize);
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
        getBackendPtr(Ptr, Header), Header->ClassId);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
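
  // Note on the checksum: it is a CRC32 of the global Cookie, the chunk
  // address and the header with its Checksum field zeroed, truncated to 16
  // bits. Including the address and the per-process Cookie means a header
  // copied verbatim to another chunk, or forged without knowledge of the
  // Cookie, will fail verification with high probability.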

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // incur termination in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  static INLINE void eraseHeader(void *Ptr) {
    const PackedHeader NullPackedHeader = 0;
    atomic_store_relaxed(getAtomicHeader(Ptr), NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr);
    }
  }
}  // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Ptr);
    }
    Chunk::eraseHeader(Ptr);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
                                              Header.ClassId);
    else
      getBackendAllocator().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCache *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
};

typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<ScudoQuarantineCache *>(
      TSD->QuarantineCachePlaceHolder);
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit ScudoAllocator(LinkerInitialized)
      : AllocatorQuarantine(LINKER_INITIALIZED) {}

  void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
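    // Worked example with illustrative values (the real ones depend on the
    // SizeClassMap configuration): with SizeClassMap::kMaxSize == 1 << 17,
    // MinAlignment == 16 (MinAlignmentLog == 4) and AlignedChunkHeaderSize ==
    // 16, MaxPrimaryAlignment is 1 << 16 and MaxOffset is
    // (65536 - 16) >> 4 == 4095, which must fit in the Offset bitfield.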
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    const uptr LargestClassId = SizeClassMap::kLargestClassID;
    Header.ClassId = LargestClassId;
    if (Header.ClassId != LargestClassId) {
      dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
    }
  }

  void init() {
    SanitizerToolName = "Scudo";
    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    AllocatorQuarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  // Opportunistic RSS limit check. This will update the RSS limit status, if
  // it can, every 100ms, otherwise it will just return the current one.
  bool isRssLimitExceeded() {
    u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
    const u64 CurrentCheck = MonotonicNanoTime();
    if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
      return atomic_load_relaxed(&RssLimitExceeded);
    if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                      CurrentCheck, memory_order_relaxed))
      return atomic_load_relaxed(&RssLimitExceeded);
    // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
    //                RSS from /proc/self/statm by default. We might want to
    //                call getrusage directly, even if it's less accurate.
    const uptr CurrentRssMb = GetRSS() >> 20;
    if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
      Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, HardRssLimitMb, CurrentRssMb);
      DumpProcessMap();
      Die();
    }
    if (SoftRssLimitMb) {
      if (atomic_load_relaxed(&RssLimitExceeded)) {
        if (CurrentRssMb <= SoftRssLimitMb)
          atomic_store_relaxed(&RssLimitExceeded, false);
      } else {
        if (CurrentRssMb > SoftRssLimitMb) {
          atomic_store_relaxed(&RssLimitExceeded, true);
          Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
                 SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
        }
      }
    }
    return atomic_load_relaxed(&RssLimitExceeded);
  }
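
  // Note: 100ULL * 1000000ULL nanoseconds is 100 milliseconds, so the RSS
  // value is refreshed at most ten times per second; between refreshes the
  // cached RssLimitExceeded flag is returned as-is.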

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded()))
      return FailureHandler::OnOOM();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      ScudoTSD *TSD = getTSDAndLock();
      BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
      TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0,
             BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      CHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    CHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk::eraseHeader(Ptr);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        ScudoTSD *TSD = getTSDAndLock();
        getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
                                                Header->ClassId);
        TSD->unlock();
      } else {
        getBackendAllocator().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small memory amount was allocated with a larger alignment, we want
      // to take that into account. Otherwise the Quarantine would be filled
      // with tiny chunks, taking a lot of VA memory. This is an approximation
      // of the usable size, that allows us to not call
      // GetActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      ScudoTSD *TSD = getTSDAndLock();
      AllocatorQuarantine.Put(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache), Ptr,
                              EstimatedSize);
      TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!Chunk::isAligned(Ptr))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", Ptr);
    }
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Ptr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd Chunks, that can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch when deallocating "
                         "address %p\n", Ptr);
        }
      }
    }
    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
        Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Ptr);
      }
    }
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    if (UNLIKELY(!Chunk::isAligned(OldPtr))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
        dieWithMessage("ERROR: allocation type mismatch when reallocating "
                       "address %p\n", OldPtr);
      }
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is small enough: keep the chunk and just update its header.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }
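
  // Note: the in-place path above is taken only when the new size fits in the
  // existing chunk and the wasted space stays under half of the largest
  // Primary class size; bigger shrinks fall through to the allocate-and-copy
  // path so that oversized chunks are returned to the backend.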

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache));
    BackendAllocator.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }

  void *handleBadRequest() {
    initThreadMaybe();
    return FailureHandler::OnBadRequest();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

void initScudo() {
  Instance.init();
}

void ScudoTSD::init(bool Shared) {
  UnlockRequired = Shared;
  getBackendAllocator().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    return Instance.handleBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    Instance.handleBadRequest();
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

// Interface functions

void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit) {  // NOLINT
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}