#ifndef SLAM_USE_DEBUG_ALLOCATOR

#include "pedigree/kernel/core/SlamAllocator.h"

#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/utilities/assert.h"
#include "pedigree/kernel/utilities/utility.h"

#ifndef PEDIGREE_BENCHMARK
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/utilities/MemoryTracing.h"
#if CRIPPLINGLY_VIGILANT
#include "pedigree/kernel/machine/Machine.h"
#endif
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#if VIGILANT_OVERRUN_CHECK
#include "pedigree/kernel/debugger/Backtrace.h"
#include "pedigree/kernel/debugger/commands/SlamCommand.h"
#endif
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Thread.h"
#endif

// ... (#if <build-dependent condition, elided>)
#define ATOMIC_MEMORY_ORDER __ATOMIC_RELEASE
#define ATOMIC_CAS_WEAK true
// ... (#else)
#define ATOMIC_MEMORY_ORDER __ATOMIC_RELAXED
#define ATOMIC_CAS_WEAK true
// ... (#endif)

#ifndef PEDIGREE_BENCHMARK
// ...
#endif

template <class T>
inline T *untagged(T *p) PURE;

template <class T>
inline T *tagged(T *p) PURE;

template <class T>
inline T *touch_tag(T *p) PURE;
template <class T>
inline T *untagged(T *p)
{
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
#if defined(PEDIGREE_BENCHMARK) || defined(HOSTED)
    ptr &= ~0xFFFF000000000000ULL;
#else
    ptr |= 0xFFFF000000000000ULL;
#endif
    return reinterpret_cast<T *>(ptr);
}
template <class T>
inline T *tagged(T *p)
{
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    ptr &= 0xFFFFFFFFFFFFULL;
    return reinterpret_cast<T *>(ptr);
}
template <class T>
inline T *touch_tag(T *p)
{
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    ptr += 0x1000000000000ULL;
    return reinterpret_cast<T *>(ptr);
}
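// Note on the helpers above (inferred from the masks, not from original
// comments): free-list pointers carry a 16-bit counter in bits 48-63.
// tagged() strips the counter and keeps the low 48 address bits, untagged()
// restores the canonical upper bits (lower half for PEDIGREE_BENCHMARK/HOSTED
// builds, higher half for the kernel proper), and touch_tag() adds 1 << 48 so
// that every list update changes the value being compared, defending the CAS
// loops below against the classic ABA problem.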
inline void spin_pause()
{
#ifdef PEDIGREE_BENCHMARK
    // ...
#endif
    // ...
}

inline uintptr_t getHeapBase()
{
#ifdef PEDIGREE_BENCHMARK
    return SlamSupport::getHeapBase();
#else
    // ...
#endif
}

inline uintptr_t getHeapEnd()
{
#ifdef PEDIGREE_BENCHMARK
    return SlamSupport::getHeapEnd();
#else
    // ...
#endif
}

inline size_t getPageSize()
{
#ifdef PEDIGREE_BENCHMARK
    // ...
#else
    // ...
#endif
}
inline void allocateAndMapAt(void *addr, bool cowOk = false)
{
#ifdef PEDIGREE_BENCHMARK
    SlamSupport::getPageAt(addr);
#else
    size_t standardFlags = /* ... */;
    // ...
    static physical_uintptr_t physZero = 0;
    bool needZeroPage = false;
    size_t extraFlags = 0;
    // ...
    physical_uintptr_t phys = 0;
    // ... (obtains the current VirtualAddressSpace `va` and a physical page
    //      `phys`; the cowOk/zero-page handling is elided)
    if (!va.map(phys, addr, standardFlags | extraFlags))
    {
        FATAL("SlamAllocator: failed to allocate and map at " << addr);
    }
#endif
}

inline void unmap(void *addr)
{
#ifdef PEDIGREE_BENCHMARK
    SlamSupport::unmapPage(addr);
#else
    // ...
    physical_uintptr_t phys;
    // ... (unmaps the page and releases its physical frame)
#endif
}
SlamCache::SlamCache()
    : m_PartialLists(), m_ObjectSize(0), m_SlabSize(0), m_FirstSlab(),
      // ...
      m_RecoveryLock(false),
      // ...
{
    // ...
}

// ...

void SlamCache::initialise(SlamAllocator *parent, size_t objectSize)
{
    if (objectSize < OBJECT_MINIMUM_SIZE)
        return;

    m_ObjectSize = objectSize;
    if (m_ObjectSize > SLAB_MINIMUM_SIZE)
        m_SlabSize = m_ObjectSize;
    else
        m_SlabSize = SLAB_MINIMUM_SIZE;

#ifdef MULTIPROCESSOR
    // ...
    for (size_t i = 0; i < maxCpu; i++)
        m_PartialLists[i] = tagged(&m_EmptyNode);
#else
    // ...
#endif

    ByteSet(&m_EmptyNode, 0xAB, sizeof(m_EmptyNode));
    m_EmptyNode.next = tagged(&m_EmptyNode);

    // ...
    assert((m_SlabSize % m_ObjectSize) == 0);
}
SlamCache::Node *SlamCache::pop(alignedNode *head)
{
    Node *N = 0, *pNext = 0;
    alignedNode currentHead = *head;
    // ... (retry until the compare-exchange succeeds)
    {
        N = untagged(const_cast<Node *>(currentHead));
        // ...
        if (__atomic_compare_exchange_n(
                head, &currentHead, touch_tag(pNext), ATOMIC_CAS_WEAK,
                ATOMIC_MEMORY_ORDER, __ATOMIC_RELAXED))
        {
            // ...
        }
        // ...
    }
    // ...
}
void SlamCache::push(alignedNode *head, Node *newTail, Node *newHead)
{
    // ...
    newTail->next = const_cast<Node *>(*head);
    while (!__atomic_compare_exchange_n(
        head, const_cast<alignedNode *>(&newTail->next), touch_tag(newHead),
        ATOMIC_CAS_WEAK, ATOMIC_MEMORY_ORDER, __ATOMIC_RELAXED))
    {
        // ...
    }
}
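// The pop()/push() pair above forms a Treiber-style lock-free stack for each
// per-CPU partial list: both sides retry a weak compare-exchange until the
// head they observed is still current, and every successful update goes
// through touch_tag(), so a node that is popped and pushed back between two
// reads is still detected because the counter bits differ even though the
// address matches. ATOMIC_MEMORY_ORDER and ATOMIC_CAS_WEAK come from the
// configuration block at the top of the file.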
uintptr_t SlamCache::allocate()
{
#if EVERY_ALLOCATION_IS_A_SLAB
    // ...
#endif

#if SLABS_FOR_HUGE_ALLOCS
    if (m_ObjectSize >= getPageSize())
    {
        // ...
    }
#endif

#ifdef MULTIPROCESSOR
    // ... (thisCpu selection elided)
#endif

    Node *N = pop(&m_PartialLists[thisCpu]);
    // ...
    if (N == &m_EmptyNode)
    {
        Node *pNode = initialiseSlab(getSlab());
        uintptr_t slab = reinterpret_cast<uintptr_t>(pNode);
#if CRIPPLINGLY_VIGILANT
        // ...
#endif
        // ...
    }
    // ...
    assert(N->next != reinterpret_cast<Node *>(VIGILANT_MAGIC));
    // ...
    assert(N->magic == TEMP_MAGIC || N->magic == MAGIC_VALUE);
    N->magic = TEMP_MAGIC;
    // ...
    return reinterpret_cast<uintptr_t>(N);
}
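// allocate() above is the per-cache fast path: pop a node from this CPU's
// partial free list, and only when the sentinel m_EmptyNode comes back does
// the cache ask the parent allocator for a fresh slab via
// getSlab()/initialiseSlab(). The TEMP_MAGIC stamp marks the node as in use,
// so free() and the vigilant checks can tell live objects apart from
// free-list entries (which carry MAGIC_VALUE).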
void SlamCache::free(uintptr_t object)
{
#if EVERY_ALLOCATION_IS_A_SLAB
    // In this configuration every allocation owns a whole slab, so its pages
    // are simply unmapped.
    size_t numPages = m_SlabSize / getPageSize();
    if (m_SlabSize % getPageSize())
        ++numPages;

    object = object & ~(getPageSize() - 1);
    for (size_t i = 0; i < numPages; ++i)
    {
        unmap(reinterpret_cast<void *>(object + (i * getPageSize())));
    }
    // ...
#endif

#if SLABS_FOR_HUGE_ALLOCS
    if (m_ObjectSize >= getPageSize())
    {
        // ...
    }
#endif

#ifdef MULTIPROCESSOR
    // ... (thisCpu selection elided)
#endif

    Node *N = reinterpret_cast<Node *>(object);
    // ...
    assert(pFoot->magic == VIGILANT_MAGIC);
    // ...
#if BOCHS_MAGIC_WATCHPOINTS
    asm volatile("xchg %%dx,%%dx" ::"a"(&pFoot->catcher));
#endif
    // ...
    assert(N->magic != MAGIC_VALUE);
    N->magic = MAGIC_VALUE;
    // ...
    push(&m_PartialLists[thisCpu], N);
}
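// free() above is the inverse of allocate(): the node's magic flips back to
// MAGIC_VALUE and the object is pushed onto this CPU's partial list. With
// EVERY_ALLOCATION_IS_A_SLAB the object owns its pages outright and they are
// unmapped instead of being recycled through the free list.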
bool SlamCache::isPointerValid(uintptr_t object) const
{
#if SLABS_FOR_HUGE_ALLOCS
    if (m_ObjectSize >= getPageSize())
    {
        // ...
    }
#endif

    Node *N = reinterpret_cast<Node *>(object);
    // ...
    if (pFoot->magic != VIGILANT_MAGIC)
    {
        // ...
    }
    // ...
    if (N->magic == MAGIC_VALUE)
    {
#if VERBOSE_ISPOINTERVALID
        // ... log: "SlamCache::isPointerValid: memory " << Hex << object
        //     << " has invalid magic (" << N->magic << " != " << MAGIC_VALUE
        //     << ")."
#endif
        // ...
    }
    // ...
}
uintptr_t SlamCache::getSlab()
{
    // ...
}

void SlamCache::freeSlab(uintptr_t slab)
{
    // ...
}
size_t SlamCache::recovery(size_t maxSlabs)
{
#if EVERY_ALLOCATION_IS_A_SLAB
    // ...
#endif

#if SLABS_FOR_HUGE_ALLOCS
    if (m_ObjectSize >= getPageSize())
    {
        // ...
    }
#endif

#ifdef MULTIPROCESSOR
    // ... (thisCpu selection elided)
#endif

    // ...
    if (untagged(m_PartialLists[thisCpu]) == &m_EmptyNode)
        return 0;

    size_t freedSlabs = 0;
    if (m_ObjectSize < getPageSize())
    {
        Node *reinsertHead = tagged(&m_EmptyNode);
        Node *reinsertTail = &m_EmptyNode;
        // ... (loop while there are slabs left to examine)
        {
            Node *N = pop(&m_PartialLists[thisCpu]);
            // ...
            if (N == &m_EmptyNode)
            {
                // ...
            }

            // ...
            uintptr_t slab =
                reinterpret_cast<uintptr_t>(N) & ~(getPageSize() - 1);

            // Check whether every object in the page containing N is free.
            bool bSlabNotFree = false;
            for (size_t i = 0; i < (m_SlabSize / m_ObjectSize); ++i)
            {
                Node *pNode =
                    reinterpret_cast<Node *>(slab + (i * m_ObjectSize));
                // ...
                if (pHeader->cache == this)
                {
                    // ...
                }
                else if (pNode->magic != MAGIC_VALUE)
                {
                    // ...
                }
                // ...
            }

            if (bSlabNotFree)
            {
                // At least one object is still live: keep N on a local
                // reinsert list instead of freeing the slab.
                if (untagged(reinsertHead) == &m_EmptyNode)
                {
                    reinsertHead = tagged(N);
                    // ...
                    N->next = tagged(&m_EmptyNode);
                }
                else
                {
                    N->next = reinsertHead;
                    reinsertHead = tagged(N);
                }
                // ...
            }
            else
            {
                // Unlink any other free-list entries living in this slab
                // before returning its pages.
                alignedNode head = untagged(m_PartialLists[thisCpu]);
                alignedNode prev = head;
                while (head != &m_EmptyNode)
                {
                    // ... =
                    //     ((head >= reinterpret_cast<void *>(slab)) ||
                    //      (head <= reinterpret_cast<void *>(slab + getPageSize())));
                    // ...
                    prev->next = touch_tag(head->next);
                    // ...
                    prev = untagged(head->next);
                    m_PartialLists[thisCpu] = touch_tag(head->next);
                    // ...
                    head = untagged(head->next);
                    // ...
                }
                // ... (the slab is released and freedSlabs is updated)
            }
        }

        if (reinsertTail != &m_EmptyNode)
        {
            // ...
            push(&m_PartialLists[thisCpu], reinsertTail, reinsertHead);
        }
    }
    else
    {
        // Objects of a page or larger: every free node is itself a slab.
        // ...
        {
            if (untagged(m_PartialLists[thisCpu]) == &m_EmptyNode)
            {
                // ...
            }

            Node *N = pop(&m_PartialLists[thisCpu]);
            if (N == &m_EmptyNode)
            {
                // ...
            }
            // ...
            assert(N->magic == MAGIC_VALUE);
            // ...
            uintptr_t slab = reinterpret_cast<uintptr_t>(N);
            // ... (the slab is released and freedSlabs is updated)
        }
    }

    // ...
    return freedSlabs;
}
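// recovery() above tries to hand whole slabs back to the parent allocator,
// presumably under m_RecoveryLock (the surviving fragments do not show the
// locking). For sub-page objects a page can only be freed once every object
// in it is verifiably free, and any other free-list entries living in that
// page are unlinked first; nodes that cannot be freed are collected on
// reinsertHead/reinsertTail and pushed back in a single operation. For
// page-or-larger objects each free node is itself a slab and can be returned
// directly.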
SlamCache::Node *SlamCache::initialiseSlab(uintptr_t slab)
{
#if SLABS_FOR_HUGE_ALLOCS
    if (m_ObjectSize >= getPageSize())
    {
        // ...
    }
#endif

#ifdef MULTIPROCESSOR
    // ... (thisCpu selection elided)
#endif

    size_t nObjects = m_SlabSize / m_ObjectSize;

    Node *N = reinterpret_cast<Node *>(slab);
    N->next = tagged(&m_EmptyNode);
    // ...
    N->magic = TEMP_MAGIC;
    // ...

    Node *pFirst = 0, *pLast = 0;
    for (size_t i = 1; i < nObjects; i++)
    {
        Node *pNode = reinterpret_cast<Node *>(slab + (i * m_ObjectSize));
        pNode->next =
            reinterpret_cast<Node *>(slab + ((i + 1) * m_ObjectSize));
        pNode->next = tagged(pNode->next);
        // ...
        pNode->magic = MAGIC_VALUE;
        // ...
        if (!pFirst)
            pFirst = tagged(pNode);
        // ...
    }

    // ...
    push(&m_PartialLists[thisCpu], pLast, pFirst);

    // ...
    return N;
}
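// initialiseSlab() above threads the free objects of a freshly mapped slab
// into a linked list in place: object 0 becomes the caller's allocation
// (TEMP_MAGIC), objects 1..nObjects-1 get MAGIC_VALUE and a tagged next
// pointer to their neighbour, and the whole chain is then published onto the
// partial list with a single push() of (pLast, pFirst).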
#if CRIPPLINGLY_VIGILANT
// ...
void SlamCache::check()
{
    if (m_ObjectSize >= getPageSize())
        return;
    // ...
    if (m_ObjectSize == 0)
        return;
    // ...
    size_t nObjects = m_SlabSize / m_ObjectSize;
    // ...
    size_t maxPerSlab = (m_SlabSize / sizeof(uintptr_t)) - 2;
    // ...
    uintptr_t curSlab = m_FirstSlab;
    // ... (walk the chain of tracking slabs)
    {
        uintptr_t numAlloced = *reinterpret_cast<uintptr_t *>(curSlab);
        // ... =
        //     *reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
        // ...
        for (size_t i = 0; i < numAlloced; i++)
        {
            uintptr_t slab = *reinterpret_cast<uintptr_t *>(
                curSlab + sizeof(uintptr_t) * (i + 2));
            for (size_t i = 0; i < nObjects; i++)
            {
                uintptr_t addr = slab + i * m_ObjectSize;
                Node *pNode = reinterpret_cast<Node *>(addr);
                if (pNode->magic == MAGIC_VALUE || pNode->magic == TEMP_MAGIC)
                {
                    // Free or not-yet-initialised object; skip it.
                    continue;
                }
                // ... (pHead/pFoot are derived from the object; the footer
                //      sits at addr + m_ObjectSize - its own size)
                if (pHead->magic != VIGILANT_MAGIC)
                {
                    ERROR(
                        "Possible heap underrun: object starts at "
                        << addr << ", size: " << m_ObjectSize
                        << ", block: " /* ... */);
                }
                // ...
                if (pFoot->magic != VIGILANT_MAGIC)
                {
                    ERROR("Possible heap overrun: object starts at " << addr);
                }
                // ...
            }
        }
        // ...
        if (numAlloced == maxPerSlab)
        {
            // ...
        }
        // ...
    }
}
void SlamCache::trackSlab(uintptr_t slab)
{
    // ...
    if (m_ObjectSize == 0)
        return;
    // ...
    if (!m_FirstSlab)
    {
        m_FirstSlab = getSlab();
        uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(m_FirstSlab);
        // ... =
        //     reinterpret_cast<uintptr_t *>(m_FirstSlab + sizeof(uintptr_t));
        // ...
    }

    // ...
    size_t maxPerSlab = (m_SlabSize / sizeof(uintptr_t)) - 2;

    uintptr_t curSlab = m_FirstSlab;
    // ... (walk the chain of tracking slabs)
    {
        uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(curSlab);
        // ... =
        //     reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
        // ...
        if (*numAlloced < maxPerSlab)
        {
            uintptr_t *p = reinterpret_cast<uintptr_t *>(
                curSlab + (*numAlloced + 2) * sizeof(uintptr_t));
            // ...
            *numAlloced = *numAlloced + 1;
            // ...
        }
        // ...
        {
            uintptr_t newSlab = getSlab();
            // ...
            uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(curSlab);
            // ... =
            //     reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
            // ...
        }
    }
}
#endif  // CRIPPLINGLY_VIGILANT
SlamAllocator::SlamAllocator()
    : m_bInitialised(false),
#if CRIPPLINGLY_VIGILANT
      // ...
#endif
      // ...
      m_SlabRegionLock(false),
      // ...
      m_HeapPageCount(0), m_SlabRegionBitmap(), m_SlabRegionBitmapEntries(0),
      // ...
{
    // ...
}

SlamAllocator::~SlamAllocator()
{
    // ...
}
void SlamAllocator::initialise()
{
    // ...

    // The slab region bitmap lives at the very start of the heap; the
    // allocatable region (m_Base) starts immediately after it.
    uintptr_t bitmapBase = getHeapBase();
    uintptr_t heapEnd = getHeapEnd();
    size_t heapSize = heapEnd - bitmapBase;
    size_t bitmapBytes = (heapSize / getPageSize()) / 8;

    m_SlabRegionBitmap = reinterpret_cast<uint64_t *>(bitmapBase);
    m_SlabRegionBitmapEntries = bitmapBytes / sizeof(uint64_t);

    // Round the bitmap size up to a whole number of pages.
    if (bitmapBytes & (getPageSize() - 1))
    {
        bitmapBytes &= ~(getPageSize() - 1);
        bitmapBytes += getPageSize();
    }

    m_Base = bitmapBase + bitmapBytes;

#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif
    // ...
    size_t numPages = 0;
    for (uintptr_t addr = bitmapBase; addr < m_Base; addr += getPageSize())
    {
        // ...
        allocateAndMapAt(reinterpret_cast<void *>(addr), numPages++ >= 32);
    }
#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif

#ifndef PEDIGREE_BENCHMARK
    // ... log: "Kernel heap range prepared from " << Hex << m_Base << " to "
    //     << heapEnd << ", size: " << (heapEnd - m_Base)
    // ... log: " -> kernel heap bitmap is " << Dec << (bitmapBytes / 1024)
    //     << Hex << ...
#endif

    // One cache per power-of-two object size.
    for (size_t i = 0; i < 32; i++)
    {
        m_Caches[i].initialise(this, 1ULL << i);
    }

    m_bInitialised = true;
}
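// Worked example of the layout set up above (illustrative numbers, not from
// the original source): with 4 KiB pages and a 1 GiB heap region there are
// 262144 heap pages, so the bitmap needs 262144 / 8 = 32768 bytes, i.e. 4096
// uint64_t entries occupying 8 pages. That size is already page-aligned, so
// m_Base begins 32 KiB past getHeapBase() and each page above m_Base is
// described by exactly one bit of m_SlabRegionBitmap.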
#ifdef PEDIGREE_BENCHMARK
void SlamAllocator::clearAll()
{
    // ...
    if (!m_SlabRegionBitmap)
    {
        return;
    }
    // ...
    m_SlabRegionLock.acquire();
    // ...
    m_bInitialised = false;

    // Free every slab page still marked as in use in the region bitmap.
    for (size_t entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
    {
        if (!m_SlabRegionBitmap[entry])
        {
            continue;
        }
        // ...
        for (size_t bit = 0; bit < 64; ++bit)
        {
            uint64_t test = 1ULL << bit;
            if ((m_SlabRegionBitmap[entry] & test) == 0)
            {
                continue;
            }
            // ...
            uintptr_t slab = m_Base + (((entry * 64) + bit) * getPageSize());
            freeSlab(slab, getPageSize());
        }
    }

    // ...
    m_SlabRegionBitmap = nullptr;
    m_SlabRegionBitmapEntries = 0;

    // Unmap the bitmap pages themselves.
    for (uintptr_t addr = getHeapBase(); addr < m_Base; addr += getPageSize())
    {
        unmap(reinterpret_cast<void *>(addr));
    }
    // ...
    m_SlabRegionLock.release();
    // ...
}
#endif
uintptr_t SlamAllocator::getSlab(size_t fullSize)
{
    ssize_t nPages = fullSize / getPageSize();
    if (!nPages)
    {
        panic("Attempted to get a slab smaller than the native page size.");
    }

    m_SlabRegionLock.acquire();

    // Search the bitmap for a free run of nPages pages.
    size_t entry = 0;
    size_t bit = 0;
    // ...
    if (nPages == 1)
    {
        // A single page: any clear bit will do.
        for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
        {
            if (!m_SlabRegionBitmap[entry])
            {
                // ...
            }
            else if (m_SlabRegionBitmap[entry] != 0xFFFFFFFFFFFFFFFFULL)
            {
                // ...
                bit = __builtin_ffsll(~m_SlabRegionBitmap[entry]) - 1;
                // ...
            }
        }
    }
    else if (nPages > 64)
    {
        // The run spans more than one 64-bit bitmap entry.
        for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
        {
            // ...
            if (m_SlabRegionBitmap[entry])
                continue;
            // ...
            size_t needed = nPages - 64;
            size_t checkEntry = entry + 1;
            // ...
            if (m_SlabRegionBitmap[checkEntry])
            {
                // ...
            }
            // ...
            else if (needed < 64)
            {
                size_t leading =
                    __builtin_ctzll(m_SlabRegionBitmap[checkEntry]);
                if (leading >= needed)
                {
                    // ...
                }
                // ...
            }
            // ...
        }
    }
    else
    {
        // 2..64 pages: slide a mask of nPages set bits across each entry.
        uint64_t search = (1ULL << nPages) - 1;
        size_t maxBit = 64 - nPages;
        for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
        {
            if (m_SlabRegionBitmap[entry] == 0ULL)
            {
                // ...
            }
            else if (m_SlabRegionBitmap[entry] != ~0ULL)
            {
                for (bit = 0; bit < maxBit; ++bit)
                {
                    if (m_SlabRegionBitmap[entry] & (search << bit))
                        continue;
                    // ...
                }
            }
            // ...
        }
    }

    // ... (when no run was found):
    FATAL(
        "SlamAllocator::getSlab cannot find a place to allocate this slab ("
        << Dec << fullSize << Hex << " bytes) - consumed " << m_HeapPageCount
        << " pages! "
        << " --> " << this);
    // ...

    uintptr_t slab = m_Base + (((entry * 64) + bit) * getPageSize());

    // Mark the pages of the run as used.
    for (ssize_t i = 0; i < nPages; ++i)
    {
        m_SlabRegionBitmap[entry] |= 1ULL << bit;
        // ...
    }

    m_SlabRegionLock.release();

#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif
    // Back the new slab with real pages.
    for (ssize_t i = 0; i < nPages; ++i)
    {
        void *p = reinterpret_cast<void *>(slab + (i * getPageSize()));
        allocateAndMapAt(p);
    }
#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif

    m_HeapPageCount += fullSize / getPageSize();
    // ...
    return slab;
}
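// The search above appears to distinguish three cases by run length: a single
// page (any clear bit, located via __builtin_ffsll on the inverted entry),
// more than 64 pages (whole empty 64-bit entries plus a remainder checked
// with __builtin_ctzll in the following entry), and anything in between,
// where a mask of nPages set bits is slid across each entry. For example, a
// 16 KiB slab on 4 KiB pages gives nPages = 4 and search = 0b1111, and an
// entry is accepted at the first offset below 64 - nPages for which
// (entry & (search << bit)) == 0.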
void SlamAllocator::freeSlab(uintptr_t address, size_t length)
{
    size_t nPages = length / getPageSize();
    if (!nPages)
    {
        panic("Attempted to free a slab smaller than the native page size.");
    }
    // ...

#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif
    // Unmap the pages backing the slab.
    for (uintptr_t base = address; base < (address + length);
         base += getPageSize())
    {
        void *p = reinterpret_cast<void *>(base);
        // ...
    }
#ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
    // ...
#endif

    // Clear the slab's bits in the region bitmap.
    // ...
    address /= getPageSize();
    size_t entry = address / 64;
    size_t bit = address % 64;

    for (size_t i = 0; i < nPages; ++i)
    {
        m_SlabRegionBitmap[entry] &= ~(1ULL << bit);
        // ...
    }

    m_HeapPageCount -= length / getPageSize();
}
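// Index arithmetic used above, assuming the (elided) conversion of `address`
// into a page index relative to m_Base: page / 64 selects the uint64_t entry
// and page % 64 the bit inside it. For instance heap page 200 is entry 3,
// bit 8, and freeing a single-page slab there clears exactly that bit.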
size_t SlamAllocator::recovery(size_t maxSlabs)
{
    size_t nSlabs = 0;
    size_t nPages = 0;
    // ...
    for (size_t i = 0; i < 32; ++i)
    {
        // ...
        if (!m_Caches[i].slabSize())
            continue;
        // ...
        size_t thisSlabs = m_Caches[i].recovery(maxSlabs);
        nPages += (thisSlabs * m_Caches[i].slabSize()) / getPageSize();
        nSlabs += thisSlabs;
        if (nSlabs >= maxSlabs)
            break;
    }

    // ...
    return nPages;
}
uintptr_t SlamAllocator::allocate(size_t nBytes)
{
#if DEBUGGING_SLAB_ALLOCATOR
    NOTICE_NOLOCK("SlabAllocator::allocate(" << Dec << nBytes << Hex << ")");
#endif

    // ...

#if CRIPPLINGLY_VIGILANT
    // ...
    for (int i = 0; i < 32; i++)
        m_Caches[i].check();
#endif

#if defined(MEMORY_TRACING) || WARN_PAGE_SIZE_OR_LARGER
    size_t origSize = nBytes;
#endif

    // ... (space for the allocation header and footer is added to nBytes)
    assert(nBytes < (1U << 31));

    // ...
    if (UNLIKELY(nBytes < OBJECT_MINIMUM_SIZE))
    {
        nBytes = OBJECT_MINIMUM_SIZE;
        // ...
    }

    // Pick the power-of-two cache that fits the request.
    size_t lg2 = 0;
    uintptr_t ret = 0;
    // ...
    lg2 = 32 - __builtin_clz(nBytes);
    ret = m_Caches[lg2].allocate();

#if WARN_PAGE_SIZE_OR_LARGER
    // ...
    if (nBytes >= getPageSize())
    {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-address"
        void *ret0 = __builtin_return_address(0);
        void *ret1 = __builtin_return_address(1);
        // ... log: "alloc of " << origSize << " rounded to " << nBytes
        //     << " exceeds page size [at " << ret0 << " " << ret1 << ...
#pragma GCC diagnostic pop
    }
#endif

#if DEBUGGING_SLAB_ALLOCATOR
    // ... (on failure): log "SlabAllocator::allocate: Allocation failed ("
    //     << Dec << nBytes << Hex << " bytes)"
#endif

    // ... (the AllocHeader and footer are laid out around the object and ret
    //      is advanced past the header)
    head->cache = &m_Caches[lg2];
    // ...
    head->magic = VIGILANT_MAGIC;
    foot->magic = VIGILANT_MAGIC;

#if BOCHS_MAGIC_WATCHPOINTS
    // ...
    asm volatile("xchg %%cx,%%cx" ::"a"(&foot->catcher));
#endif
#if VIGILANT_OVERRUN_CHECK
    // ... (a backtrace of up to NUM_SLAM_BT_FRAMES * sizeof(uintptr_t) bytes
    //      is captured into the header)
    head->requested = nBytes;
    g_SlamCommand.addAllocation(head->backtrace, head->requested);
    // ...
#endif

    // ... (when a current thread exists, its process accounts the heap usage)
    pThread->getParent()->trackHeap(nBytes);
    // ...

#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(ret), MemoryTracing::Allocation, origSize);
#endif

    // ...
    return ret;
}
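// Bucket selection example for the code above: a request that reaches the
// lg2 computation as nBytes = 100 gives __builtin_clz(100) = 25, so
// lg2 = 32 - 25 = 7 and the allocation is served from m_Caches[7], whose
// objects are 1 << 7 = 128 bytes: the smallest power-of-two size that still
// holds the request together with its header and footer.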
size_t SlamAllocator::allocSize(uintptr_t mem)
{
    // ...
    assert(head->cache != 0);
    size_t result = head->cache->objectSize();
    // ...
}
void SlamAllocator::free(uintptr_t mem)
{
#if DEBUGGING_SLAB_ALLOCATOR
    NOTICE_NOLOCK("SlabAllocator::free");
#endif

    // ...

#if CRIPPLINGLY_VIGILANT
    // ...
    for (int i = 0; i < 32; i++)
        m_Caches[i].check();
#endif

#ifndef PEDIGREE_BENCHMARK
    // Reject pointers that are not even mapped before touching their header.
    if (!Processor::information().getVirtualAddressSpace().isMapped(
            reinterpret_cast<void *>(mem)))
    {
        FATAL(
            "SlamAllocator::free - given pointer '"
            << mem << "' was completely invalid.");
    }
#endif

    // ... (the AllocHeader immediately precedes the object)
    assert(head->cache != 0);
    // ...
    assert(head->magic == VIGILANT_MAGIC);
    // ...

#if BOCHS_MAGIC_WATCHPOINTS
    // ...
#endif
#if VIGILANT_OVERRUN_CHECK
    // ...
    g_SlamCommand.removeAllocation(head->backtrace, head->requested);
    // ...
#endif

    // ...
#if SCRIBBLE_FREED_BLOCKS
    // ...
    ByteSet(reinterpret_cast<void *>(mem), 0xAB, size);
#endif

    // ... (heap accounting against the owning process)
    pThread->getParent()->trackHeap(-pCache->objectSize());
    // ...

#ifdef MEMORY_TRACING
    traceAllocation(reinterpret_cast<void *>(mem), MemoryTracing::Free, 0);
#endif
}
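// The free path above mirrors allocate(): it re-derives the header in front
// of the block, validates its magic and cache pointer, optionally scribbles
// 0xAB over the freed bytes (SCRIBBLE_FREED_BLOCKS) so use-after-free bugs
// show up quickly, updates the owning process's heap accounting, and then
// hands the object back to its SlamCache.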
bool SlamAllocator::isPointerValid(uintptr_t mem)
{
#if DEBUGGING_SLAB_ALLOCATOR
    NOTICE_NOLOCK("SlabAllocator::isPointerValid");
#endif

    // ...

#ifndef PEDIGREE_BENCHMARK
    // ... (elided: returns false, after an optional VERBOSE_ISPOINTERVALID
    //      log "SlamAllocator::isPointerValid: memory " << Hex << mem
    //      << " is not in the heap region.", when mem fails the address
    //      space check)
#endif

#if CRIPPLINGLY_VIGILANT
    // ...
    for (int i = 0; i < 32; i++)
        m_Caches[i].check();
#endif

    // ... (head points at the AllocHeader preceding mem)
    if (head->magic != VIGILANT_MAGIC)
    {
#if VERBOSE_ISPOINTERVALID
        // ... log: "SlamAllocator::isPointerValid: memory " << Hex << mem
        //     << " failed magic check (" << head->magic << " != "
        //     << VIGILANT_MAGIC << ")."
#endif
        return false;
    }

    // ...
    if (head->cache == 0)
    {
#if VERBOSE_ISPOINTERVALID
        // ... log: "SlamAllocator::isPointerValid: memory " << Hex << mem
        //     << " does not reference a valid SlamCache."
#endif
        return false;
    }

    // ...
    bool bValid = false;
    for (int i = 0; i < 32; i++)
    {
        if (head->cache == &m_Caches[i])
        {
            bValid = true;
            break;
        }
    }

    if (!bValid)
    {
        // ... log: "SlamAllocator::isPointerValid - cache pointer '"
        //     << reinterpret_cast<uintptr_t>(head->cache) << "' is invalid."
        return false;
    }

    // ...
    return head->cache->isPointerValid(mem - sizeof(AllocHeader));
}
bool SlamAllocator::isWithinHeap(uintptr_t mem) const
{
#ifndef PEDIGREE_BENCHMARK
    // ... (elided: returns false, after an optional VERBOSE_ISPOINTERVALID
    //      log "SlamAllocator::isWithinHeap: memory " << Hex << mem
    //      << " is not in the heap region.", when mem lies outside the heap)
#endif

    // ...
    return true;
}

bool _assert_ptr_valid(uintptr_t ptr)
{
    return SlamAllocator::instance().isPointerValid(ptr);
}

#endif  // !defined(SLAM_USE_DEBUG_ALLOCATOR)