The Pedigree Project  0.1
SlamAllocator.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef SLAM_USE_DEBUG_ALLOCATOR
21 
22 #include "pedigree/kernel/core/SlamAllocator.h"
23 #include "pedigree/kernel/Log.h"
24 #include "pedigree/kernel/panic.h"
25 #include "pedigree/kernel/utilities/assert.h"
26 #include "pedigree/kernel/utilities/utility.h"
27 
28 #ifndef PEDIGREE_BENCHMARK
29 #include "pedigree/kernel/LockGuard.h"
30 #ifdef MEMORY_TRACING
31 #include "pedigree/kernel/utilities/MemoryTracing.h"
32 #endif
33 #if CRIPPLINGLY_VIGILANT
34 #include "pedigree/kernel/machine/Machine.h"
35 #endif
36 #include "pedigree/kernel/processor/PhysicalMemoryManager.h"
37 #include "pedigree/kernel/processor/Processor.h"
38 #include "pedigree/kernel/processor/ProcessorInformation.h"
39 #include "pedigree/kernel/processor/VirtualAddressSpace.h"
40 #if VIGILANT_OVERRUN_CHECK
41 #include "pedigree/kernel/debugger/Backtrace.h"
42 #include "pedigree/kernel/debugger/commands/SlamCommand.h"
43 #endif
44 #endif
45 
46 #ifdef THREADS
47 #include "pedigree/kernel/process/Process.h"
48 #include "pedigree/kernel/process/Thread.h"
49 #endif
50 
51 #ifdef MULTIPROCESSOR
52 #define ATOMIC_MEMORY_ORDER __ATOMIC_RELEASE
53 #define ATOMIC_CAS_WEAK true
54 #else
55 #define ATOMIC_MEMORY_ORDER __ATOMIC_RELAXED
56 #define ATOMIC_CAS_WEAK true
57 #endif
58 
59 #ifndef PEDIGREE_BENCHMARK
60 SlamAllocator SlamAllocator::m_Instance;
61 #endif
62 
63 template <typename T>
64 inline T *untagged(T *p) PURE;
65 
66 template <typename T>
67 inline T *tagged(T *p) PURE;
68 
69 template <typename T>
70 inline T *touch_tag(T *p) PURE;
71 
72 template <typename T>
73 inline T *untagged(T *p)
74 {
76  // All heap pointers begin with 32 bits of ones. So we shove a tag there.
77  uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
78 #if defined(PEDIGREE_BENCHMARK) || defined(HOSTED)
79  // Top sixteen bits are available to us (addresses run 0 -> 0x00007FFFFFFFFFFF).
80  ptr &= ~0xFFFF000000000000ULL;
81 #else
82  // Setting the top sixteen bits makes this a canonical kernel address.
83  ptr |= 0xFFFF000000000000ULL;
84 #endif
85  return reinterpret_cast<T *>(ptr);
86 }
87 
88 template <typename T>
89 inline T *tagged(T *p)
90 {
91  uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
92  ptr &= 0xFFFFFFFFFFFFULL;
93  return reinterpret_cast<T *>(ptr);
94 }
95 
96 template <typename T>
97 inline T *touch_tag(T *p)
98 {
99  // Add one to the tag.
100  uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
101  ptr += 0x1000000000000ULL;
102  return reinterpret_cast<T *>(ptr);
103 }
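
Taken together, untagged(), tagged(), and touch_tag() treat the top sixteen bits of a heap pointer as a modification counter: stores into the free lists keep the tag, loads strip it back to a usable address, and every successful list update bumps it so that a stale head seen by a compare-and-swap is less likely to compare equal (the ABA problem). A minimal standalone sketch of the same packing, with illustrative names and an ordinary user-space address so the counter can live in the otherwise-unused top bits:

#include <cstdint>
#include <cstdio>

// Illustrative only: keep a 16-bit counter in the top bits of a pointer,
// mirroring the tagged()/untagged()/touch_tag() helpers above.
static inline uint64_t strip_tag(uint64_t p)
{
    return p & 0x0000FFFFFFFFFFFFULL;  // drop the counter, keep the address
}

static inline uint64_t bump_tag(uint64_t p)
{
    return p + 0x0001000000000000ULL;  // +1 in the counter field (bit 48 up)
}

static inline unsigned read_tag(uint64_t p)
{
    return static_cast<unsigned>(p >> 48);
}

int main()
{
    int object = 42;
    uint64_t p = reinterpret_cast<uint64_t>(&object);  // tag starts at 0

    p = bump_tag(p);  // each successful CAS in push()/pop() bumps the tag
    p = bump_tag(p);

    std::printf("tag=%u value=%d\n", read_tag(p),
                *reinterpret_cast<int *>(strip_tag(p)));
    return 0;
}
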
104 
105 inline void spin_pause()
106 {
107 #ifdef PEDIGREE_BENCHMARK
108  asm("pause");
109 #else
110  Processor::pause();
111 #endif
112 }
113 
114 inline uintptr_t getHeapBase()
115 {
116 #ifdef PEDIGREE_BENCHMARK
117  return SlamSupport::getHeapBase();
118 #else
119  return VirtualAddressSpace::getKernelAddressSpace().getKernelHeapStart();
120 #endif
121 }
122 
123 inline uintptr_t getHeapEnd()
124 {
125 #ifdef PEDIGREE_BENCHMARK
126  return SlamSupport::getHeapEnd();
127 #else
128  return VirtualAddressSpace::getKernelAddressSpace().getKernelHeapEnd();
129 #endif
130 }
131 
132 inline size_t getPageSize()
133 {
134 #ifdef PEDIGREE_BENCHMARK
135  return 0x1000;
136 #else
137  return PhysicalMemoryManager::getPageSize();
138 #endif
139 }
140 
141 inline void allocateAndMapAt(void *addr, bool cowOk = false)
142 {
143 #ifdef PEDIGREE_BENCHMARK
144  SlamSupport::getPageAt(addr);
145 #else
146  size_t standardFlags =
147  VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write;
148 
149  static physical_uintptr_t physZero = 0;
150  bool needZeroPage = false;
151  size_t extraFlags = 0;
152 
153  physical_uintptr_t phys = 0;
154  if (cowOk)
155  {
156  if (!physZero)
157  {
158  // allocate the zero page, we'll zero it shortly
159  physZero = PhysicalMemoryManager::instance().allocatePage();
160  needZeroPage = true;
161 
162  // allow us to zero out the page
163  extraFlags |= VirtualAddressSpace::Write;
164  }
165  else
166  {
167  extraFlags |= VirtualAddressSpace::CopyOnWrite;
168  }
169 
170  // disable writing (for CoW to work properly)
171  standardFlags &= ~VirtualAddressSpace::Write;
172 
173  phys = physZero;
174  }
175  else
176  {
177  phys = PhysicalMemoryManager::instance().allocatePage();
178  }
179 
180  VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();
181  if (!va.map(phys, addr, standardFlags | extraFlags))
182  {
183  FATAL("SlamAllocator: failed to allocate and map at " << addr);
184  }
185 
186  if (needZeroPage)
187  {
188  ByteSet(addr, 0, PhysicalMemoryManager::getPageSize());
189 
190  // Page zeroed - mark page copy on write now so the zero page works
191  va.setFlags(addr, standardFlags | VirtualAddressSpace::CopyOnWrite);
192  }
193 #endif
194 }
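
allocateAndMapAt() demand-maps a heap page and, when cowOk is set, backs it with a shared zero page marked copy-on-write so that untouched heap pages cost no physical memory of their own. A rough user-space analogue of that zero-page trick using POSIX mmap (MAP_ANONYMOUS as on Linux/BSD; names and sizes are illustrative, not the kernel's API):

#include <cstddef>
#include <sys/mman.h>

int main()
{
    const size_t len = 4096;

    // Anonymous private memory is zero-filled and copy-on-write: reads are
    // satisfied by a shared zero page, and the first write faults in a
    // private copy - the effect allocateAndMapAt() builds by hand above.
    void *p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    volatile char probe = static_cast<char *>(p)[0];  // no private page yet
    static_cast<char *>(p)[0] = probe + 1;            // now it gets one

    munmap(p, len);
    return 0;
}
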
195 
196 inline void unmap(void *addr)
197 {
198 #ifdef PEDIGREE_BENCHMARK
199  SlamSupport::unmapPage(addr);
200 // munmap(addr, getPageSize());
201 #else
202  VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();
203  if (!va.isMapped(addr))
204  return;
205 
206  physical_uintptr_t phys;
207  size_t flags;
208  va.getMapping(addr, phys, flags);
209  va.unmap(addr);
210 
212 #endif
213 }
214 
215 SlamCache::SlamCache()
216  : m_PartialLists(), m_ObjectSize(0), m_SlabSize(0), m_FirstSlab(),
217 #ifdef THREADS
218  m_RecoveryLock(false),
219 #endif
220  m_EmptyNode()
221 {
222 }
223 
224 SlamCache::~SlamCache()
225 {
226 }
227 
228 void SlamCache::initialise(SlamAllocator *parent, size_t objectSize)
229 {
230  if (objectSize < OBJECT_MINIMUM_SIZE)
231  return;
232 
233  m_ObjectSize = objectSize;
234  if (m_ObjectSize > SLAB_MINIMUM_SIZE)
235  m_SlabSize = m_ObjectSize;
236  else
237  m_SlabSize = SLAB_MINIMUM_SIZE;
238 
239 #ifdef MULTIPROCESSOR
240  size_t maxCpu = 255;
242 #else
243  size_t maxCpu = 1;
244 #endif
245  for (size_t i = 0; i < maxCpu; i++)
246  m_PartialLists[i] = tagged(&m_EmptyNode);
247 
248  // Make the empty node loop always, so it can be easily linked into place.
249  ByteSet(&m_EmptyNode, 0xAB, sizeof(m_EmptyNode));
250  m_EmptyNode.next = tagged(&m_EmptyNode);
251 
252  m_pParentAllocator = parent;
253 
254  assert((m_SlabSize % m_ObjectSize) == 0);
255 }
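
initialise() fixes the cache geometry: the slab size is the larger of the object size and SLAB_MINIMUM_SIZE, so a slab always holds a whole number of objects (the final assert). A small worked sketch of that sizing rule, assuming a 4096-byte minimum slab purely for illustration (the real constant lives in the SlamAllocator headers):

#include <cstddef>
#include <cstdio>

int main()
{
    // Assumed for the example only - the real SLAB_MINIMUM_SIZE comes from
    // the SlamAllocator headers.
    const size_t kSlabMinimum = 4096;

    for (size_t lg2 = 4; lg2 <= 13; ++lg2)
    {
        size_t objectSize = size_t(1) << lg2;
        size_t slabSize =
            (objectSize > kSlabMinimum) ? objectSize : kSlabMinimum;

        // The assert in initialise() holds because both values are powers
        // of two, so the slab always divides evenly into objects.
        std::printf("object %5zu -> slab %5zu, %4zu objects per slab\n",
                    objectSize, slabSize, slabSize / objectSize);
    }
    return 0;
}
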
256 
257 SlamCache::Node *SlamCache::pop(SlamCache::alignedNode *head)
258 {
259  Node *N = 0, *pNext = 0;
260  alignedNode currentHead = *head;
261  while (true)
262  {
263  // Grab result.
264  N = untagged(const_cast<Node *>(currentHead));
265  pNext = N->next;
266 
267  if (__atomic_compare_exchange_n(
268  head, &currentHead, touch_tag(pNext), ATOMIC_CAS_WEAK,
269  ATOMIC_MEMORY_ORDER, __ATOMIC_RELAXED))
270  {
271  // Successful CAS, we have a node to use.
272  break;
273  }
274 
275  // Unsuccessful CAS, pause for a bit to back off.
276  spin_pause();
277  }
278 
279  return N;
280 }
281 
282 void SlamCache::push(
283  SlamCache::alignedNode *head, SlamCache::Node *newTail,
284  SlamCache::Node *newHead)
285 {
286  if (!newHead)
287  newHead = newTail;
288 
289  newTail->next = const_cast<Node *>(*head);
290  while (!__atomic_compare_exchange_n(
291  head, const_cast<alignedNode *>(&newTail->next), touch_tag(newHead),
292  ATOMIC_CAS_WEAK, ATOMIC_MEMORY_ORDER, __ATOMIC_RELAXED))
293  {
294  spin_pause();
295  }
296 }
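
pop() and push() are the two halves of a lock-free (Treiber-style) free list: both loop on a compare-and-swap of the list head, and the touch_tag() bump on every swing makes a recycled head visible to the CAS. A compact standalone sketch of the same CAS-loop shape using std::atomic; it omits the tag counter (so it is ABA-prone), is only exercised single-threaded here, and all names are illustrative:

#include <atomic>
#include <cstdio>

struct FreeNode
{
    FreeNode *next;
};

static std::atomic<FreeNode *> g_head{nullptr};

// Same CAS-loop shape as SlamCache::push(): store the old head into the new
// node, then try to swing the head; on failure the old head is refreshed and
// we retry.
static void push_node(FreeNode *n)
{
    n->next = g_head.load(std::memory_order_relaxed);
    while (!g_head.compare_exchange_weak(
        n->next, n, std::memory_order_release, std::memory_order_relaxed))
    {
        // n->next now holds the current head; just try again.
    }
}

// Same shape as SlamCache::pop(), minus the tag counter the real code uses
// to make ABA reuse visible to the CAS.
static FreeNode *pop_node()
{
    FreeNode *n = g_head.load(std::memory_order_acquire);
    while (n && !g_head.compare_exchange_weak(
                    n, n->next, std::memory_order_acquire,
                    std::memory_order_relaxed))
    {
        // n now holds the current head; retry until empty or we win the race.
    }
    return n;
}

int main()
{
    FreeNode a{}, b{};
    push_node(&a);
    push_node(&b);
    std::printf("popped %p then %p\n", static_cast<void *>(pop_node()),
                static_cast<void *>(pop_node()));
    return 0;
}
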
297 
298 uintptr_t SlamCache::allocate()
299 {
300 #if EVERY_ALLOCATION_IS_A_SLAB
301  return getSlab();
302 #endif
303 
304 #if SLABS_FOR_HUGE_ALLOCS
305  if (m_ObjectSize >= getPageSize())
306  {
307  // just return a big-enough slab - allocation is page-sized or bigger
308  return getSlab();
309  }
310 #endif
311 
312 #ifdef MULTIPROCESSOR
313  size_t thisCpu = Processor::id();
314 #else
315  size_t thisCpu = 0;
316 #endif
317 
318  Node *N = pop(&m_PartialLists[thisCpu]);
319 
320  // The sentinel node means the partial list was empty (or another CPU
321  // drained it first). Just allocate a new slab.
321  if (UNLIKELY(N == &m_EmptyNode))
322  {
323  Node *pNode = initialiseSlab(getSlab());
324  uintptr_t slab = reinterpret_cast<uintptr_t>(pNode);
325 #if CRIPPLINGLY_VIGILANT
326  if (m_pParentAllocator->getVigilance())
327  trackSlab(slab);
328 #endif
329  return slab;
330  }
331 
332  // Check that the block was indeed free.
333  assert(N->next != reinterpret_cast<Node *>(VIGILANT_MAGIC));
334 #if USING_MAGIC
335  assert(N->magic == TEMP_MAGIC || N->magic == MAGIC_VALUE);
336  N->magic = TEMP_MAGIC;
337 #endif
338 
339  return reinterpret_cast<uintptr_t>(N);
340 }
341 
342 void SlamCache::free(uintptr_t object)
343 {
344 #if EVERY_ALLOCATION_IS_A_SLAB
345  // Free the slab in the address space, but don't return it to the allocator.
346  size_t numPages = m_SlabSize / getPageSize();
347  if (m_SlabSize % getPageSize())
348  {
349  ++numPages;
350  }
351  object = object & ~(getPageSize() - 1);
352  for (size_t i = 0; i < numPages; ++i)
353  {
354  unmap(reinterpret_cast<void *>(object + (i * getPageSize())));
355  }
356 
357  return;
358 #endif
359 
360 #if SLABS_FOR_HUGE_ALLOCS
361  if (m_ObjectSize >= getPageSize())
362  {
363  // just free the object directly, it's an entire slab
364  freeSlab(object);
365  return;
366  }
367 #endif
368 
369 #ifdef MULTIPROCESSOR
370  size_t thisCpu = Processor::id();
371 #else
372  size_t thisCpu = 0;
373 #endif
374 
375  Node *N = reinterpret_cast<Node *>(object);
376 #if OVERRUN_CHECK
377  // Grab the footer and check it.
378  SlamAllocator::AllocFooter *pFoot =
379  reinterpret_cast<SlamAllocator::AllocFooter *>(
380  object + m_ObjectSize - sizeof(SlamAllocator::AllocFooter));
381  assert(pFoot->magic == VIGILANT_MAGIC);
382 
383 #if BOCHS_MAGIC_WATCHPOINTS
384  asm volatile("xchg %%dx,%%dx" ::"a"(&pFoot->catcher));
385 #endif
386 #endif
387 
388 #if USING_MAGIC
389  // Possible double free?
390  assert(N->magic != MAGIC_VALUE);
391  N->magic = MAGIC_VALUE;
392 #endif
393 
394  push(&m_PartialLists[thisCpu], N);
395 }
396 
397 bool SlamCache::isPointerValid(uintptr_t object) const
398 {
399 #if SLABS_FOR_HUGE_ALLOCS
400  if (m_ObjectSize >= getPageSize())
401  {
403  return true;
404  }
405 #endif
406 
407  Node *N = reinterpret_cast<Node *>(object);
408 #if OVERRUN_CHECK
409  // Grab the footer and check it.
410  SlamAllocator::AllocFooter *pFoot =
411  reinterpret_cast<SlamAllocator::AllocFooter *>(
412  object + m_ObjectSize - sizeof(SlamAllocator::AllocFooter));
413  if (pFoot->magic != VIGILANT_MAGIC)
414  {
415  return false;
416  }
417 #endif
418 
419 #if USING_MAGIC
420  // Possible double free?
421  if (N->magic == MAGIC_VALUE)
422  {
423 #if VERBOSE_ISPOINTERVALID
424  WARNING(
425  "SlamCache::isPointerValid: memory "
426  << Hex << object << " has invalid magic (" << N->magic
427  << " != " << MAGIC_VALUE << ").");
428 #endif
429  return false;
430  }
431 #endif
432 
433  return true;
434 }
435 
436 uintptr_t SlamCache::getSlab()
437 {
438  return m_pParentAllocator->getSlab(m_SlabSize);
439 }
440 
441 void SlamCache::freeSlab(uintptr_t slab)
442 {
443  m_pParentAllocator->freeSlab(slab, m_SlabSize);
444 }
445 
446 size_t SlamCache::recovery(size_t maxSlabs)
447 {
448 #if EVERY_ALLOCATION_IS_A_SLAB
449  return 0;
450 #endif
451 
452 #if SLABS_FOR_HUGE_ALLOCS
453  if (m_ObjectSize >= getPageSize())
454  {
455  // Caches with slabs page-sized or bigger don't hold onto freed regions
456  return 0;
457  }
458 #endif
459 
460 #ifdef MULTIPROCESSOR
461  size_t thisCpu = Processor::id();
462 #else
463  size_t thisCpu = 0;
464 #endif
465 
466 #ifdef THREADS
467  LockGuard<Spinlock> guard(m_RecoveryLock);
468 #endif
469 
470  if (untagged(m_PartialLists[thisCpu]) == &m_EmptyNode)
471  return 0;
472 
473  size_t freedSlabs = 0;
474  if (m_ObjectSize < getPageSize())
475  {
476  Node *reinsertHead = tagged(&m_EmptyNode);
477  Node *reinsertTail = &m_EmptyNode;
478  while (maxSlabs--)
479  {
480  // Grab the head node of the free list.
481  Node *N = pop(&m_PartialLists[thisCpu]);
482 
483  // If no head node, we're done with this free list.
484  if (N == &m_EmptyNode)
485  {
486  break;
487  }
488 
489  uintptr_t slab =
490  reinterpret_cast<uintptr_t>(N) & ~(getPageSize() - 1);
491 
492  // A possible node found! Any luck?
493  bool bSlabNotFree = false;
494  for (size_t i = 0; i < (m_SlabSize / m_ObjectSize); ++i)
495  {
496  Node *pNode =
497  reinterpret_cast<Node *>(slab + (i * m_ObjectSize));
498  SlamAllocator::AllocHeader *pHeader =
499  reinterpret_cast<SlamAllocator::AllocHeader *>(pNode);
500  if (pHeader->cache == this)
501  {
502  // Oops, an active allocation was found.
503  bSlabNotFree = true;
504  break;
505  }
506 #if USING_MAGIC
507  else if (pNode->magic != MAGIC_VALUE)
508  {
509  // Not free.
510  bSlabNotFree = true;
511  break;
512  }
513 #endif
514  }
515 
516  if (bSlabNotFree)
517  {
518  // Link the node into our reinsert lists, as the slab contains
519  // in-use nodes.
520  if (untagged(reinsertHead) == &m_EmptyNode)
521  {
522  reinsertHead = tagged(N);
523  reinsertTail = N;
524  N->next = tagged(&m_EmptyNode);
525  }
526  else
527  {
528  N->next = reinsertHead;
529  reinsertHead = tagged(N);
530  }
531 
532  continue;
533  }
534 
535  // Unlink any of our items that exist in the free list.
536  // Yes, this is slow, but we've already stopped the world.
537  alignedNode head = untagged(m_PartialLists[thisCpu]);
538  alignedNode prev = head;
539  while (head != &m_EmptyNode)
540  {
541  bool overlaps =
542  ((head >= reinterpret_cast<void *>(slab)) &&
543  (head < reinterpret_cast<void *>(slab + getPageSize())));
544 
545  if (overlaps)
546  {
547  // Update previous node to point past us.
548  prev->next = touch_tag(head->next);
549 
550  // If we're pointing at the head of the list, we need to
551  // update the head of the list too.
552  if (prev == head)
553  {
554  prev = untagged(head->next);
555  m_PartialLists[thisCpu] = touch_tag(head->next);
556  }
557  else
558  {
559  prev = head;
560  }
561  }
562  else
563  {
564  prev = head;
565  }
566 
567  head = untagged(head->next);
568  }
569 
570  // Kill off the slab!
571  freeSlab(slab);
572  ++freedSlabs;
573  }
574 
575  // Relink any nodes we decided we couldn't free. This must be done here
576  // as the loop may terminate before we get a chance to do this.
577  if (reinsertTail != &m_EmptyNode)
578  {
579  // Re-link the nodes we passed over.
580  push(&m_PartialLists[thisCpu], reinsertTail, reinsertHead);
581  }
582  }
583  else
584  {
585  while (maxSlabs--)
586  {
587  if (untagged(m_PartialLists[thisCpu]) == &m_EmptyNode)
588  break;
589 
590  // Pop the first free node off the free list.
591  Node *N = pop(&m_PartialLists[thisCpu]);
592  if (N == &m_EmptyNode)
593  {
594  // Emptied the partial list!
595  break;
596  }
597 
598 #if USING_MAGIC
599  assert(N->magic == MAGIC_VALUE);
600 #endif
601 
602  // Can just outright free - no need to do any further checks.
603  uintptr_t slab = reinterpret_cast<uintptr_t>(N);
604 
605  freeSlab(slab);
606  ++freedSlabs;
607  }
608  }
609 
610  return freedSlabs;
611 }
612 
613 SlamCache::Node *SlamCache::initialiseSlab(uintptr_t slab)
614 {
615 #if SLABS_FOR_HUGE_ALLOCS
616  if (m_ObjectSize >= getPageSize())
617  {
618  return nullptr;
619  }
620 #endif
621 
622 #ifdef MULTIPROCESSOR
623  size_t thisCpu = Processor::id();
624 #else
625  size_t thisCpu = 0;
626 #endif
627 
628  size_t nObjects = m_SlabSize / m_ObjectSize;
629 
630  Node *N = reinterpret_cast<Node *>(slab);
631  N->next = tagged(&m_EmptyNode);
632 #if USING_MAGIC
633  N->magic = TEMP_MAGIC;
634 #endif
635 
636  // Early exit if there's no other free objects in this slab.
637  if (nObjects <= 1)
638  return N;
639 
640  // All objects in slab are free, generate Node*'s for each (except the
641  // first) and link them together.
642  Node *pFirst = 0, *pLast = 0;
643  for (size_t i = 1; i < nObjects; i++)
644  {
645  Node *pNode = reinterpret_cast<Node *>(slab + (i * m_ObjectSize));
646  pNode->next = reinterpret_cast<Node *>(slab + ((i + 1) * m_ObjectSize));
647  pNode->next = tagged(pNode->next);
648 #if USING_MAGIC
649  pNode->magic = MAGIC_VALUE;
650 #endif
651 
652  if (!pFirst)
653  pFirst = tagged(pNode);
654 
655  pLast = pNode;
656  }
657 
658  N->next = pFirst;
659 
660  push(&m_PartialLists[thisCpu], pLast, pFirst);
661 
662  return N;
663 }
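
initialiseSlab() hands the first object of a fresh slab straight back to the caller and threads the remaining objects onto the free list. A standalone sketch of that carving loop with plain pointers (no tag bits, no magic fields; names are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Node
{
    Node *next;
};

// Thread every object in the slab except the first onto a free list and
// return the first object to the caller, as initialiseSlab() does (minus
// the tagging and magic values).
static Node *carve_slab(uintptr_t slab, size_t slabSize, size_t objectSize,
                        Node **freeListOut)
{
    size_t nObjects = slabSize / objectSize;
    Node *first = reinterpret_cast<Node *>(slab);

    Node *head = nullptr;
    for (size_t i = nObjects; i-- > 1;)  // objects N-1 down to 1
    {
        Node *n = reinterpret_cast<Node *>(slab + i * objectSize);
        n->next = head;
        head = n;
    }

    *freeListOut = head;
    return first;  // object 0 is handed straight back to the caller
}

int main()
{
    std::vector<unsigned char> slab(4096);
    Node *freeList = nullptr;
    carve_slab(reinterpret_cast<uintptr_t>(slab.data()), slab.size(), 64,
               &freeList);

    size_t onList = 0;
    for (Node *n = freeList; n; n = n->next)
        ++onList;
    std::printf("%zu objects on the free list\n", onList);  // 63 of 64
    return 0;
}
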
664 
665 #if CRIPPLINGLY_VIGILANT
666 Spinlock rarp;
667 void SlamCache::check()
668 {
669  if (m_ObjectSize >= getPageSize())
670  {
671  return;
672  }
673 
674  if (!Machine::instance().isInitialised() || Processor::m_Initialised != 2)
675  return;
676  if (m_ObjectSize == 0)
677  return;
678  rarp.acquire();
679 
680  size_t nObjects = m_SlabSize / m_ObjectSize;
681 
682  size_t maxPerSlab = (m_SlabSize / sizeof(uintptr_t)) - 2;
683 
684  uintptr_t curSlab = m_FirstSlab;
685  while (true)
686  {
687  if (!curSlab)
688  {
689  rarp.release();
690  return;
691  }
692  uintptr_t numAlloced = *reinterpret_cast<uintptr_t *>(curSlab);
693  uintptr_t next =
694  *reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
695 
696  for (size_t i = 0; i < numAlloced; i++)
697  {
698  uintptr_t slab = *reinterpret_cast<uintptr_t *>(
699  curSlab + sizeof(uintptr_t) * (i + 2));
700  for (size_t i = 0; i < nObjects; i++)
701  {
702  uintptr_t addr = slab + i * m_ObjectSize;
703  Node *pNode = reinterpret_cast<Node *>(addr);
704  if (pNode->magic == MAGIC_VALUE || pNode->magic == TEMP_MAGIC)
705  // Free, continue.
706  continue;
707  SlamAllocator::AllocHeader *pHead =
708  reinterpret_cast<SlamAllocator::AllocHeader *>(addr);
709  SlamAllocator::AllocFooter *pFoot =
710  reinterpret_cast<SlamAllocator::AllocFooter *>(
711  addr + m_ObjectSize -
712  sizeof(SlamAllocator::AllocFooter));
713  if (pHead->magic != VIGILANT_MAGIC)
714  {
715  ERROR(
716  "Possible heap underrun: object starts at "
717  << addr << ", size: " << m_ObjectSize << ", block: "
718  << (addr + sizeof(SlamAllocator::AllocHeader)));
719  }
720  if (pFoot->magic != VIGILANT_MAGIC)
721  {
722  ERROR("Possible heap overrun: object starts at " << addr);
723  assert(false);
724  }
725  }
726  }
727  if (numAlloced == maxPerSlab)
728  curSlab = next;
729  else
730  break;
731  }
732  rarp.release();
733 }
734 
735 void SlamCache::trackSlab(uintptr_t slab)
736 {
737  if (!Machine::instance().isInitialised() || Processor::m_Initialised != 2)
738  return;
739  if (m_ObjectSize == 0)
740  return;
741 
742  if (!m_FirstSlab)
743  {
744  m_FirstSlab = getSlab();
745  uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(m_FirstSlab);
746  uintptr_t *next =
747  reinterpret_cast<uintptr_t *>(m_FirstSlab + sizeof(uintptr_t));
748  *numAlloced = 0;
749  *next = 0;
750  }
751 
752  size_t maxPerSlab = (m_SlabSize / sizeof(uintptr_t)) - 2;
753 
754  uintptr_t curSlab = m_FirstSlab;
755  while (true)
756  {
757  uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(curSlab);
758  uintptr_t *next =
759  reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
760 
761  if (*numAlloced < maxPerSlab)
762  {
763  uintptr_t *p = reinterpret_cast<uintptr_t *>(
764  curSlab + (*numAlloced + 2) * sizeof(uintptr_t));
765  *p = slab;
766  *numAlloced = *numAlloced + 1;
767  return;
768  }
769 
770  if (*next)
771  curSlab = *next;
772  else
773  {
774  uintptr_t newSlab = getSlab();
775  *next = newSlab;
776  curSlab = newSlab;
777 
778  uintptr_t *numAlloced = reinterpret_cast<uintptr_t *>(curSlab);
779  uintptr_t *next =
780  reinterpret_cast<uintptr_t *>(curSlab + sizeof(uintptr_t));
781  *numAlloced = 0;
782  *next = 0;
783  }
784  }
785 }
786 #endif
787 
788 SlamAllocator::SlamAllocator()
789  : m_bInitialised(false)
790 #if CRIPPLINGLY_VIGILANT
791  ,
792  m_bVigilant(false)
793 #endif
794 #ifdef THREADS
795  ,
796  m_SlabRegionLock(false)
797 #endif
798  ,
799  m_HeapPageCount(0), m_SlabRegionBitmap(), m_SlabRegionBitmapEntries(0),
800  m_Base(0)
801 {
802 }
803 
804 SlamAllocator::~SlamAllocator()
805 {
806  if (m_bInitialised)
807  {
808  wipe();
809  }
810 }
811 
812 void SlamAllocator::initialise()
813 {
814 #ifdef THREADS
815  LockGuard<Spinlock> guard(m_SlabRegionLock);
816 #endif
817 
818  if (m_bInitialised)
819  {
820  return;
821  }
822 
823  // We need to allocate our bitmap for this purpose.
824  uintptr_t bitmapBase = getHeapBase();
825  uintptr_t heapEnd = getHeapEnd();
826  size_t heapSize = heapEnd - bitmapBase;
827  size_t bitmapBytes = (heapSize / getPageSize()) / 8;
828 
829  m_SlabRegionBitmap = reinterpret_cast<uint64_t *>(bitmapBase);
830  m_SlabRegionBitmapEntries = bitmapBytes / sizeof(uint64_t);
831 
832  // Ensure the bitmap size is now page-aligned before we allocate it.
833  if (bitmapBytes & (getPageSize() - 1))
834  {
835  bitmapBytes &= ~(getPageSize() - 1);
836  bitmapBytes += getPageSize();
837  }
838 
839  m_Base = bitmapBase + bitmapBytes;
840 
841 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
842  VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();
843  VirtualAddressSpace &currva =
844  Processor::information().getVirtualAddressSpace();
845  if (Processor::m_Initialised == 2)
846  Processor::switchAddressSpace(va);
847 #endif
848 
849  // Allocate bitmap.
850  size_t numPages = 0;
851  for (uintptr_t addr = bitmapBase; addr < m_Base; addr += getPageSize())
852  {
853  // Don't CoW the first 32 pages so we have some slabs on hand for
854  // startup before CoW is viable
855  allocateAndMapAt(reinterpret_cast<void *>(addr), numPages++ >= 32);
856  }
857 
858 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
859  if (Processor::m_Initialised == 2)
860  Processor::switchAddressSpace(currva);
861 #endif
862 
863 #ifndef PEDIGREE_BENCHMARK
864  NOTICE(
865  "Kernel heap range prepared from " << Hex << m_Base << " to " << heapEnd
866  << ", size: " << (heapEnd - m_Base));
867  DEBUG_LOG(
868  " -> kernel heap bitmap is " << Dec << (bitmapBytes / 1024) << Hex
869  << "K");
870 #endif
871 
872  for (size_t i = 0; i < 32; i++)
873  {
874  m_Caches[i].initialise(this, 1ULL << i);
875  }
876 
877  m_bInitialised = true;
878 }
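
The bitmap set up above needs one bit per heap page: bitmapBytes = (heapSize / pageSize) / 8, rounded up to a whole page, and the slab region (m_Base) begins immediately after it. A short worked version of that arithmetic for a made-up 8 GiB heap window (the base address is an assumption for the example only):

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t pageSize = 0x1000;

    // Assumed heap window, purely for the arithmetic below.
    const uint64_t heapBase = 0xFFFF800000000000ULL;
    const uint64_t heapEnd = heapBase + (8ULL << 30);  // 8 GiB

    uint64_t heapSize = heapEnd - heapBase;
    uint64_t bitmapBytes = (heapSize / pageSize) / 8;  // one bit per page
    uint64_t bitmapEntries = bitmapBytes / sizeof(uint64_t);

    // Round the bitmap up to a whole page before placing the slab region.
    if (bitmapBytes & (pageSize - 1))
        bitmapBytes = (bitmapBytes & ~(pageSize - 1)) + pageSize;

    std::printf("bitmap: %llu bytes, %llu 64-bit entries, slabs from %#llx\n",
                (unsigned long long) bitmapBytes,
                (unsigned long long) bitmapEntries,
                (unsigned long long) (heapBase + bitmapBytes));
    return 0;
}
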
879 
880 #ifdef PEDIGREE_BENCHMARK
881 void SlamAllocator::clearAll()
882 {
883  wipe();
884  initialise();
885 }
886 #endif
887 
888 void SlamAllocator::wipe()
889 {
890  if (!m_bInitialised)
891  {
892  return;
893  }
894 
895  if (!m_SlabRegionBitmap)
896  {
897  return;
898  }
899 
900 #ifdef THREADS
901  m_SlabRegionLock.acquire();
902 #endif
903 
904  m_bInitialised = false;
905 
906  // Clean up all slabs we obtained.
907  for (size_t entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
908  {
909  if (!m_SlabRegionBitmap[entry])
910  {
911  continue;
912  }
913 
914  for (size_t bit = 0; bit < 64; ++bit)
915  {
916  uint64_t test = 1ULL << bit;
917  if ((m_SlabRegionBitmap[entry] & test) == 0)
918  {
919  continue;
920  }
921 
922  uintptr_t slab = m_Base + (((entry * 64) + bit) * getPageSize());
923  freeSlab(slab, getPageSize());
924  }
925  }
926 
927  // about to destroy the bitmap mappings
928  m_SlabRegionBitmap = nullptr;
929  m_SlabRegionBitmapEntries = 0;
930 
931  // Clean up the bitmap.
932  for (uintptr_t addr = getHeapBase(); addr < m_Base; addr += getPageSize())
933  {
934  unmap(reinterpret_cast<void *>(addr));
935  }
936 
937 #ifdef THREADS
938  m_SlabRegionLock.release();
939 #endif
940 }
941 
942 uintptr_t SlamAllocator::getSlab(size_t fullSize)
943 {
944  ssize_t nPages = fullSize / getPageSize();
945  if (!nPages)
946  {
947  panic("Attempted to get a slab smaller than the native page size.");
948  }
949 
950 #ifdef THREADS
951  m_SlabRegionLock.acquire();
952 #endif
953 
954  // Try to find space for this allocation.
955  size_t entry = 0;
956  size_t bit = ~0UL;
957  if (nPages == 1)
958  {
959  // Fantastic - easy search.
960  for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
961  {
962  if (!m_SlabRegionBitmap[entry])
963  {
964  bit = 0;
965  break;
966  }
967  else if (m_SlabRegionBitmap[entry] != 0xFFFFFFFFFFFFFFFFULL)
968  {
969  // The first set bit of the INVERTED entry is the first zero bit.
970  // Note - the check for this block ensures we always get a
971  // result from ffsll here.
972  bit = __builtin_ffsll(~m_SlabRegionBitmap[entry]) - 1;
973  break;
974  }
975  }
976  }
977  else if (nPages > 64)
978  {
979  // This allocation does not fit within a single bitmap entry.
980  for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
981  {
982  // If there are any bits set in this entry, we must disregard it.
983  if (m_SlabRegionBitmap[entry])
984  continue;
985 
986  // This entry has 64 free pages. Now we need to see if we can get
987  // contiguously free bitmap entries.
988  size_t needed = nPages - 64;
989  size_t checkEntry = entry + 1;
990  while (needed >= 64)
991  {
992  // If the entry has any set bits whatsoever, it's no good.
993  if (m_SlabRegionBitmap[checkEntry])
994  break;
995 
996  // Success.
997  ++checkEntry;
998  needed -= 64;
999  }
1000 
1001  // Check for the ideal case.
1002  if (needed == 0)
1003  {
1004  bit = 0;
1005  break;
1006  }
1007  else if (needed < 64)
1008  {
1009  // Possible! Can we get enough trailing zeroes in the next entry
1010  // to make this work?
1011  size_t leading =
1012  __builtin_ctzll(m_SlabRegionBitmap[checkEntry]);
1013  if (leading >= needed)
1014  {
1015  bit = 0;
1016  break;
1017  }
1018  }
1019 
1020  // Skip already-checked entries.
1021  entry = checkEntry;
1022  }
1023  }
1024  else
1025  {
1026  // Have to search within entries.
1027  uint64_t search = (1ULL << nPages) - 1;
1028  size_t maxBit = 64 - nPages;
1029  for (entry = 0; entry < m_SlabRegionBitmapEntries; ++entry)
1030  {
1031  if (m_SlabRegionBitmap[entry] == 0ULL)
1032  {
1033  bit = 0;
1034  break;
1035  }
1036  else if (m_SlabRegionBitmap[entry] != ~0ULL)
1037  {
1038  // Try and see if we fit somewhere.
1039  for (bit = 0; bit < maxBit; ++bit)
1040  {
1041  if (m_SlabRegionBitmap[entry] & (search << bit))
1042  continue;
1043 
1044  break;
1045  }
1046 
1047  if (bit < maxBit)
1048  break;
1049 
1050  bit = ~0UL;
1051  }
1052  }
1053  }
1054 
1055  if (bit == ~0UL)
1056  {
1057  FATAL(
1058  "SlamAllocator::getSlab cannot find a place to allocate this slab ("
1059  << Dec << fullSize << Hex << " bytes) - consumed "
1060  << m_HeapPageCount << " pages! "
1061  << " --> " << this);
1062  }
1063 
1064  uintptr_t slab = m_Base + (((entry * 64) + bit) * getPageSize());
1065 
1066  // Map and mark as used.
1067  for (ssize_t i = 0; i < nPages; ++i)
1068  {
1069  m_SlabRegionBitmap[entry] |= 1ULL << bit;
1070 
1071  // Handle crossing a bitmap entry boundary.
1072  if ((++bit) >= 64)
1073  {
1074  ++entry;
1075  bit = 0;
1076  }
1077  }
1078 
1079 #ifdef THREADS
1080  // Now that we've marked the slab bits as used, we can map the pages.
1081  m_SlabRegionLock.release();
1082 #endif
1083 
1084 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
1085  VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();
1086  VirtualAddressSpace &currva =
1087  Processor::information().getVirtualAddressSpace();
1088  if (Processor::m_Initialised == 2)
1089  Processor::switchAddressSpace(va);
1090 #endif
1091 
1092  // Map. This could break as we're allocating physical memory; though we are
1093  // free of the lock so that helps.
1094  for (ssize_t i = 0; i < nPages; ++i)
1095  {
1096  void *p = reinterpret_cast<void *>(slab + (i * getPageSize()));
1097  allocateAndMapAt(p);
1098  }
1099 
1100 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
1101  if (Processor::m_Initialised == 2)
1102  Processor::switchAddressSpace(currva);
1103 #endif
1104 
1105  m_HeapPageCount += fullSize / getPageSize();
1106 
1107  return slab;
1108 }
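
For requests that fit inside one 64-bit bitmap entry, getSlab() slides the mask (1 << nPages) - 1 across each entry until it finds nPages contiguous clear bits. A compact sketch of that inner search on a single bitmap word (values are illustrative):

#include <cstdint>
#include <cstdio>

// Find `nPages` contiguous clear bits inside one 64-bit bitmap entry by
// sliding a (1 << nPages) - 1 mask, the same test getSlab() applies for
// allocations that fit within a single entry. Returns the bit index or -1.
static int find_run(uint64_t entry, unsigned nPages)
{
    const uint64_t mask = (nPages >= 64) ? ~0ULL : (1ULL << nPages) - 1;
    for (unsigned bit = 0; bit + nPages <= 64; ++bit)
    {
        if ((entry & (mask << bit)) == 0)
            return static_cast<int>(bit);
    }
    return -1;
}

int main()
{
    const uint64_t entry = 0x0000000000FF00FFULL;  // pages 0-7, 16-23 in use
    std::printf("4 pages fit at bit %d\n", find_run(entry, 4));    // bit 8
    std::printf("16 pages fit at bit %d\n", find_run(entry, 16));  // bit 24
    return 0;
}
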
1109 
1110 void SlamAllocator::freeSlab(uintptr_t address, size_t length)
1111 {
1112  size_t nPages = length / getPageSize();
1113  if (!nPages)
1114  {
1115  panic("Attempted to free a slab smaller than the native page size.");
1116  }
1117 
1118 #ifdef THREADS
1119  LockGuard<Spinlock> guard(m_SlabRegionLock);
1120 #endif
1121 
1122  // Perform unmapping first (so we can just modify 'address').
1123 
1124 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
1125  VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();
1126  VirtualAddressSpace &currva =
1127  Processor::information().getVirtualAddressSpace();
1128  if (Processor::m_Initialised == 2)
1129  Processor::switchAddressSpace(va);
1130 #endif
1131 
1132  for (uintptr_t base = address; base < (address + length);
1133  base += getPageSize())
1134  {
1135  void *p = reinterpret_cast<void *>(base);
1136  unmap(p);
1137  }
1138 
1139 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
1140  if (Processor::m_Initialised == 2)
1141  Processor::switchAddressSpace(currva);
1142 #endif
1143 
1144  // Adjust bitmap.
1145  address -= m_Base;
1146  address /= getPageSize();
1147  size_t entry = address / 64;
1148  size_t bit = address % 64;
1149 
1150  for (size_t i = 0; i < nPages; ++i)
1151  {
1152  m_SlabRegionBitmap[entry] &= ~(1ULL << bit);
1153 
1154  // Handle overflow (eg, if we cross a bitmap entry.)
1155  if ((++bit) >= 64)
1156  {
1157  ++entry;
1158  bit = 0;
1159  }
1160  }
1161 
1162  m_HeapPageCount -= length / getPageSize();
1163 }
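
freeSlab() turns the slab address back into a bitmap position: page index = (address - m_Base) / pageSize, then entry = index / 64 and bit = index % 64, clearing one bit per freed page. A quick worked example of that conversion (the addresses are made up):

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t pageSize = 0x1000;

    // Both addresses are assumed; only the arithmetic matters.
    const uint64_t slabBase = 0xFFFF800000040000ULL;  // plays the role of m_Base
    const uint64_t slab = slabBase + 0x123000;        // slab being freed

    uint64_t pageIndex = (slab - slabBase) / pageSize;  // 0x123 = 291
    std::printf("entry %llu, bit %llu\n",
                (unsigned long long)(pageIndex / 64),   // 4
                (unsigned long long)(pageIndex % 64));  // 35
    return 0;
}
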
1164 
1165 size_t SlamAllocator::recovery(size_t maxSlabs)
1166 {
1167  size_t nSlabs = 0;
1168  size_t nPages = 0;
1169 
1170  for (size_t i = 0; i < 32; ++i)
1171  {
1172  // Things without slabs don't get recovered.
1173  if (!m_Caches[i].slabSize())
1174  continue;
1175 
1176  size_t thisSlabs = m_Caches[i].recovery(maxSlabs);
1177  nPages += (thisSlabs * m_Caches[i].slabSize()) / getPageSize();
1178  nSlabs += thisSlabs;
1179  if (nSlabs >= maxSlabs)
1180  {
1181  break;
1182  }
1183  }
1184 
1185  return nPages;
1186 }
1187 
1188 uintptr_t SlamAllocator::allocate(size_t nBytes)
1189 {
1190 #if DEBUGGING_SLAB_ALLOCATOR
1191  NOTICE_NOLOCK("SlabAllocator::allocate(" << Dec << nBytes << Hex << ")");
1192 #endif
1193 
1194 #if SLAM_LOCKED
1195  LockGuard<Spinlock> guard(m_Lock);
1196 #endif
1197 
1198  if (UNLIKELY(!m_bInitialised))
1199  initialise();
1200 
1201 #if CRIPPLINGLY_VIGILANT
1202  if (m_bVigilant)
1203  for (int i = 0; i < 32; i++)
1204  m_Caches[i].check();
1205 #endif
1206 
1207 #if defined(MEMORY_TRACING) || WARN_PAGE_SIZE_OR_LARGER
1208  size_t origSize = nBytes;
1209 #endif
1210 
1211  // Return value.
1212  uintptr_t ret = 0;
1213 
1214  // Add in room for the allocation footer
1215  nBytes += sizeof(AllocHeader) + sizeof(AllocFooter);
1216 
1217  // Don't allow huge allocations.
1220  assert(nBytes < (1U << 31));
1221 
1222  // Default to minimum object size if we must.
1223  size_t lg2 = 0;
1224  if (UNLIKELY(nBytes < OBJECT_MINIMUM_SIZE))
1225  {
1226  nBytes = OBJECT_MINIMUM_SIZE;
1227  }
1228 
1229  // log2 of nBytes, where nBytes is rounded up to the next power-of-two.
1230  lg2 = 32 - __builtin_clz(nBytes);
1231  nBytes = 1U << lg2; // Round up nBytes now.
1232  ret = m_Caches[lg2].allocate();
1233 
1234 #if WARN_PAGE_SIZE_OR_LARGER
1235  // Does the allocation fit inside a slab?
1236  // NOTE: use something else to allocate 4K or more.
1237  if (nBytes >= getPageSize())
1238  {
1239 #pragma GCC diagnostic push
1240 #pragma GCC diagnostic ignored "-Wframe-address"
1241  // return address of operator new()
1242  void *ret0 = __builtin_return_address(0);
1243  void *ret1 = __builtin_return_address(1);
1244  ERROR(
1245  "alloc of " << origSize << " rounded to " << nBytes
1246  << " exceeds page size [at " << ret0 << " " << ret1
1247  << "]!");
1248 #pragma GCC diagnostic pop
1249  }
1250 #endif
1251 
1252 // l.release();
1253 #if DEBUGGING_SLAB_ALLOCATOR
1254  if (UNLIKELY(!ret))
1255  {
1256  ERROR_NOLOCK(
1257  "SlabAllocator::allocate: Allocation failed (" << Dec << nBytes
1258  << Hex << " bytes)");
1259  return ret;
1260  }
1261 #else
1262  assert(ret != 0);
1263 #endif
1264 
1265  // Shove some data on the front that we'll use later
1266  AllocHeader *head = reinterpret_cast<AllocHeader *>(ret);
1267  AllocFooter *foot =
1268  reinterpret_cast<AllocFooter *>(ret + nBytes - sizeof(AllocFooter));
1269  ret += sizeof(AllocHeader);
1270 
1271  // Set up the header
1272  head->cache = &m_Caches[lg2];
1273 #if OVERRUN_CHECK
1274  head->magic = VIGILANT_MAGIC;
1275  foot->magic = VIGILANT_MAGIC;
1276 
1277 #if BOCHS_MAGIC_WATCHPOINTS
1278  // asm volatile("xchg %%cx,%%cx" :: "a" (&head->catcher));
1280  asm volatile("xchg %%cx,%%cx" ::"a"(&foot->catcher));
1281 #endif
1282 #if VIGILANT_OVERRUN_CHECK
1283  if (Processor::m_Initialised == 2)
1284  {
1285  Backtrace bt;
1286  bt.performBpBacktrace(0, 0);
1287  MemoryCopy(
1288  &head->backtrace, bt.m_pReturnAddresses,
1289  NUM_SLAM_BT_FRAMES * sizeof(uintptr_t));
1290  head->requested = nBytes;
1291  g_SlamCommand.addAllocation(head->backtrace, head->requested);
1292  }
1293 #endif
1294 #endif
1295 
1296 #ifdef THREADS
1297  if (Processor::m_Initialised == 2)
1298  {
1299  Thread *pThread = Processor::information().getCurrentThread();
1300  if (pThread)
1301  {
1302  pThread->getParent()->trackHeap(nBytes);
1303  }
1304  }
1305 #endif
1306 
1307 #ifdef MEMORY_TRACING
1308  traceAllocation(
1309  reinterpret_cast<void *>(ret), MemoryTracing::Allocation, origSize);
1310 #endif
1311 
1312  return ret;
1313 }
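
allocate() adds the header and footer overhead, clamps to OBJECT_MINIMUM_SIZE, and rounds the result up to a power-of-two size class with lg2 = 32 - clz(n), which indexes straight into m_Caches[] (note an exact power of two lands in the next class up). A short sketch of that rounding; the header, footer, and minimum sizes below are assumptions for illustration, only the rounding rule itself comes from the code above:

#include <cstddef>
#include <cstdio>

int main()
{
    // Assumed sizes for illustration only; the real values come from
    // AllocHeader, AllocFooter and OBJECT_MINIMUM_SIZE.
    const size_t kHeader = 16, kFooter = 8, kMinimumObject = 32;

    const size_t requests[] = {1, 24, 100, 500, 4000};
    for (size_t req : requests)
    {
        size_t n = req + kHeader + kFooter;
        if (n < kMinimumObject)
            n = kMinimumObject;

        // Same rounding as allocate(): 32 - clz(n) selects the cache index.
        unsigned lg2 = 32 - __builtin_clz(static_cast<unsigned>(n));
        std::printf("request %4zu -> cache[%2u], object size %5zu\n",
                    req, lg2, size_t(1) << lg2);
    }
    return 0;
}
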
1314 
1315 size_t SlamAllocator::allocSize(uintptr_t mem)
1316 {
1317  if (!mem)
1318  return 0;
1319 
1320  // Grab the header
1321  AllocHeader *head =
1322  reinterpret_cast<AllocHeader *>(mem - sizeof(AllocHeader));
1323 
1324  // If the cache is null, then the pointer is corrupted.
1325  assert(head->cache != 0);
1326  size_t result = head->cache->objectSize();
1327 
1328  // Remove size of header/footer.
1329  // This is important as we're returning the size of each object itself,
1330  // but we return memory framed by headers and footers. So, the "true" size
1331  // of memory pointed to by 'mem' is not the true object size.
1332  return result - (sizeof(AllocHeader) + sizeof(AllocFooter));
1333 }
1334 
1335 void SlamAllocator::free(uintptr_t mem)
1336 {
1337 #if DEBUGGING_SLAB_ALLOCATOR
1338  NOTICE_NOLOCK("SlabAllocator::free");
1339 #endif
1340 
1341 #if SLAM_LOCKED
1342  LockGuard<Spinlock> guard(m_Lock);
1343 #endif
1344 
1345  // If we're not initialised, fix that
1346  if (UNLIKELY(!m_bInitialised))
1347  initialise();
1348  if (UNLIKELY(!mem))
1349  return;
1350 
1351 #if CRIPPLINGLY_VIGILANT
1352  if (m_bVigilant)
1353  for (int i = 0; i < 32; i++)
1354  m_Caches[i].check();
1355 #endif
1356 
1357 // Ensure this pointer is even on the heap...
1358 #ifndef PEDIGREE_BENCHMARK
1359  if (!Processor::information().getVirtualAddressSpace().memIsInKernelHeap(
1360  reinterpret_cast<void *>(mem)))
1361  FATAL_NOLOCK(
1362  "SlamAllocator::free - given pointer '"
1363  << mem << "' was completely invalid.");
1364 #endif
1365 
1366  // Grab the header
1367  AllocHeader *head =
1368  reinterpret_cast<AllocHeader *>(mem - sizeof(AllocHeader));
1369 
1370  // If the cache is null, then the pointer is corrupted.
1371  assert(head->cache != 0);
1372 #if OVERRUN_CHECK
1373  assert(head->magic == VIGILANT_MAGIC);
1374  // Footer gets checked in SlamCache::free, as we don't know the object size.
1375 
1376 #if BOCHS_MAGIC_WATCHPOINTS
1377 // asm volatile("xchg %%dx,%%dx" :: "a" (&head->catcher));
1379 #endif
1380 #if VIGILANT_OVERRUN_CHECK
1381  if (Processor::m_Initialised == 2)
1382  g_SlamCommand.removeAllocation(head->backtrace, head->requested);
1383 #endif
1384 #endif
1385 
1386  SlamCache *pCache = head->cache;
1387  head->cache = 0; // Wipe out the cache - freed page.
1388 
1389 // Scribble the freed buffer (both to avoid leaking information, and also
1390 // to ensure anything using a freed object will absolutely fail).
1391 #if SCRIBBLE_FREED_BLOCKS
1392  size_t size =
1393  pCache->objectSize() - sizeof(AllocHeader) - sizeof(AllocFooter);
1394  ByteSet(reinterpret_cast<void *>(mem), 0xAB, size);
1395 #endif
1396 
1397 #ifdef THREADS
1398  if (Processor::m_Initialised == 2)
1399  {
1400  Thread *pThread = Processor::information().getCurrentThread();
1401  if (pThread)
1402  {
1403  pThread->getParent()->trackHeap(-pCache->objectSize());
1404  }
1405  }
1406 #endif
1407 
1408  // Free now.
1409  pCache->free(mem - sizeof(AllocHeader));
1410 
1411 #ifdef MEMORY_TRACING
1412  traceAllocation(reinterpret_cast<void *>(mem), MemoryTracing::Free, 0);
1413 #endif
1414 }
1415 
1416 bool SlamAllocator::isPointerValid(uintptr_t mem)
1417 #if !SLAM_LOCKED
1418  const
1419 #endif
1420 {
1421 #if DEBUGGING_SLAB_ALLOCATOR
1422  NOTICE_NOLOCK("SlabAllocator::isPointerValid");
1423 #endif
1424 
1425 #if SLAM_LOCKED
1426  LockGuard<Spinlock> guard(m_Lock);
1427 #endif
1428 
1429  // If we're not initialised, fix that
1430  if (UNLIKELY(!m_bInitialised))
1431  {
1432  return false;
1433  }
1434 
1435  // 0 is fine to free.
1436  if (!mem)
1437  {
1438  return true;
1439  }
1440 
1441 // On the heap?
1442 #ifndef PEDIGREE_BENCHMARK
1443  if (!Processor::information().getVirtualAddressSpace().memIsInKernelHeap(
1444  reinterpret_cast<void *>(mem)))
1445  {
1446 #if VERBOSE_ISPOINTERVALID
1447  WARNING(
1448  "SlamAllocator::isPointerValid: memory "
1449  << Hex << mem << " is not in the heap region.");
1450 #endif
1451  return false;
1452  }
1453 #endif
1454 
1455 #if CRIPPLINGLY_VIGILANT
1456  if (m_bVigilant)
1457  for (int i = 0; i < 32; i++)
1458  m_Caches[i].check();
1459 #endif
1460 
1461  // Grab the header
1462  AllocHeader *head =
1463  reinterpret_cast<AllocHeader *>(mem - sizeof(AllocHeader));
1464 
1465 #if OVERRUN_CHECK
1466  if (head->magic != VIGILANT_MAGIC)
1467  {
1468 #if VERBOSE_ISPOINTERVALID
1469  WARNING(
1470  "SlamAllocator::isPointerValid: memory "
1471  << Hex << mem << " failed magic check (" << head->magic
1472  << " != " << VIGILANT_MAGIC << ").");
1473 #endif
1474  return false;
1475  }
1476 // Footer gets checked in SlamCache::free, as we don't know the object size.
1477 #endif
1478 
1479  // If the cache is null, then the pointer is corrupted.
1480  if (head->cache == 0)
1481  {
1482 #if VERBOSE_ISPOINTERVALID
1483  WARNING(
1484  "SlamAllocator::isPointerValid: memory "
1485  << Hex << mem << " does not reference a valid SlamCache.");
1486 #endif
1487  return false;
1488  }
1489 
1490  // Check for a valid cache
1491  bool bValid = false;
1492  for (int i = 0; i < 32; i++)
1493  {
1494  if (head->cache == &m_Caches[i])
1495  {
1496  bValid = true;
1497  break;
1498  }
1499  }
1500 
1501  if (!bValid)
1502  {
1503  WARNING_NOLOCK(
1504  "SlamAllocator::isPointerValid - cache pointer '"
1505  << reinterpret_cast<uintptr_t>(head->cache) << "' is invalid.");
1506  return false;
1507  }
1508 
1509  // Final validation.
1510  return head->cache->isPointerValid(mem - sizeof(AllocHeader));
1511 }
1512 
1513 bool SlamAllocator::isWithinHeap(uintptr_t mem) const
1514 {
1515 #ifndef PEDIGREE_BENCHMARK
1516  if (!Processor::information().getVirtualAddressSpace().memIsInKernelHeap(
1517  reinterpret_cast<void *>(mem)))
1518  {
1519 #if VERBOSE_ISPOINTERVALID
1520  WARNING(
1521  "SlamAllocator::isWithinHeap: memory "
1522  << Hex << mem << " is not in the heap region.");
1523 #endif
1524  return false;
1525  }
1526 #endif
1527 
1528  return true;
1529 }
1530 
1531 bool _assert_ptr_valid(uintptr_t ptr)
1532 {
1533  return SlamAllocator::instance().isPointerValid(ptr);
1534 }
1535 
1536 #endif // !defined(SLAM_USE_DEBUG_ALLOCATOR)