The Pedigree Project  0.1
SlamAllocator.h
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef SLAM_ALLOCATOR_H
21 #define SLAM_ALLOCATOR_H
22 
29 #include "pedigree/kernel/Spinlock.h"
30 #include "pedigree/kernel/compiler.h"
31 #include "pedigree/kernel/processor/types.h"
32 
33 #ifdef PEDIGREE_BENCHMARK
34 namespace SlamSupport
35 {
36 uintptr_t getHeapBase();
37 uintptr_t getHeapEnd();
38 void getPageAt(void *addr);
39 void unmapPage(void *page);
40 void unmapAll();
41 } // namespace SlamSupport
42 #endif
43 
44 class SlamAllocator;
45 class SlamCache;
46 
48 #define SLAB_SIZE 1
49 
51 #define SLAB_MINIMUM_SIZE (4096 * SLAB_SIZE)
52 
58 #define USING_MAGIC 1
59 
61 #define MAGIC_TYPE uintptr_t
62 
64 #define MAGIC_VALUE 0xb00b1e55ULL
65 
67 #define ABSOLUTE_MINIMUM_SIZE 64
68 #define ALL_HEADERS_SIZE \
69  (sizeof(SlamCache::Node) + sizeof(SlamAllocator::AllocHeader) + \
70  sizeof(SlamAllocator::AllocFooter))
71 #define OBJECT_MINIMUM_SIZE \
72  (ALL_HEADERS_SIZE < ABSOLUTE_MINIMUM_SIZE ? ABSOLUTE_MINIMUM_SIZE : \
73  ALL_HEADERS_SIZE)
74 
76 #define DEBUGGING_SLAB_ALLOCATOR 0
77 
79 #define TEMP_MAGIC 0x67845753
80 
83 #ifdef USE_DEBUG_ALLOCATOR
84 #define OVERRUN_CHECK 0
85 #else
86 #define OVERRUN_CHECK 1
87 #endif
88 
91 #define VIGILANT_OVERRUN_CHECK 0
92 
93 #define VIGILANT_MAGIC 0x1337cafe
94 
97 #define CRIPPLINGLY_VIGILANT 0
98 
103 #define BOCHS_MAGIC_WATCHPOINTS 0
104 
108 #define SCRIBBLE_FREED_BLOCKS 1
109 
111 #define WARN_PAGE_SIZE_OR_LARGER 0
112 
117 #define SLABS_FOR_HUGE_ALLOCS 0
118 
120 #define VERBOSE_ISPOINTERVALID 0
121 
126 #define EVERY_ALLOCATION_IS_A_SLAB 0
127 
130 #ifdef SLAM_USE_DEBUG_ALLOCATOR
131 #define SLAM_LOCKED 1 // need the lock for the debug allocator
132 #elif !defined(THREADS)
133 #define SLAM_LOCKED 0 // never use if no threading
134 #else
135 #define SLAM_LOCKED 0
136 #endif
137 
138 // Define this to enable the debug allocator (which is basically placement new).
139 // #define SLAM_USE_DEBUG_ALLOCATOR
140 
142 class SlamCache
143 {
144  // struct Node must be public so that sizeof(SlamCache::Node) is available.
145  public:
147  struct Node
148  {
149  Node *next;
150 #if USING_MAGIC
151  MAGIC_TYPE magic;
152 #endif
153  } __attribute__((aligned(16)));
154 
156  SlamCache();
159  virtual ~SlamCache();
160 
162  void initialise(SlamAllocator *parent, size_t objectSize);
163 
165  uintptr_t allocate();
166 
168  void free(uintptr_t object);
169 
171  size_t recovery(size_t maxSlabs);
172 
173  bool isPointerValid(uintptr_t object) const;
174 
175  inline size_t objectSize() const
176  {
177  return m_ObjectSize;
178  }
179 
180  inline size_t slabSize() const
181  {
182  return m_SlabSize;
183  }
184 
185 #if CRIPPLINGLY_VIGILANT
186  void trackSlab(uintptr_t slab);
187  void check();
188 #endif
189 
190  private:
191  SlamCache(const SlamCache &);
192  const SlamCache &operator=(const SlamCache &);
193 
194 #ifdef MULTIPROCESSOR
195 #define NUM_LISTS 255
197 #else
198 #define NUM_LISTS 1
199 #endif
200  typedef volatile Node *alignedNode;
201  alignedNode m_PartialLists[NUM_LISTS];
202 
203  Node *pop(alignedNode *head);
204  /* newHead = 0 to use newTail. */
205  void push(alignedNode *head, Node *newTail, Node *newHead = 0);
206 
207  uintptr_t getSlab();
208  void freeSlab(uintptr_t slab);
209 
210  Node *initialiseSlab(uintptr_t slab);
211 
212  size_t m_ObjectSize;
213  size_t m_SlabSize;
214 
215  // This version of the allocator doesn't have a free list; instead,
216  // the reap() function returns memory directly to the VMM. This
217  // avoids needing to lock the free list on MP systems.
218 
219  uintptr_t m_FirstSlab;
220 
221 #ifdef THREADS
222  Spinlock m_RecoveryLock;
229 #endif
230 
233  SlamAllocator *m_pParentAllocator;
234  struct Node m_EmptyNode;
235 };
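/*
 * Editor's sketch, not part of the original header and not the real
 * implementation (which lives in the allocator's .cc file): an assumed,
 * minimal illustration of how the magic-tagged free list declared above is
 * typically used. free() stamps MAGIC_VALUE into the embedded Node before
 * re-linking it; allocate() pops a Node and verifies the stamp to catch
 * double frees and corruption of freed objects. The single-list index and
 * the FATAL() logging macro are assumptions made for the example only.
 *
 *   uintptr_t SlamCache::allocate()
 *   {
 *       Node *N = pop(&m_PartialLists[0]);   // assumed: one list, no SMP
 *       if (!N)
 *           N = initialiseSlab(getSlab());   // grow the cache by one slab
 *   #if USING_MAGIC
 *       if (N->magic != MAGIC_VALUE)
 *           FATAL("SlamCache: freed-object magic is corrupt");
 *       N->magic = TEMP_MAGIC;               // mark the object as in use
 *   #endif
 *       return reinterpret_cast<uintptr_t>(N);
 *   }
 *
 *   void SlamCache::free(uintptr_t object)
 *   {
 *       Node *N = reinterpret_cast<Node *>(object);
 *   #if USING_MAGIC
 *       N->magic = MAGIC_VALUE;              // stamp before re-linking
 *   #endif
 *       push(&m_PartialLists[0], N);
 *   }
 */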
236 
237 class SlamAllocator
238 {
239  public:
240  SlamAllocator();
241  virtual ~SlamAllocator();
242 
243  void initialise();
244 
245 #ifdef PEDIGREE_BENCHMARK
246  // quickly clear all allocations from the allocator
247  void clearAll();
248 #endif
249 
250  uintptr_t allocate(size_t nBytes);
251  void free(uintptr_t mem);
252 
253  size_t recovery(size_t maxSlabs = 1);
254 
255  bool isPointerValid(uintptr_t mem)
256 #if !SLAM_LOCKED
257  const
258 #endif
259  ;
260  bool isWithinHeap(uintptr_t mem) const;
261 
262  size_t allocSize(uintptr_t mem);
263 
264  static SlamAllocator &instance()
265  {
266 #ifdef PEDIGREE_BENCHMARK
267  static SlamAllocator instance;
268  return instance;
269 #else
270  return m_Instance;
271 #endif
272  }
273 
274  size_t heapPageCount() const
275  {
276  return m_HeapPageCount;
277  }
278 
279  uintptr_t getSlab(size_t fullSize);
280  void freeSlab(uintptr_t address, size_t length);
281 
282 #ifdef USE_DEBUG_ALLOCATOR
283  inline size_t headerSize() const
284  {
285  return sizeof(AllocHeader);
286  }
287  inline size_t footerSize() const
288  {
289  return sizeof(AllocFooter);
290  }
291 #endif
292 
293  void setVigilance(bool b)
294  {
295  m_bVigilant = b;
296  }
297  bool getVigilance() const
298  {
299  return m_bVigilant;
300  }
301 
302  private:
303  SlamAllocator(const SlamAllocator &);
304  const SlamAllocator &operator=(const SlamAllocator &);
305 
306 #ifndef PEDIGREE_BENCHMARK
307  EXPORTED_PUBLIC static SlamAllocator m_Instance;
309 #endif
310 
312  void wipe();
313 
314  SlamCache m_Caches[32];
315 
316  public:
319  struct AllocHeader
320  {
321  // Already-present and embedded Node fields.
322  SlamCache::Node node;
323 #if OVERRUN_CHECK
324 #if BOCHS_MAGIC_WATCHPOINTS
325  uint32_t catcher;
326 #endif
327  size_t magic;
328 #if VIGILANT_OVERRUN_CHECK
329  uintptr_t backtrace[NUM_SLAM_BT_FRAMES];
330  size_t requested;
331 #endif
332 #endif
333  SlamCache *cache;
334  } __attribute__((aligned(16)));
335 
336  struct AllocFooter
337  {
338 #if OVERRUN_CHECK
339 #if BOCHS_MAGIC_WATCHPOINTS
340  uint32_t catcher;
341 #endif
342  size_t magic;
343 #endif
344  } __attribute__((aligned(16)));
345 
346  private:
347  bool m_bInitialised;
348 
349  bool m_bVigilant;
350 
351 #ifdef THREADS
352  Spinlock m_SlabRegionLock;
353 #endif
354 
355  size_t m_HeapPageCount;
356 
357  uint64_t *m_SlabRegionBitmap;
358  size_t m_SlabRegionBitmapEntries;
359 
360  uintptr_t m_Base;
361 
362 #if SLAM_LOCKED
363  Spinlock m_Lock;
364 #endif
365 };
366 
367 #endif
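A minimal usage sketch (an editor's assumption, not part of the header: the
include path and the example function are hypothetical, and the real kernel
wires operator new/delete to this allocator elsewhere in the tree) showing
the public interface declared above:

    #include "SlamAllocator.h"  // assumed include path

    void heapExample()
    {
        // Grab the kernel-wide singleton and allocate 128 bytes.
        uintptr_t mem = SlamAllocator::instance().allocate(128);

        // allocSize() reports the size of the block actually handed out,
        // which may be rounded up to the owning cache's object size.
        size_t usable = SlamAllocator::instance().allocSize(mem);
        (void) usable;

        // isPointerValid() sanity-checks the pointer before it is freed.
        if (SlamAllocator::instance().isPointerValid(mem))
            SlamAllocator::instance().free(mem);
    }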