The Pedigree Project  0.1
DebugAllocator.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef SLAM_USE_DEBUG_ALLOCATOR

#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/core/SlamAllocator.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/utilities/MemoryTracing.h"
#include "pedigree/kernel/utilities/assert.h"

#ifdef THREADS
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Thread.h"
#endif

SlamAllocator SlamAllocator::m_Instance;

inline uintptr_t getHeapBase()
{
    return VirtualAddressSpace::getKernelAddressSpace().getKernelHeapStart();
}

inline uintptr_t getHeapEnd()
{
    return VirtualAddressSpace::getKernelAddressSpace().getKernelHeapEnd();
}

inline size_t getPageSize()
{
    return PhysicalMemoryManager::getPageSize();
}

inline void allocateAndMapAt(void *addr)
{
    size_t standardFlags =
        VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write;

    static physical_uintptr_t physZero = 0;

    physical_uintptr_t phys = PhysicalMemoryManager::instance().allocatePage();

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    if (!va.map(phys, addr, standardFlags))
    {
        FATAL("SlamAllocator: failed to allocate and map at " << addr);
    }
}

inline void unmap(void *addr)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    if (!va.isMapped(addr))
        return;

    physical_uintptr_t phys;
    size_t flags;
    va.getMapping(addr, phys, flags);
    va.unmap(addr);

    PhysicalMemoryManager::instance().freePage(phys);
}

inline bool isMapped(void *addr)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    return va.isMapped(addr);
}

inline void markReadOnly(void *addr)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    // remap kernel-only without the Write flag so the page becomes read-only
    va.setFlags(addr, VirtualAddressSpace::KernelMode);
}

SlamCache::SlamCache()
    : m_PartialLists(), m_ObjectSize(0), m_SlabSize(0), m_FirstSlab()
#ifdef THREADS
      ,
      m_RecoveryLock(false)
#endif
      ,
      m_EmptyNode()
{
}

SlamCache::~SlamCache()
{
}

void SlamCache::initialise(SlamAllocator *parent, size_t objectSize)
{
    // no-op for debug allocator
}

SlamCache::Node *SlamCache::pop(SlamCache::alignedNode *head)
{
    // no-op for debug allocator
    return nullptr;
}

void SlamCache::push(
    SlamCache::alignedNode *head, SlamCache::Node *newTail,
    SlamCache::Node *newHead)
{
    // no-op for debug allocator
}

uintptr_t SlamCache::allocate()
{
    // no-op for debug allocator
    return 0;
}

void SlamCache::free(uintptr_t object)
{
    // no-op for debug allocator
}

bool SlamCache::isPointerValid(uintptr_t object) const
{
    // no-op for debug allocator
    return false;
}

uintptr_t SlamCache::getSlab()
{
    // no-op for debug allocator
    return 0;
}

void SlamCache::freeSlab(uintptr_t slab)
{
    // no-op for debug allocator
}

size_t SlamCache::recovery(size_t maxSlabs)
{
    // no-op for debug allocator
    return 0;
}

SlamCache::Node *SlamCache::initialiseSlab(uintptr_t slab)
{
    // no-op for debug allocator
    return nullptr;
}

#if CRIPPLINGLY_VIGILANT
void SlamCache::check()
{
    // no-op for debug allocator
}

void SlamCache::trackSlab(uintptr_t slab)
{
    // no-op for debug allocator
}
#endif

SlamAllocator::SlamAllocator()
    : m_bInitialised(false), m_bVigilant(false),
#ifdef THREADS
      m_SlabRegionLock(false),
#endif
      m_HeapPageCount(0), m_SlabRegionBitmap(), m_SlabRegionBitmapEntries(0),
      m_Base(0)
{
}

SlamAllocator::~SlamAllocator()
{
    if (m_bInitialised)
    {
        wipe();
    }
}

void SlamAllocator::initialise()
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    if (m_bInitialised)
    {
        return;
    }

    m_Base = getHeapBase();
    m_bInitialised = true;
}

void SlamAllocator::wipe()
{
    // no-op for debug allocator
}

uintptr_t SlamAllocator::getSlab(size_t fullSize)
{
    // no-op for debug allocator
    return 0;
}

void SlamAllocator::freeSlab(uintptr_t address, size_t length)
{
    // no-op for debug allocator
}

size_t SlamAllocator::recovery(size_t maxSlabs)
{
    // no-op for debug allocator
    return 0;
}
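
// Allocation layout used by allocate() below; every request gets fresh pages:
//
//   [ unmapped gap page ][ header page ][ data page(s) ... ][ unmapped gap ]
//
// The header page stores the page count at (result - sizeof(size_t)) and is
// marked read-only once that count has been written, so writes before the
// returned pointer fault on the read-only header page, and overruns past the
// data pages land on an unmapped gap page and fault as well.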
uintptr_t SlamAllocator::allocate(size_t nBytes)
{
    if (!m_bInitialised)
    {
        initialise();
    }

    if (!nBytes)
    {
        return 0;
    }

    uintptr_t mapStart = 0, mapEnd = 0, result = 0;
    size_t nTotalBytes = 0, numPages = 0;

    numPages = nBytes / getPageSize();
    if (nBytes % getPageSize())
    {
        ++numPages;
    }
    if (!numPages)
    {
        ++numPages;
    }
    nTotalBytes = numPages * getPageSize();

    {
        RecursingLockGuard<Spinlock> guard(m_Lock);

        m_Base += getPageSize();  // gap between allocations
        mapStart = m_Base;
        m_Base += getPageSize();  // page for the allocation header (readonly
                                  // once it's written to)
        result = m_Base;
        m_Base += numPages * getPageSize();
        mapEnd = m_Base;
    }

    for (uintptr_t addr = mapStart; addr < mapEnd; addr += getPageSize())
    {
        allocateAndMapAt(reinterpret_cast<void *>(addr));

        ++m_HeapPageCount;
    }

    *((size_t *) (result - sizeof(size_t))) = numPages;

    // now that the size is written we can mark the header section readonly
    markReadOnly(reinterpret_cast<void *>(mapStart));

#ifdef THREADS
    if (Processor::m_Initialised == 2)
    {
        Thread *pThread = Processor::information().getCurrentThread();
        if (pThread)
        {
            pThread->getParent()->trackHeap(nTotalBytes);
        }
    }
#endif

#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(result), MemoryTracing::Allocation,
        nTotalBytes);
#endif

    return result;
}

size_t SlamAllocator::allocSize(uintptr_t mem)
{
    if (!m_bInitialised)
    {
        return 0;
    }

    if (!mem)
    {
        return 0;
    }

    return *((size_t *) (mem - sizeof(size_t))) * getPageSize();
}
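
// The debug free() below tears the whole mapping down again: the header page
// and every data page are unmapped and their frames released, so any later
// access through a stale pointer takes a page fault instead of silently
// reusing recycled memory. The virtual range is never handed out again,
// because m_Base only ever moves forward.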
void SlamAllocator::free(uintptr_t mem)
{
    assert(m_bInitialised);

    if (!mem)
    {
        return;
    }

#ifdef MEMORY_TRACING
    // do this first so we can detect double frees before the asserts/memory
    // accesses below - this just helps a lot with tracing these issues after
    // the fact
    traceAllocation(reinterpret_cast<void *>(mem), MemoryTracing::Free, 0);
#endif

    assert(isMapped(reinterpret_cast<void *>(mem)));

    if (!isPointerValid(mem))
    {
        return;
    }

    size_t numPages = *((size_t *) (mem - sizeof(size_t)));
    size_t nBytes = numPages * getPageSize();

    uintptr_t unmapStart = mem - getPageSize();
    uintptr_t unmapEnd = mem + nBytes;

    for (uintptr_t addr = unmapStart; addr < unmapEnd; addr += getPageSize())
    {
        unmap(reinterpret_cast<void *>(addr));

        --m_HeapPageCount;
    }

#ifdef THREADS
    if (Processor::m_Initialised == 2)
    {
        Thread *pThread = Processor::information().getCurrentThread();
        if (pThread)
        {
            pThread->getParent()->trackHeap(-nBytes);
        }
    }
#endif

#ifdef MEMORY_TRACING
    traceAllocation(reinterpret_cast<void *>(mem), MemoryTracing::Free, 0);
#endif
}

bool SlamAllocator::isPointerValid(uintptr_t mem)
#if !SLAM_LOCKED
    const
#endif
{
    if (!m_bInitialised)
    {
        return false;
    }

    // On the heap?
    if (!Processor::information().getVirtualAddressSpace().memIsInKernelHeap(
            reinterpret_cast<void *>(mem)))
    {
#if VERBOSE_ISPOINTERVALID
        WARNING(
            "SlamAllocator::isPointerValid: memory "
            << Hex << mem << " is not in the heap region.");
#endif
        return false;
    }

    if (!isMapped(reinterpret_cast<void *>(mem)))
    {
#if VERBOSE_ISPOINTERVALID
        WARNING(
            "SlamAllocator::isPointerValid: memory "
            << Hex << mem << " is not mapped [current base = " << Hex << m_Base
            << "].");
#endif
        if (mem >= m_Base)
        {
#if VERBOSE_ISPOINTERVALID
            WARNING(" (pointer being deleted is beyond the end of the heap "
                    "somehow)");
#endif
        }
        return false;
    }

    return true;
}

bool SlamAllocator::isWithinHeap(uintptr_t mem) const
{
    if (!Processor::information().getVirtualAddressSpace().memIsInKernelHeap(
            reinterpret_cast<void *>(mem)))
    {
#if VERBOSE_ISPOINTERVALID
        WARNING(
            "SlamAllocator::isWithinHeap: memory "
            << Hex << mem << " is not in the heap region.");
#endif
        return false;
    }

    return true;
}

bool _assert_ptr_valid(uintptr_t ptr)
{
    return SlamAllocator::instance().isPointerValid(ptr);
}

#endif  // defined(SLAM_USE_DEBUG_ALLOCATOR)
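
A short usage sketch may help illustrate what the page-granular scheme above
catches. The allocate()/free()/isPointerValid() calls are the ones defined in
this file; the caller code itself is illustrative and not part of
DebugAllocator.cc:

    uintptr_t mem = SlamAllocator::instance().allocate(32);
    char *p = reinterpret_cast<char *>(mem);
    p[0] = 'x';  // fine: inside the freshly mapped data page

    // Sizes are rounded up to whole pages, so an overrun only faults once it
    // crosses the page boundary into the unmapped gap after the data pages.

    SlamAllocator::instance().free(mem);
    // p[0] = 'y';                                    // now faults: unmapped
    // SlamAllocator::instance().isPointerValid(mem)  // now returns false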