system/kernel/core/lib/cppsupport.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "pedigree/kernel/core/cppsupport.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/compiler.h"
#include "pedigree/kernel/core/SlamAllocator.h"
#include "pedigree/kernel/processor/types.h"
#include "pedigree/kernel/utilities/utility.h"

#define DEBUG_ALLOCATOR_CHECK_UNDERFLOWS

// Required for G++ to link static init/destructors.
#ifndef HOSTED
extern "C" void *__dso_handle;
#endif

// Defined in the linker script.
extern uintptr_t start_kernel_ctors;
extern uintptr_t end_kernel_ctors;
extern uintptr_t start_kernel_dtors;
extern uintptr_t end_kernel_dtors;

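// Illustrative only: the symbols above are expected to come from the kernel's
// linker script, which typically gathers the constructor/destructor pointer
// arrays along these lines (the real script in the tree may differ):
//
//   start_kernel_ctors = .; KEEP(*(.ctors)); end_kernel_ctors = .;
//   start_kernel_dtors = .; KEEP(*(.dtors)); end_kernel_dtors = .;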
void initialiseConstructors()
{
    // Constructor list is defined in the linker script.
    // The .ctors section is just an array of function pointers.
    // Iterate through, calling each in turn.
    uintptr_t *iterator = &start_kernel_ctors;
    while (iterator < &end_kernel_ctors)
    {
        void (*fp)(void) = reinterpret_cast<void (*)(void)>(*iterator);
        fp();
        iterator++;
    }
}

// Calls the kernel's global destructors (the .dtors list, also provided by
// the linker script).
void runKernelDestructors()
{
    uintptr_t *iterator = &start_kernel_dtors;
    while (iterator < &end_kernel_dtors)
    {
        void (*fp)(void) = reinterpret_cast<void (*)(void)>(*iterator);
        fp();
        iterator++;
    }
}

#ifndef MEMORY_TRACING
static bool traceAllocations = false;
#endif

#ifdef MEMORY_TRACING
static bool traceAllocations = true;
void startTracingAllocations()
{
    traceAllocations = true;
}

void stopTracingAllocations()
{
    traceAllocations = false;
}

void toggleTracingAllocations()
{
    traceAllocations = !traceAllocations;
}

static volatile int g_TraceLock = 0;

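// Emits one trace record per allocation event. The record (an
// AllocationTraceEntry) carries the event type, size, pointer, and up to
// num_backtrace_entries return addresses gathered via
// __builtin_return_address(), skipping the immediate allocate()/free() frame.
// The raw bytes are pushed out of I/O port 0x2E8 (conventionally COM4 on x86)
// with interrupts disabled for the duration of the write.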
void traceAllocation(
    void *ptr, MemoryTracing::AllocationTrace type, size_t size)
{
    // Don't trace if we're not allowed to.
    if (!traceAllocations)
        return;

    // Ignore physical allocations just for now.
    switch (type)
    {
        case MemoryTracing::Allocation:
        case MemoryTracing::Free:
        case MemoryTracing::Metadata:
            break;
        default:
            return;  // ignore
    }

    // Needed by BT_FRAME below, which checks that frame pointers are mapped
    // before following them.
    VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();

    MemoryTracing::AllocationTraceEntry entry;
    entry.data.type = type;
    entry.data.sz = size & 0xFFFFFFFFU;
    entry.data.ptr = reinterpret_cast<uintptr_t>(ptr);
    for (size_t i = 0; i < MemoryTracing::num_backtrace_entries; ++i)
    {
        entry.data.bt[i] = 0;
    }

#define BT_FRAME(M, N)                                                     \
    do                                                                     \
    {                                                                      \
        if (M && !entry.data.bt[M - 1])                                    \
            break;                                                         \
        void *frame_addr = __builtin_frame_address(N);                     \
        if (!(frame_addr && va.isMapped(frame_addr)))                      \
        {                                                                  \
            entry.data.bt[M] = 0;                                          \
            break;                                                         \
        }                                                                  \
        entry.data.bt[M] =                                                 \
            reinterpret_cast<uintptr_t>(__builtin_return_address(N)) &     \
            0xFFFFFFFFU;                                                   \
    } while (0)

    // we want to skip the allocate()/free() call and get a little bit of
    // context
    if (MemoryTracing::num_backtrace_entries >= 1)
        BT_FRAME(0, 1);
    if (MemoryTracing::num_backtrace_entries >= 2)
        BT_FRAME(1, 2);
    if (MemoryTracing::num_backtrace_entries >= 3)
        BT_FRAME(2, 3);
    if (MemoryTracing::num_backtrace_entries >= 4)
        BT_FRAME(3, 4);
    if (MemoryTracing::num_backtrace_entries >= 5)
        BT_FRAME(4, 5);

    __asm__ __volatile__("pushfq; cli" ::: "memory");

    for (size_t i = 0; i < sizeof entry.buf; ++i)
    {
        __asm__ __volatile__(
            "outb %%al, %%dx" ::"Nd"(0x2E8), "a"(entry.buf[i]));
    }

    __asm__ __volatile__("popf" ::: "memory");
}

void traceMetadata(NormalStaticString str, void *p1, void *p2)
{
// this can be provided by scripts/addr2line.py these days
#if 0
    LockGuard<Spinlock> guard(traceLock);

    // Yes, this means we'll lose early init mallocs. Oh well...
    if(!Machine::instance().isInitialised())
        return;

    Serial *pSerial = Machine::instance().getSerial(1);
    if(!pSerial)
        return;

    char buf[128];
    ByteSet(buf, 0, 128);

    size_t off = 0;

    const MemoryTracing::AllocationTrace type = MemoryTracing::Metadata;

    MemoryCopy(&buf[off], &type, 1);
    ++off;
    MemoryCopy(&buf[off], static_cast<const char *>(str), str.length());
    off += 64; // Statically sized segment.
    MemoryCopy(&buf[off], &p1, sizeof(void*));
    off += sizeof(void*);
    MemoryCopy(&buf[off], &p2, sizeof(void*));
    off += sizeof(void*);

    for(size_t i = 0; i < off; ++i)
    {
        pSerial->write(buf[i]);
    }
#endif
}
#endif

#ifdef ARM_COMMON
#define ATEXIT __aeabi_atexit
#else
#define ATEXIT atexit
#endif

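// The ARM EABI names the static-destructor registration hook __aeabi_atexit;
// everywhere else the plain C atexit name is used. The kernel never tears down
// through a normal C runtime exit, so the registration below is a no-op.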
extern "C" EXPORTED_PUBLIC void ATEXIT(void (*f)(void *), void *p, void *d);
void ATEXIT(void (*f)(void *), void *p, void *d)
{
}

// Called if a pure virtual function is somehow invoked; there is no sensible
// way to continue from here.
extern "C" EXPORTED_PUBLIC void __cxa_pure_virtual() NORETURN;
void __cxa_pure_virtual()
{
    FATAL_NOLOCK("Pure virtual function call made");
}

#if !HAS_THREAD_SANITIZER
extern "C" EXPORTED_PUBLIC int __cxa_guard_acquire();
extern "C" EXPORTED_PUBLIC void __cxa_guard_release();

int __cxa_guard_acquire()
{
    return 1;
}
void __cxa_guard_release()
{
    // TODO
}
#endif
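// These guards back the lazy initialisation of function-local statics, e.g.:
//
//   Foo &instance()
//   {
//       static Foo f;  // compiler emits __cxa_guard_acquire/release calls
//       return f;
//   }
//
// A complete Itanium-ABI implementation takes a pointer to the guard object
// and marks it initialised in __cxa_guard_release; the simplified stubs above
// always report "not yet initialised, go ahead" (hence the TODO).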

#if !(defined(HOSTED) && defined(HOSTED_SYSTEM_MALLOC))
#ifdef HOSTED
#define MALLOC _malloc
#define CALLOC _calloc
#define FREE _free
#define REALLOC _realloc
#else
#define MALLOC malloc
#define CALLOC calloc
#define FREE free
#define REALLOC realloc
#endif

extern "C" void *MALLOC(size_t sz)
{
    return reinterpret_cast<void *>(new uint8_t[sz]);
}

extern "C" void *CALLOC(size_t num, size_t sz)
{
    void *result = reinterpret_cast<void *>(new uint8_t[num * sz]);
    ByteSet(result, 0, num * sz);
    return result;
}

extern "C" void FREE(void *p)
{
    if (p == 0)
        return;
    // SlamAllocator::instance().free(reinterpret_cast<uintptr_t>(p));
    delete[] reinterpret_cast<uint8_t *>(p);
}

extern "C" void *REALLOC(void *p, size_t sz)
{
    if (p == 0)
        return MALLOC(sz);
    if (sz == 0)
    {
        free(p);
        return 0;
    }

    // Don't attempt to read past the end of the source buffer if we can help
    // it: copy at most as many bytes as the allocator recorded for this
    // allocation.
    size_t copySz =
        SlamAllocator::instance().allocSize(reinterpret_cast<uintptr_t>(p));
    if (copySz > sz)
        copySz = sz;

    void *tmp = MALLOC(sz);
    MemoryCopy(tmp, p, copySz);
    FREE(p);

    return tmp;
}

void *operator new(size_t size) noexcept
{
    void *ret =
        reinterpret_cast<void *>(SlamAllocator::instance().allocate(size));
    return ret;
}
void *operator new[](size_t size) noexcept
{
    void *ret =
        reinterpret_cast<void *>(SlamAllocator::instance().allocate(size));
    return ret;
}
void *operator new(size_t size, void *memory) noexcept
{
    return memory;
}
void *operator new[](size_t size, void *memory) noexcept
{
    return memory;
}
static void delete_shared(void *p) noexcept
{
    if (p == 0)
        return;
    uintptr_t mem = reinterpret_cast<uintptr_t>(p);
    // We want to attempt to delete even if this is not a valid pointer if
    // allocations are being traced, so we can catch the bad free and get a
    // backtrace for it.
    if (traceAllocations || SlamAllocator::instance().isPointerValid(mem))
    {
        SlamAllocator::instance().free(mem);
    }
    else
    {
        if (SlamAllocator::instance().isWithinHeap(mem))
        {
            FATAL("delete_shared failed as pointer was invalid: " << p);
        }
        else
        {
            // less critical - still annoying
            ERROR(
                "delete_shared failed as pointer was not in the kernel heap: "
                << p);
        }
    }
}
void operator delete(void *p) noexcept
{
    delete_shared(p);
}
void operator delete[](void *p) noexcept
{
    delete_shared(p);
}
void operator delete(void *p, size_t sz) noexcept
{
    delete_shared(p);
}
void operator delete[](void *p, size_t sz) noexcept
{
    delete_shared(p);
}
void operator delete(void *p, void *q) noexcept
{
    // no-op
}
void operator delete[](void *p, void *q) noexcept
{
    // no-op
}
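// The (void *, void *) overloads above are the placement forms. They are only
// reached if a constructor fails during placement new, and since placement new
// hands back caller-owned memory there is nothing for them to release.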

#ifdef HOSTED
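// These __wrap_* symbols follow GNU ld's --wrap naming convention: linking
// with -Wl,--wrap=malloc redirects calls to malloc() to __wrap_malloc() (and
// the real function becomes __real_malloc). The HOSTED build presumably passes
// the corresponding --wrap flags so that host-side allocations funnel into the
// kernel allocator above.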
extern "C" {

void *__wrap_malloc(size_t sz)
{
    return _malloc(sz);
}

void *__wrap_realloc(void *p, size_t sz)
{
    return _realloc(p, sz);
}

void __wrap_free(void *p)
{
    return _free(p);
}
}
#endif

#endif