The Pedigree Project  0.1
hosted/VirtualAddressSpace.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "VirtualAddressSpace.h"
21 #include "pedigree/kernel/LockGuard.h"
22 #include "pedigree/kernel/panic.h"
23 #include "pedigree/kernel/process/Process.h"
24 #include "pedigree/kernel/process/Scheduler.h"
25 #include "pedigree/kernel/processor/PhysicalMemoryManager.h"
26 #include "pedigree/kernel/processor/Processor.h"
27 #include "pedigree/kernel/utilities/utility.h"
28 
29 #include "PhysicalMemoryManager.h"
30 
31 #include <dlfcn.h>
32 #include <errno.h>
33 #include <sys/mman.h>
34 
35 VirtualAddressSpace *g_pCurrentlyCloning = 0;
36 
37 HostedVirtualAddressSpace HostedVirtualAddressSpace::m_KernelSpace(
38  KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_STACK);
39 
40 typedef void *(*malloc_t)(size_t);
41 typedef void *(*realloc_t)(void *, size_t);
42 typedef void (*free_t)(void *);
43 
44 #include <stdio.h>
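// These wrappers resolve the host C library's allocator with
// dlsym(RTLD_NEXT, ...) and forward to it, so the mapping bookkeeping in
// this file can use the real malloc/realloc/free rather than the hosted
// kernel's own heap.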
45 void *__libc_malloc(size_t n)
46 {
47  static malloc_t local = (malloc_t) dlsym(RTLD_NEXT, "malloc");
48  return local(n);
49 }
50 
51 void *__libc_realloc(void *p, size_t n)
52 {
53  static realloc_t local = (realloc_t) dlsym(RTLD_NEXT, "realloc");
54  return local(p, n);
55 }
56 
57 void __libc_free(void *p)
58 {
59  static free_t local = (free_t) dlsym(RTLD_NEXT, "free");
60  local(p);
61 }
62 
63 VirtualAddressSpace &VirtualAddressSpace::getKernelAddressSpace()
64 {
65  return HostedVirtualAddressSpace::m_KernelSpace;
66 }
67 
68 VirtualAddressSpace *VirtualAddressSpace::create()
69 {
70  return new HostedVirtualAddressSpace();
71 }
72 
73 bool HostedVirtualAddressSpace::memIsInHeap(void *pMem)
74 {
75  if (pMem < KERNEL_VIRTUAL_HEAP)
76  return false;
77  else if (pMem >= getEndOfHeap())
78  return false;
79  else
80  return true;
81 }
82 
83 void *HostedVirtualAddressSpace::getEndOfHeap()
84 {
85  return adjust_pointer(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_HEAP_SIZE);
86 }
87 
88 bool HostedVirtualAddressSpace::isAddressValid(void *virtualAddress)
89 {
90  if (reinterpret_cast<uint64_t>(virtualAddress) < 0x0008000000000000ULL ||
91  reinterpret_cast<uint64_t>(virtualAddress) >= 0xFFF8000000000000ULL)
92  return true;
93  return false;
94 }
95 
96 bool HostedVirtualAddressSpace::isMapped(void *virtualAddress)
97 {
98  LockGuard<Spinlock> guard(m_Lock);
99 
100  virtualAddress = page_align(virtualAddress);
101 
102  int r =
103  msync(virtualAddress, PhysicalMemoryManager::getPageSize(), MS_ASYNC);
104  if (r < 0)
105  {
106  if (errno == ENOMEM)
107  {
108  return false;
109  }
110  }
111 
112  if (this != &getKernelAddressSpace())
113  {
114  bool r = getKernelAddressSpace().isMapped(virtualAddress);
115  if (r)
116  return r;
117  }
118 
119  // Find this mapping if we can.
120  for (size_t i = 0; i < m_KnownMapsSize; ++i)
121  {
122  if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
123  return true;
124  }
125 
126  return false;
127 }
128 
129 bool HostedVirtualAddressSpace::map(
130  physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
131 {
132  virtualAddress = page_align(virtualAddress);
133 
134  // If this should be a kernel mapping, use the kernel address space.
135  if (this != &getKernelAddressSpace())
136  if ((virtualAddress >= KERNEL_SPACE_START) || (flags & KernelMode))
137  return getKernelAddressSpace().map(
138  physAddress, virtualAddress, flags);
139 
140  // mmap() won't fail if the address is already mapped, but we need to.
141  if (isMapped(virtualAddress))
142  {
143  return false;
144  }
145 
146  LockGuard<Spinlock> guard(m_Lock);
147 
148  // Map, backed onto the "physical memory" of the system.
149  int prot = toFlags(flags, true);
150  void *r = mmap(
151  virtualAddress, PhysicalMemoryManager::getPageSize(), prot,
152  MAP_FIXED | MAP_SHARED,
153  HostedPhysicalMemoryManager::instance().getBackingFile(), physAddress);
154 
155  if (UNLIKELY(r == MAP_FAILED))
156  return false;
157 
158  assert(r == virtualAddress);
159 
160  // Extend list of known maps if we can't fit this one in.
161  if (m_numKnownMaps == m_KnownMapsSize)
162  {
163  size_t oldSize = m_KnownMapsSize;
164  if (m_KnownMapsSize == 0)
165  m_KnownMapsSize = 1;
166 
167  m_KnownMapsSize *= 2;
168 
169  size_t newSizeBytes = sizeof(mapping_t) * m_KnownMapsSize;
170  if (!m_pKnownMaps)
171  m_pKnownMaps = (mapping_t *) __libc_malloc(newSizeBytes);
172  else
173  m_pKnownMaps =
174  (mapping_t *) __libc_realloc(m_pKnownMaps, newSizeBytes);
175 
176  // Mark all inactive.
177  for (size_t i = oldSize; i < m_KnownMapsSize; ++i)
178  m_pKnownMaps[i].active = false;
179  }
180 
181  // Register in the list of known mappings.
182  bool bRegistered = false;
183  size_t idx = m_nLastUnmap;
184  for (; idx < m_KnownMapsSize; ++idx)
185  {
186  if (m_pKnownMaps[idx].active)
187  continue;
188 
189  bRegistered = true;
190  break;
191  }
192  if (!bRegistered)
193  {
194  // Try again from the beginning.
195  for (idx = 0; idx < m_nLastUnmap; ++idx)
196  {
197  if (m_pKnownMaps[idx].active)
198  continue;
199 
200  bRegistered = true;
201  break;
202  }
203  }
204 
205  if (!bRegistered)
206  panic("Fatal algorithmic error in HostedVirtualAddressSpace::map");
207 
208  m_pKnownMaps[idx].active = true;
209  m_pKnownMaps[idx].vaddr = virtualAddress;
210  m_pKnownMaps[idx].paddr = physAddress;
211  m_pKnownMaps[idx].flags = flags;
212 
213  ++m_numKnownMaps;
214 
215  return true;
216 }
217 
218 void HostedVirtualAddressSpace::getMapping(
219  void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags)
220 {
221  LockGuard<Spinlock> guard(m_Lock);
222 
223  virtualAddress = page_align(virtualAddress);
224 
225  // Handle kernel mappings, if needed.
226  if (this != &getKernelAddressSpace())
227  {
228  if (getKernelAddressSpace().isMapped(virtualAddress))
229  {
230  getKernelAddressSpace().getMapping(
231  virtualAddress, physAddress, flags);
232  return;
233  }
234  }
235 
236  size_t pageSize = PhysicalMemoryManager::getPageSize();
237  uintptr_t alignedVirtualAddress =
238  reinterpret_cast<uintptr_t>(virtualAddress) & ~(pageSize - 1);
239  virtualAddress = reinterpret_cast<void *>(alignedVirtualAddress);
240 
241  // Find this mapping if we can.
242  for (size_t i = 0; i < m_KnownMapsSize; ++i)
243  {
244  if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
245  {
246  physAddress = m_pKnownMaps[i].paddr;
247  flags = fromFlags(m_pKnownMaps[i].flags, true);
248  return;
249  }
250  }
251 
252  panic("HostedVirtualAddressSpace::getMapping - function misused");
253 }
254 
255 void HostedVirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
256 {
257  LockGuard<Spinlock> guard(m_Lock);
258 
259  virtualAddress = page_align(virtualAddress);
260 
261  // Check for kernel mappings.
262  if (this != &getKernelAddressSpace())
263  {
264  if (getKernelAddressSpace().isMapped(virtualAddress))
265  {
266  getKernelAddressSpace().setFlags(virtualAddress, newFlags);
267  return;
268  }
269  else if (newFlags & KernelMode)
270  WARNING("setFlags called with KernelMode as a flag, page is not "
271  "mapped in kernel.");
272  }
273 
274  for (size_t i = 0; i < m_KnownMapsSize; ++i)
275  {
276  if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
277  {
278  m_pKnownMaps[i].flags = newFlags;
279  break;
280  }
281  }
282 
283  size_t flags = toFlags(newFlags, true);
284  mprotect(virtualAddress, PhysicalMemoryManager::getPageSize(), flags);
285 }
286 
287 void HostedVirtualAddressSpace::unmap(void *virtualAddress)
288 {
289  LockGuard<Spinlock> guard(m_Lock);
290 
291  virtualAddress = page_align(virtualAddress);
292 
293  // Check for kernel mappings.
294  if (this != &getKernelAddressSpace())
295  {
296  if (getKernelAddressSpace().isMapped(virtualAddress))
297  {
298  getKernelAddressSpace().unmap(virtualAddress);
299  return;
300  }
301  }
302 
303  for (size_t i = 0; i < m_KnownMapsSize; ++i)
304  {
305  if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
306  {
307  m_pKnownMaps[i].active = false;
308  m_nLastUnmap = i;
309  break;
310  }
311  }
312 
313  munmap(virtualAddress, PhysicalMemoryManager::getPageSize());
314 }
315 
316 VirtualAddressSpace *HostedVirtualAddressSpace::clone(bool copyOnWrite)
317 {
318  HostedVirtualAddressSpace *pNew = static_cast<HostedVirtualAddressSpace *>(
319  VirtualAddressSpace::create());
320 
321  {
322  LockGuard<Spinlock> guard(m_Lock);
323 
324  // Copy over the known maps so the new address space can find them.
325  pNew->m_pKnownMaps =
326  (mapping_t *) __libc_malloc(m_KnownMapsSize * sizeof(mapping_t));
327  MemoryCopy(
328  pNew->m_pKnownMaps, m_pKnownMaps,
329  m_KnownMapsSize * sizeof(mapping_t));
330  pNew->m_KnownMapsSize = m_KnownMapsSize;
331  pNew->m_numKnownMaps = m_numKnownMaps;
332  pNew->m_nLastUnmap = m_nLastUnmap;
333 
334  // Readjust flags on the new mappings if needed.
335  for (size_t i = 0; i < pNew->m_KnownMapsSize; ++i)
336  {
337  mapping_t *mapping = &pNew->m_pKnownMaps[i];
338  if (!mapping->active)
339  continue;
340 
341  PhysicalMemoryManager::instance().pin(mapping->paddr);
342 
343  if (mapping->flags & Shared)
344  {
345  continue;
346  }
347 
348  if (!(mapping->flags & CopyOnWrite))
349  PhysicalMemoryManager::instance().pin(mapping->paddr);
350 
351  if (mapping->flags & Write)
352  {
353  mapping->flags |= CopyOnWrite;
354  }
355  mapping->flags &= ~Write;
356  }
357  }
358 
359  if (m_pStackTop < KERNEL_SPACE_START)
360  {
361  pNew->m_pStackTop = m_pStackTop;
362  for (Vector<Stack *>::Iterator it = m_freeStacks.begin();
363  it != m_freeStacks.end(); ++it)
364  {
365  Stack *pNewStack = new Stack(**it);
366  pNew->m_freeStacks.pushBack(pNewStack);
367  }
368  }
369 
370  if (m_Heap < KERNEL_SPACE_START)
371  {
372  pNew->m_Heap = m_Heap;
373  pNew->m_HeapEnd = m_HeapEnd;
374  NOTICE("clone: heap=" << m_Heap << " end=" << m_HeapEnd);
375  }
376 
377  return pNew;
378 }
379 
380 void HostedVirtualAddressSpace::revertToKernelAddressSpace()
381 {
382  LockGuard<Spinlock> guard(m_Lock);
383 
384  for (size_t i = 0; i < m_KnownMapsSize; ++i)
385  {
386  if (m_pKnownMaps[i].active)
387  {
388  if (getKernelAddressSpace().isMapped(m_pKnownMaps[i].vaddr))
389  {
390  m_pKnownMaps[i].active = false;
391  m_nLastUnmap = i;
392  continue;
393  }
394  else if (m_pKnownMaps[i].vaddr > KERNEL_SPACE_START)
395  continue;
396 
397  munmap(m_pKnownMaps[i].vaddr, PhysicalMemoryManager::getPageSize());
398 
399  // Clean up references to physical memory as needed.
400  if ((m_pKnownMaps[i].flags & (Shared | Swapped)) == 0)
401  PhysicalMemoryManager::instance().freePage(
402  m_pKnownMaps[i].paddr);
403 
404  m_pKnownMaps[i].active = false;
405  }
406  }
407 }
408 
409 VirtualAddressSpace::Stack *HostedVirtualAddressSpace::allocateStack()
410 {
411  size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
412  if (this == &getKernelAddressSpace())
413  sz = KERNEL_STACK_SIZE;
414  return doAllocateStack(sz);
415 }
416 
417 VirtualAddressSpace::Stack *
418 HostedVirtualAddressSpace::allocateStack(size_t stackSz)
419 {
420  if (stackSz == 0)
421  return allocateStack();
422  return doAllocateStack(stackSz);
423 }
424 
425 VirtualAddressSpace::Stack *
426 HostedVirtualAddressSpace::doAllocateStack(size_t sSize)
427 {
428  size_t flags = 0;
429  bool bMapAll = true;
430  if (this == &m_KernelSpace)
431  {
432  // Don't demand map kernel mode stacks.
433  flags = KernelMode;
434  bMapAll = true;
435  }
436 
437  m_Lock.acquire();
438 
439  size_t pageSz = PhysicalMemoryManager::getPageSize();
440 
441  // Grab a new stack pointer. Use the list of freed stacks if we can,
442  // otherwise adjust the internal stack pointer. Using the list of freed
443  // stacks helps avoid having the virtual address creep downwards.
444  void *pStack = 0;
445  if (m_freeStacks.count() != 0)
446  {
447  Stack *poppedStack = m_freeStacks.popBack();
448  if (poppedStack->getSize() >= sSize)
449  {
450  pStack = poppedStack->getTop();
451  }
452  delete poppedStack;
453  }
454  else
455  {
456  pStack = m_pStackTop;
457 
458  // Always leave one page unmapped between each stack to catch overflow.
459  m_pStackTop =
460  adjust_pointer(m_pStackTop, -static_cast<ssize_t>(sSize + pageSz));
461  }
462 
463  m_Lock.release();
464 
465  // Map the top of the stack in proper.
466  uintptr_t firstPage = reinterpret_cast<uintptr_t>(pStack) - pageSz;
467  physical_uintptr_t phys = PhysicalMemoryManager::instance().allocatePage();
468  if (!bMapAll)
469  PhysicalMemoryManager::instance().pin(phys);
470  if (!map(
471  phys, reinterpret_cast<void *>(firstPage),
472  flags | VirtualAddressSpace::Write))
473  WARNING("map() failed in doAllocateStack");
474 
475  // Bring in the rest of the stack as CoW.
476  uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
477  for (uintptr_t addr = stackBottom; addr < firstPage; addr += pageSz)
478  {
479  size_t map_flags = 0;
480 
481  if (!bMapAll)
482  {
483  // Copy first stack page on write.
484  PhysicalMemoryManager::instance().pin(phys);
485  map_flags = VirtualAddressSpace::CopyOnWrite;
486  }
487  else
488  {
489  phys = PhysicalMemoryManager::instance().allocatePage();
490  map_flags = VirtualAddressSpace::Write;
491  }
492 
493  if (!map(phys, reinterpret_cast<void *>(addr), flags | map_flags))
494  WARNING("CoW map() failed in doAllocateStack");
495  }
496 
497  Stack *stackInfo = new Stack(pStack, sSize);
498  return stackInfo;
499 }
500 
501 void HostedVirtualAddressSpace::freeStack(Stack *pStack)
502 {
503  size_t pageSz = PhysicalMemoryManager::getPageSize();
504 
505  // Clean up the stack
506  uintptr_t stackTop = reinterpret_cast<uintptr_t>(pStack->getTop());
507  for (size_t i = 0; i < pStack->getSize(); i += pageSz)
508  {
509  stackTop -= pageSz;
510  void *v = reinterpret_cast<void *>(stackTop);
511  if (!isMapped(v))
512  break; // Hit end of stack.
513 
514  size_t flags = 0;
515  physical_uintptr_t phys = 0;
516  getMapping(v, phys, flags);
517 
518  unmap(v);
519  PhysicalMemoryManager::instance().freePage(phys);
520  }
521 
522  // Add the stack to the list
523  m_Lock.acquire();
524  m_freeStacks.pushBack(pStack);
525  m_Lock.release();
526 }
527 
528 HostedVirtualAddressSpace::~HostedVirtualAddressSpace()
529 {
530  // TODO: Free other things, perhaps in VirtualAddressSpace
531  // We can't do this in VirtualAddressSpace destructor though!
532 }
533 
534 HostedVirtualAddressSpace::HostedVirtualAddressSpace()
535  : VirtualAddressSpace(USERSPACE_VIRTUAL_HEAP),
536  m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(),
537  m_bKernelSpace(false), m_Lock(false, true), m_pKnownMaps(0),
538  m_numKnownMaps(0), m_nLastUnmap(0)
539 {
540 }
541 
542 HostedVirtualAddressSpace::HostedVirtualAddressSpace(
543  void *Heap, void *VirtualStack)
544  : VirtualAddressSpace(Heap), m_pStackTop(VirtualStack), m_freeStacks(),
545  m_bKernelSpace(true), m_Lock(false, true), m_pKnownMaps(0),
546  m_numKnownMaps(0), m_nLastUnmap(0)
547 {
548 }
549 
550 uint64_t HostedVirtualAddressSpace::toFlags(size_t flags, bool bFinal)
551 {
552  uint64_t Flags = 0;
553  if (flags & Write)
554  Flags |= PROT_WRITE;
555  if (flags & Swapped)
556  Flags |= PROT_NONE;
557  else
558  Flags |= PROT_READ;
559  if (flags & Execute)
560  Flags |= PROT_EXEC;
561  return Flags;
562 }
563 
564 size_t HostedVirtualAddressSpace::fromFlags(uint64_t Flags, bool bFinal)
565 {
566  return Flags;
567 }
568 
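// The hosted kernel has no hardware page tables to switch. Changing
// address spaces therefore means munmap()ing the old space's user
// mappings and re-establishing the new space's recorded mappings with
// mmap() from the backing file, as below.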
569 void HostedVirtualAddressSpace::switchAddressSpace(
570  VirtualAddressSpace &a, VirtualAddressSpace &b)
571 {
572  HostedVirtualAddressSpace &oldSpace =
573  static_cast<HostedVirtualAddressSpace &>(a);
574  HostedVirtualAddressSpace &newSpace =
575  static_cast<HostedVirtualAddressSpace &>(b);
576 
577  if (&oldSpace != &getKernelAddressSpace())
578  {
579  for (size_t i = 0; i < oldSpace.m_KnownMapsSize; ++i)
580  {
581  if (oldSpace.m_pKnownMaps[i].active)
582  {
583  if (getKernelAddressSpace().isMapped(
584  oldSpace.m_pKnownMaps[i].vaddr))
585  {
586  continue;
587  }
588  else if (oldSpace.m_pKnownMaps[i].flags & KernelMode)
589  {
590  continue;
591  }
592 
593  munmap(
594  oldSpace.m_pKnownMaps[i].vaddr,
595  PhysicalMemoryManager::getPageSize());
596  }
597  }
598  }
599 
600  for (size_t i = 0; i < newSpace.m_KnownMapsSize; ++i)
601  {
602  if (newSpace.m_pKnownMaps[i].active)
603  {
604  if (getKernelAddressSpace().isMapped(
605  newSpace.m_pKnownMaps[i].vaddr))
606  {
607  continue;
608  }
609 
610  mmap(
611  newSpace.m_pKnownMaps[i].vaddr,
612  PhysicalMemoryManager::getPageSize(),
613  newSpace.toFlags(newSpace.m_pKnownMaps[i].flags, true),
614  MAP_FIXED | MAP_SHARED,
615  HostedPhysicalMemoryManager::instance().getBackingFile(),
616  newSpace.m_pKnownMaps[i].paddr);
617  }
618  }
619 }
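
The file above relies on two host facilities: a backing file that stands in for physical memory (map() and switchAddressSpace() mmap() page-sized windows of it at fixed virtual addresses with MAP_FIXED | MAP_SHARED), and msync() with MS_ASYNC as a cheap "is this page mapped?" probe (isMapped() treats ENOMEM as "not mapped"). The standalone sketch below shows the same pattern in isolation; it assumes a 64-bit Linux host where the chosen fixed address is unused, and the file path, constants, and helper names are illustrative only, not part of Pedigree.

#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static const size_t kPageSize = 0x1000;  // assume 4 KiB host pages

// msync() on a range that is not mapped fails with ENOMEM; this is the same
// probe HostedVirtualAddressSpace::isMapped() uses.
static bool probeMapped(void *vaddr)
{
    return !(msync(vaddr, kPageSize, MS_ASYNC) < 0 && errno == ENOMEM);
}

int main()
{
    // The backing file plays the role of HostedPhysicalMemoryManager's
    // backing file: offsets into it act as "physical addresses".
    int backing = open("/tmp/fake-phys-mem", O_RDWR | O_CREAT, 0600);
    if (backing < 0 || ftruncate(backing, 16 * kPageSize) != 0)
        return 1;

    // Map "physical" page 3 at a fixed, page-aligned virtual address,
    // just as map() does for each page it registers.
    void *vaddr = reinterpret_cast<void *>(0x200000000000ULL);
    void *r = mmap(
        vaddr, kPageSize, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED,
        backing, 3 * kPageSize);
    if (r == MAP_FAILED)
        return 1;

    printf("mapped after mmap:   %d\n", probeMapped(vaddr));

    // Unmapping corresponds to HostedVirtualAddressSpace::unmap().
    munmap(vaddr, kPageSize);
    printf("mapped after munmap: %d\n", probeMapped(vaddr));

    close(backing);
    return 0;
}

Because every mapping is a shared view of the same file, two virtual pages mapped at the same file offset see each other's writes, which is what lets the backing file behave like shared physical memory across cloned address spaces.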