The Pedigree Project  0.1
x64/VirtualAddressSpace.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "VirtualAddressSpace.h"
21 #include "pedigree/kernel/LockGuard.h"
22 #include "pedigree/kernel/Log.h"
23 #include "pedigree/kernel/panic.h"
24 #include "pedigree/kernel/process/Process.h"
25 #include "pedigree/kernel/process/Scheduler.h"
26 #include "pedigree/kernel/process/Thread.h"
27 #include "pedigree/kernel/processor/PhysicalMemoryManager.h"
28 #include "pedigree/kernel/processor/Processor.h"
29 #include "pedigree/kernel/processor/ProcessorInformation.h"
30 #include "pedigree/kernel/utilities/utility.h"
31 #include "utils.h"
32 
33 //
34 // Page Table/Directory entry flags
35 //
36 #define PAGE_PRESENT 0x01
37 #define PAGE_WRITE 0x02
38 #define PAGE_USER 0x04
39 #define PAGE_WRITE_COMBINE 0x08
40 #define PAGE_CACHE_DISABLE 0x10
41 #define PAGE_ACCESSED 0x20
42 #define PAGE_DIRTY 0x40
43 #define PAGE_2MB 0x80
44 #define PAGE_PAT 0x80
45 #define PAGE_GLOBAL 0x100
46 #define PAGE_SWAPPED 0x200
47 #define PAGE_COPY_ON_WRITE 0x400
48 #define PAGE_SHARED 0x800
49 #define PAGE_NX 0x8000000000000000
50 #define PAGE_WRITE_THROUGH (PAGE_PAT | PAGE_WRITE_COMBINE)
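// Note on bit usage: PAGE_SWAPPED, PAGE_COPY_ON_WRITE and PAGE_SHARED sit in
// bits 9-11, which the x86-64 page-table format leaves free for software use.
// PAGE_NX is bit 63 (execute-disable, effective once EFER.NXE is set), and
// PAGE_WRITE_COMBINE/PAGE_PAT reuse the PWT/PAT bits, so the memory type they
// select depends on how the PAT MSR has been programmed.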
51 
52 //
53 // Macros
54 //
55 #define PML4_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 39) & 0x1FF)
56 #define PAGE_DIRECTORY_POINTER_INDEX(x) \
57  ((reinterpret_cast<uintptr_t>(x) >> 30) & 0x1FF)
58 #define PAGE_DIRECTORY_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 21) & 0x1FF)
59 #define PAGE_TABLE_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 12) & 0x1FF)
60 
61 #define TABLE_ENTRY(table, index) \
62  (&physicalAddress(reinterpret_cast<uint64_t *>(table))[index])
63 
64 #define PAGE_GET_FLAGS(x) (*x & 0x8000000000000FFFULL)
65 #define PAGE_SET_FLAGS(x, f) *x = (*x & ~0x8000000000000FFFULL) | f
66 #define PAGE_GET_PHYSICAL_ADDRESS(x) (*x & ~0x8000000000000FFFULL)
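// A 48-bit canonical address decodes into four 9-bit table indices plus a
// 12-bit page offset: bits 47-39 select the PML4 entry, 38-30 the page
// directory pointer entry, 29-21 the page directory entry and 20-12 the page
// table entry. TABLE_ENTRY() turns a table's physical address into a usable
// pointer via physicalAddress() and indexes into the 512-entry table.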
67 
68 // Defined in boot-standalone.s
69 extern void *pml4;
70 
71 X64VirtualAddressSpace X64VirtualAddressSpace::m_KernelSpace(
72  KERNEL_VIRTUAL_HEAP,
73  reinterpret_cast<uintptr_t>(&pml4) -
74  reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
75  KERNEL_VIRTUAL_STACK);
76 
77 static void trackPages(ssize_t v, ssize_t p, ssize_t s)
78 {
79  // Track, if we can.
80  Thread *pThread = Processor::information().getCurrentThread();
81  if (pThread)
82  {
83  Process *pProcess = pThread->getParent();
84  if (pProcess)
85  {
86  pProcess->trackPages(v, p, s);
87  }
88  }
89 }
90 
91 VirtualAddressSpace &VirtualAddressSpace::getKernelAddressSpace()
92 {
93  return X64VirtualAddressSpace::m_KernelSpace;
94 }
95 
96 VirtualAddressSpace *VirtualAddressSpace::create()
97 {
98  return new X64VirtualAddressSpace();
99 }
100 
101 bool X64VirtualAddressSpace::memIsInKernelHeap(void *pMem)
102 {
103  if (pMem < KERNEL_VIRTUAL_HEAP)
104  {
105  return false;
106  }
107  else if (
108  pMem >= adjust_pointer(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_HEAP_SIZE))
109  {
110  return false;
111  }
112 
113  return true;
114 }
115 
116 bool X64VirtualAddressSpace::memIsInHeap(void *pMem)
117 {
118  if (pMem < m_Heap)
119  {
120  WARNING("memIsInHeap: " << pMem << " is below the kernel heap.");
121  return false;
122  }
123  else if (pMem >= getEndOfHeap())
124  {
125  WARNING(
126  "memIsInHeap: " << pMem << " is beyond the end of the heap ("
127  << getEndOfHeap() << ").");
128  return false;
129  }
130  else
131  return true;
132 }
133 void *X64VirtualAddressSpace::getEndOfHeap()
134 {
135  if (m_Heap == KERNEL_VIRTUAL_HEAP)
136  {
137  return reinterpret_cast<void *>(
138  reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
139  KERNEL_VIRTUAL_HEAP_SIZE);
140  }
141  else
142  {
143  return m_HeapEnd;
144  }
145 }
146 
147 bool X64VirtualAddressSpace::isAddressValid(void *virtualAddress)
148 {
149  if (reinterpret_cast<uint64_t>(virtualAddress) < 0x0008000000000000ULL ||
150  reinterpret_cast<uint64_t>(virtualAddress) >= 0xFFF8000000000000ULL)
151  {
152  return true;
153  }
154  return false;
155 }
156 bool X64VirtualAddressSpace::isMapped(void *virtualAddress)
157 {
158  LockGuard<Spinlock> guard(m_Lock);
159 
160  size_t pml4Index = PML4_INDEX(virtualAddress);
161  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
162 
163  // Is a page directory pointer table present?
164  if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
165  return false;
166 
167  size_t pageDirectoryPointerIndex =
168  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
169  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
170  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
171 
172  // Is a page directory present?
173  if ((*pageDirectoryPointerEntry & PAGE_PRESENT) != PAGE_PRESENT)
174  return false;
175 
176  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
177  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
178  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
179  pageDirectoryIndex);
180 
181  // Is a page table or 2MB page present?
182  if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
183  return false;
184 
185  // Is it a 2MB page?
186  if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
187  return true;
188 
189  size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
190  uint64_t *pageTableEntry = TABLE_ENTRY(
191  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);
192 
193  // Is a page present?
194  return ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT);
195 }
196 
197 bool X64VirtualAddressSpace::map(
198  physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
199 {
200  LockGuard<Spinlock> guard(m_Lock);
201 
202  return mapUnlocked(physAddress, virtualAddress, flags, m_Lock.acquired());
203 }
204 
205 bool X64VirtualAddressSpace::mapHuge(
206  physical_uintptr_t physAddress, void *virtualAddress, size_t count,
207  size_t flags)
208 {
209  uint32_t a, b, c, d;
210  Processor::cpuid(0x80000001UL, 0, a, b, c, d);
211 
212  size_t numHugePages = 0;
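 // CPUID leaf 0x80000001: EDX bit 26 ("pdpe1gb") reports 1 GB page support.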
213  bool hasHuge = d & (1 << 26);
214  if (hasHuge)
215  {
216  // 1 GB pages are available.
217  // NOTE: we intentionally let this truncate to zero, which will fall
218  // back to 2MB pages for mappings that are less than 1GB big.
220  numHugePages = count / (1 << (30UL - 12UL));
221  }
222 
223  if (numHugePages == 0)
224  {
225  // Fall back to 2 MB pages.
226  numHugePages = count / (1 << (21UL - 12UL));
227  }
228 
229  if (numHugePages == 0)
230  {
231  // Just map the normal way - less than 2MB!
232  return VirtualAddressSpace::mapHuge(
233  physAddress, virtualAddress, count, flags);
234  }
235 
236  LockGuard<Spinlock> guard(m_Lock);
237 
238  size_t smallPageSize = PhysicalMemoryManager::getPageSize();
239 
240  // Clean up any existing mapping before we go ahead and map the huge pages
241  for (size_t i = 0; i < count; ++i)
242  {
243  unmapUnlocked(adjust_pointer(virtualAddress, i * smallPageSize), false);
244  }
245 
246  // Ensure correct page size for this mapping.
247  const size_t pageSize = hasHuge ? (1 << 30UL) : (1 << 21UL);
248 
249  size_t Flags = toFlags(flags, true);
250  for (size_t i = 0; i < numHugePages; ++i)
251  {
252  size_t pml4Index = PML4_INDEX(virtualAddress);
253  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
254 
255  // Is a page directory pointer table present?
256  if (conditionalTableEntryAllocation(pml4Entry, flags) == false)
257  {
258  return false;
259  }
260 
261  size_t pageDirectoryPointerIndex =
262  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
263  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
264  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
265 
268 
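 // PAGE_2MB is the page-size (PS) bit: set in a PDPT entry it produces a
 // 1 GB mapping, set in a page directory entry it produces a 2 MB mapping.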
269  if (hasHuge)
270  {
271  // 1G pages.
274  *pageDirectoryPointerEntry = physAddress | PAGE_2MB | Flags;
275  }
276  else
277  {
278  // 2 MB pages.
279 
280  // Is a page directory present?
281  if (conditionalTableEntryAllocation(
282  pageDirectoryPointerEntry, flags) == false)
283  {
284  return false;
285  }
286 
287  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
288  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
289  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
290  pageDirectoryIndex);
291 
292  *pageDirectoryEntry = physAddress | PAGE_2MB | Flags;
293  }
294 
295  virtualAddress = adjust_pointer(virtualAddress, pageSize);
296  physAddress += pageSize;
297  }
298 
299  return true;
300 }
301 
302 bool X64VirtualAddressSpace::mapUnlocked(
303  physical_uintptr_t physAddress, void *virtualAddress, size_t flags,
304  bool locked)
305 {
306  size_t Flags = toFlags(flags, true);
307  size_t pml4Index = PML4_INDEX(virtualAddress);
308  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
309 
310  // Check if a page directory pointer table was present *before* the
311  // conditional allocation.
312  bool pdWasPresent = (*pml4Entry & PAGE_PRESENT) == PAGE_PRESENT;
313 
314  // Is a page directory pointer table present?
315  if (conditionalTableEntryAllocation(pml4Entry, flags) == false)
316  {
317  return false;
318  }
319 
320  size_t pageDirectoryPointerIndex =
321  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
322  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
323  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
324 
325  // Is a page directory present?
326  if (conditionalTableEntryAllocation(pageDirectoryPointerEntry, flags) ==
327  false)
328  {
329  return false;
330  }
331 
332  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
333  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
334  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
335  pageDirectoryIndex);
336 
337  // Is a page table present?
338  if (conditionalTableEntryAllocation(pageDirectoryEntry, flags) == false)
339  {
340  return false;
341  }
342 
343  size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
344  uint64_t *pageTableEntry = TABLE_ENTRY(
345  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);
346 
347  // Is a page already present?
348  if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
349  {
350  return false;
351  }
352 
353  // Map the page
354  *pageTableEntry = physAddress | Flags;
355 
356  // Flush the TLB
357  Processor::invalidate(virtualAddress);
358 
359  trackPages(1, 0, 0);
360 
361  // We don't need the lock to propagate the PDPT.
362  if (locked)
363  m_Lock.release();
364 
365  // If there wasn't a PDPT already present, and the address is in the kernel
366  // area of memory, we need to propagate this change across all address
367  // spaces.
368  if (!pdWasPresent && Processor::m_Initialised == 2 &&
369  virtualAddress >= KERNEL_VIRTUAL_HEAP)
370  {
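 // Every address space copies the kernel half of the PML4 (the top 256
 // entries) from the kernel address space when it is created, so a
 // kernel-space PML4 entry allocated after that point has to be pushed
 // into all the other PML4s by hand here.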
371  uint64_t thisPml4Entry = *pml4Entry;
374  for (size_t i = 0; i < Scheduler::instance().getNumProcesses(); i++)
375  {
376  Process *p = Scheduler::instance().getProcess(i);
377  if (!p)
378  {
379  continue;
380  }
381 
382  X64VirtualAddressSpace *x64VAS =
383  reinterpret_cast<X64VirtualAddressSpace *>(
384  p->getAddressSpace());
385  uint64_t *otherPml4Entry =
386  TABLE_ENTRY(x64VAS->m_PhysicalPML4, pml4Index);
387  *otherPml4Entry = thisPml4Entry;
388  }
389  }
390 
391  // If we were locked before, take the lock to enforce that.
392  if (locked)
393  m_Lock.acquire();
394 
395  return true;
396 }
397 
398 void X64VirtualAddressSpace::getMapping(
399  void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags)
400 {
401  // Get a pointer to the page-table entry (Also checks whether the page is
402  // actually present or marked swapped out)
403  uint64_t *pageTableEntry = 0;
404  if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
405  {
406  panic("VirtualAddressSpace::getMapping(): function misused");
407  }
408 
409  // Extract the physical address and the flags
410  physAddress = PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);
411  flags = fromFlags(PAGE_GET_FLAGS(pageTableEntry), true);
412 }
413 
414 void X64VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
415 {
416  LockGuard<Spinlock> guard(m_Lock);
417 
418  // Get a pointer to the page-table entry (Also checks whether the page is
419  // actually present or marked swapped out)
420  uint64_t *pageTableEntry = 0;
421  if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
422  {
423  panic("VirtualAddressSpace::setFlags(): function misused");
424  }
425 
426  // Set the flags
427  PAGE_SET_FLAGS(pageTableEntry, toFlags(newFlags, true));
428 
429  // Flush TLB - modified the mapping for this address.
430  Processor::invalidate(virtualAddress);
431 }
432 
433 void X64VirtualAddressSpace::unmap(void *virtualAddress)
434 {
435  LockGuard<Spinlock> guard(m_Lock);
436 
437  unmapUnlocked(virtualAddress);
438 }
439 
440 void X64VirtualAddressSpace::unmapUnlocked(
441  void *virtualAddress, bool requireMapped)
442 {
443  // Get a pointer to the page-table entry (Also checks whether the page is
444  // actually present or marked swapped out)
445  uint64_t *pageTableEntry = 0;
446  if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
447  {
448  // Not mapped! This is a panic for most cases, but private usage of
449  // unmap within X64VirtualAddressSpace is allowed to do this.
450  if (requireMapped)
451  {
452  panic("VirtualAddressSpace::unmap(): function misused");
453  }
454  else
455  {
456  return;
457  }
458  }
459 
460  // Unmap the page
461  *pageTableEntry = 0;
462 
463  // Invalidate the TLB entry
464  Processor::invalidate(virtualAddress);
465 
466  trackPages(-1, 0, 0);
467 
468  // Possibly wipe out paging structures now that we've unmapped the page.
469  // This can clear all the way up to, but not including, the PML4 - can be
470  // extremely useful to conserve memory.
471  maybeFreeTables(virtualAddress);
472 }
473 
474 VirtualAddressSpace *X64VirtualAddressSpace::clone(bool copyOnWrite)
475 {
477 
478  // Create a new virtual address space
479  X64VirtualAddressSpace *pClone =
480  static_cast<X64VirtualAddressSpace *>(VirtualAddressSpace::create());
481  if (pClone == 0)
482  {
483  WARNING("X64VirtualAddressSpace: Clone() failed!");
484  return 0;
485  }
486 
487  // Lock both address spaces so we can clone safely.
488  LockGuard<Spinlock> cloneGuard(pClone->m_Lock);
489  LockGuard<Spinlock> cloneStacksGuard(pClone->m_StacksLock);
490  m_Lock.acquire();
491 
492  // The userspace area is only the bottom half of the address space - the top
493  // 256 PML4 entries are for the kernel, and these should be mapped anyway.
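 // Walk PML4 -> PDPT -> PD -> PT and duplicate every present 4 KB user
 // mapping into the clone; non-present entries and 2 MB mappings are skipped.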
494  for (uint64_t i = 0; i < 256; i++)
495  {
496  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, i);
497  if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
498  continue;
499 
500  for (uint64_t j = 0; j < 512; j++)
501  {
502  uint64_t *pdptEntry =
503  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), j);
504  if ((*pdptEntry & PAGE_PRESENT) != PAGE_PRESENT)
505  continue;
506 
507  for (uint64_t k = 0; k < 512; k++)
508  {
509  uint64_t *pdEntry =
510  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdptEntry), k);
511  if ((*pdEntry & PAGE_PRESENT) != PAGE_PRESENT)
512  continue;
513 
515  if ((*pdEntry & PAGE_2MB) == PAGE_2MB)
516  continue;
517 
518  for (uint64_t l = 0; l < 512; l++)
519  {
520  uint64_t *ptEntry =
521  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdEntry), l);
522  if ((*ptEntry & PAGE_PRESENT) != PAGE_PRESENT)
523  continue;
524 
525  uint64_t flags = PAGE_GET_FLAGS(ptEntry);
526  physical_uintptr_t physicalAddress =
527  PAGE_GET_PHYSICAL_ADDRESS(ptEntry);
528 
529  void *virtualAddress = reinterpret_cast<void *>(
530  ((i & 0x100) ? (~0ULL << 48) :
531  0ULL) | /* Sign-extension. */
532  (i << 39) |
533  (j << 30) | (k << 21) | (l << 12));
534 
535  if (flags & PAGE_SHARED)
536  {
537  // The physical address is now referenced (shared) in
538  // two address spaces, so make sure we hold another
539  // reference on it. Otherwise, if one of the two
540  // address spaces frees the page, the other may still
541  // refer to the bad page (and eventually double-free).
542  PhysicalMemoryManager::instance().pin(physicalAddress);
543 
544  // Handle shared mappings - don't copy the original
545  // page.
546  pClone->mapUnlocked(
547  physicalAddress, virtualAddress,
548  fromFlags(flags, true));
549  continue;
550  }
551 
552  // Map the new page in to the new address space for
553  // copy-on-write. This implies read-only (so we #PF for copy
554  // on write).
555  bool bWasCopyOnWrite = (flags & PAGE_COPY_ON_WRITE);
556  if (copyOnWrite && (flags & PAGE_WRITE))
557  {
558  flags |= PAGE_COPY_ON_WRITE;
559  flags &= ~PAGE_WRITE;
560  }
561  pClone->mapUnlocked(
562  physicalAddress, virtualAddress,
563  fromFlags(flags, true));
564 
565  // We need to modify the entry in *this* address space as
566  // well to also have the read-only and copy-on-write flag
567  // set, as otherwise writes in the parent process will cause
568  // the child process to see those changes immediately. Note:
569  // changes only needed if we're setting copy-on-write as
570  // otherwise the flags are unchanged in the parent space.
571  if (copyOnWrite)
572  {
573  PAGE_SET_FLAGS(ptEntry, flags);
574  Processor::invalidate(virtualAddress);
575  }
576 
577  // Pin the page twice - once for each side of the clone.
578  // But only pin for the parent if the parent page is not
579  // already copy on write. If we pin the CoW page, it'll be
580  // leaked when both parent and child terminate if the parent
581  // clone()s again.
582  if (!bWasCopyOnWrite)
583  PhysicalMemoryManager::instance().pin(physicalAddress);
584  PhysicalMemoryManager::instance().pin(physicalAddress);
585  }
586  }
587  }
588  }
589 
590  // Before returning the address space, bring across metadata.
591  // Note though that if the parent of the clone (ie, this address space)
592  // is the kernel address space, we mustn't copy metadata or else the
593  // userspace defaults in the constructor get wiped out.
594 
595  if (m_Heap < KERNEL_SPACE_START)
596  {
597  pClone->m_Heap = m_Heap;
598  pClone->m_HeapEnd = m_HeapEnd;
599  }
600 
601  // No longer need this address space's lock - cloning is mostly done.
602  m_Lock.release();
603 
604  // Now we pick up the stacks lock, so we can copy safely. However, we don't
605  // have the VirtualAddressSpace lock, so we can still safely use the heap
606  // without worrying about re-entering.
607  m_StacksLock.acquire();
608 
609  if (m_pStackTop < KERNEL_SPACE_START)
610  {
611  pClone->m_pStackTop = m_pStackTop;
612  for (Vector<Stack *>::Iterator it = m_freeStacks.begin();
613  it != m_freeStacks.end(); ++it)
614  {
615  Stack *pNewStack = new Stack(**it);
616  pClone->m_freeStacks.pushBack(pNewStack);
617  }
618  }
619 
620  m_StacksLock.release();
621 
622  return pClone;
623 }
624 
626 {
627  LockGuard<Spinlock> guard(m_Lock);
628 
629  // The userspace area is only the bottom half of the address space - the top
630  // 256 PML4 entries are for the kernel, and these should be mapped anyway.
631  for (uint64_t i = 0; i < 256; i++)
632  {
633  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, i);
634  if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
635  continue;
636 
637  for (uint64_t j = 0; j < 512; j++)
638  {
639  uint64_t *pdptEntry =
640  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), j);
641  if ((*pdptEntry & PAGE_PRESENT) != PAGE_PRESENT)
642  continue;
643 
644  for (uint64_t k = 0; k < 512; k++)
645  {
646  uint64_t *pdEntry =
647  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdptEntry), k);
648  if ((*pdEntry & PAGE_PRESENT) != PAGE_PRESENT)
649  continue;
650 
651  // Address this region begins at.
652  void *regionVirtualAddress = reinterpret_cast<void *>(
653  ((i & 0x100) ? (~0ULL << 48) : 0ULL) | /* Sign-extension. */
654  (i << 39) | (j << 30) | (k << 21));
655 
656  if (regionVirtualAddress < USERSPACE_VIRTUAL_START)
657  continue;
658  if (regionVirtualAddress > KERNEL_SPACE_START)
659  break;
660 
662  if ((*pdEntry & PAGE_2MB) == PAGE_2MB)
663  continue;
664 
665  for (uint64_t l = 0; l < 512; l++)
666  {
667  uint64_t *ptEntry =
668  TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdEntry), l);
669  if ((*ptEntry & PAGE_PRESENT) != PAGE_PRESENT)
670  continue;
671 
672  void *virtualAddress = reinterpret_cast<void *>(
673  reinterpret_cast<uintptr_t>(regionVirtualAddress) |
674  (l << 12));
675 
676  size_t flags = PAGE_GET_FLAGS(ptEntry);
677  physical_uintptr_t physicalAddress =
678  PAGE_GET_PHYSICAL_ADDRESS(ptEntry);
679 
680  // Release the physical memory if it is not shared with
681  // another process (eg, memory mapped file) Also avoid
682  // stumbling over a swapped out page.
686  if ((flags & (PAGE_SHARED | PAGE_SWAPPED)) == 0)
687  {
688  PhysicalMemoryManager::instance().freePage(
689  physicalAddress);
690  }
691 
692  // Free the page.
693  trackPages(-1, 0, 0);
694  *ptEntry = 0;
695  Processor::invalidate(virtualAddress);
696  }
697 
698  // Remove the table.
699  PhysicalMemoryManager::instance().freePage(
700  PAGE_GET_PHYSICAL_ADDRESS(pdEntry));
701  *pdEntry = 0;
702  }
703 
704  PhysicalMemoryManager::instance().freePage(
705  PAGE_GET_PHYSICAL_ADDRESS(pdptEntry));
706  *pdptEntry = 0;
707  }
708 
709  PhysicalMemoryManager::instance().freePage(
710  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry));
711  *pml4Entry = 0;
712  }
713 
714  // Reset heap; it's been wiped out by this reversion.
715  m_HeapEnd = m_Heap;
716 }
717 
718 bool X64VirtualAddressSpace::mapPageStructures(
719  physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
720 {
721  LockGuard<Spinlock> guard(m_Lock);
722 
723  size_t Flags = toFlags(flags);
724  size_t pml4Index = PML4_INDEX(virtualAddress);
725  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
726 
727  // Is a page directory pointer table present?
728  if (conditionalTableEntryMapping(pml4Entry, physAddress, Flags) == true)
729  return true;
730 
731  size_t pageDirectoryPointerIndex =
732  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
733  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
734  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
735 
736  // Is a page directory present?
737  if (conditionalTableEntryMapping(
738  pageDirectoryPointerEntry, physAddress, Flags) == true)
739  return true;
740 
741  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
742  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
743  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
744  pageDirectoryIndex);
745 
746  // Is a page table present?
747  if (conditionalTableEntryMapping(pageDirectoryEntry, physAddress, Flags) ==
748  true)
749  return true;
750 
751  size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
752  uint64_t *pageTableEntry = TABLE_ENTRY(
753  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);
754 
755  // Is a page already present?
756  if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
757  {
758  *pageTableEntry = physAddress | Flags;
759  return true;
760  }
761  return false;
762 }
763 
764 bool X64VirtualAddressSpace::mapPageStructuresAbove4GB(
765  physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
766 {
767  LockGuard<Spinlock> guard(m_Lock);
768 
769  size_t Flags = toFlags(flags);
770  size_t pml4Index = PML4_INDEX(virtualAddress);
771  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
772 
773  // Is a page directory pointer table present?
774  if (conditionalTableEntryAllocation(pml4Entry, Flags) == false)
775  return true;
776 
777  size_t pageDirectoryPointerIndex =
778  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
779  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
780  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
781 
782  // Is a page directory present?
783  if (conditionalTableEntryAllocation(pageDirectoryPointerEntry, Flags) ==
784  false)
785  return true;
786 
787  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
788  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
789  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
790  pageDirectoryIndex);
791 
792  // Is a page table present?
793  if (conditionalTableEntryAllocation(pageDirectoryEntry, Flags) == false)
794  return true;
795 
796  size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
797  uint64_t *pageTableEntry = TABLE_ENTRY(
798  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);
799 
800  // Is a page already present?
801  if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
802  {
803  *pageTableEntry = physAddress | Flags;
804  return true;
805  }
806  return false;
807 }
808 
809 VirtualAddressSpace::Stack *X64VirtualAddressSpace::allocateStack()
810 {
811  size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
812  if (this == &m_KernelSpace)
813  sz = KERNEL_STACK_SIZE;
814  return doAllocateStack(sz);
815 }
816 
817 VirtualAddressSpace::Stack *
818 X64VirtualAddressSpace::allocateStack(size_t stackSz)
819 {
820  if (stackSz == 0)
821  return allocateStack();
822  return doAllocateStack(stackSz);
823 }
824 
825 VirtualAddressSpace::Stack *
826 X64VirtualAddressSpace::doAllocateStack(size_t sSize)
827 {
828  size_t flags = 0;
829  bool bMapAll = false;
830  if (this == &m_KernelSpace)
831  {
832  // Don't demand map kernel mode stacks.
833  flags = VirtualAddressSpace::KernelMode;
834  bMapAll = true;
835  }
836 
837  const size_t pageSz = PhysicalMemoryManager::getPageSize();
838 
839  // Grab a new stack pointer. Use the list of freed stacks if we can,
840  // otherwise adjust the internal stack pointer. Using the list of freed
841  // stacks helps avoid having the virtual address creep downwards.
842  void *pStack = 0;
843  m_StacksLock.acquire();
844  if (m_freeStacks.count() != 0)
845  {
846  Stack *poppedStack = m_freeStacks.popBack();
847  if (poppedStack->getSize() >= sSize)
848  {
849  pStack = poppedStack->getTop();
850  }
851  delete poppedStack;
852  }
853  m_StacksLock.release();
854 
855  if (!pStack)
856  {
857  // Need the main address space lock now so we can adjust the next stack
858  // pointer without interference.
859  m_Lock.acquire();
860  pStack = m_pStackTop;
861 
862  // Always leave one page unmapped between each stack to catch overflow.
863  m_pStackTop = adjust_pointer(m_pStackTop, -(sSize + pageSz));
864  m_Lock.release();
865  }
866 
867  // Map the top of the stack in proper.
868  uintptr_t firstPage = reinterpret_cast<uintptr_t>(pStack) - pageSz;
869  physical_uintptr_t phys = PhysicalMemoryManager::instance().allocatePage();
870  if (!bMapAll)
871  PhysicalMemoryManager::instance().pin(phys);
872  if (!map(
873  phys, reinterpret_cast<void *>(firstPage),
874  flags | VirtualAddressSpace::Write))
875  WARNING("map() failed in doAllocateStack");
876 
877  // Bring in the rest of the stack as CoW.
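 // Each of these pages only receives its own physical frame once it is
 // first written to, so a large stack costs very little until it is used.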
878  uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
879  for (uintptr_t addr = stackBottom; addr < firstPage; addr += pageSz)
880  {
881  size_t map_flags = 0;
882 
883  if (!bMapAll)
884  {
885  // Copy first stack page on write.
886  PhysicalMemoryManager::instance().pin(phys);
887  map_flags = VirtualAddressSpace::CopyOnWrite;
888  }
889  else
890  {
891  phys = PhysicalMemoryManager::instance().allocatePage();
892  map_flags = VirtualAddressSpace::Write;
893  }
894 
895  if (!map(phys, reinterpret_cast<void *>(addr), flags | map_flags))
896  WARNING("CoW map() failed in doAllocateStack");
897  }
898 
899  Stack *stackInfo = new Stack(pStack, sSize);
900  return stackInfo;
901 }
902 
903 void X64VirtualAddressSpace::freeStack(Stack *pStack)
904 {
905  const size_t pageSz = PhysicalMemoryManager::getPageSize();
906 
907  // Clean up the stack
908  uintptr_t stackTop = reinterpret_cast<uintptr_t>(pStack->getTop());
909  for (size_t i = 0; i < pStack->getSize(); i += pageSz)
910  {
911  stackTop -= pageSz;
912  void *v = reinterpret_cast<void *>(stackTop);
913  if (!isMapped(v))
914  {
915  continue;
916  }
917 
918  size_t flags = 0;
919  physical_uintptr_t phys = 0;
920  getMapping(v, phys, flags);
921 
922  unmap(v);
923  PhysicalMemoryManager::instance().freePage(phys);
924  }
925 
926  // Add the stack to the list; using the stacks lock, not the main address
927  // space lock, as pushing could require mapping pages via the heap.
928  m_StacksLock.acquire();
929  m_freeStacks.pushBack(pStack);
930  m_StacksLock.release();
931 }
932 
933 X64VirtualAddressSpace::~X64VirtualAddressSpace()
934 {
935  PhysicalMemoryManager &physicalMemoryManager =
936  PhysicalMemoryManager::instance();
937 
940 
941  // Drop back to the kernel address space. This will blow away the child's
942  // mappings, but maintains shared pages as needed.
943  revertToKernelAddressSpace();
944 
945  // Free the PageMapLevel4
946  physicalMemoryManager.freePage(m_PhysicalPML4);
947 }
948 
949 X64VirtualAddressSpace::X64VirtualAddressSpace()
950  : VirtualAddressSpace(USERSPACE_VIRTUAL_HEAP), m_PhysicalPML4(0),
951  m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(),
952  m_bKernelSpace(false), m_Lock(false, false), m_StacksLock(false)
953 {
954  // Allocate a new PageMapLevel4
955  PhysicalMemoryManager &physicalMemoryManager =
956  PhysicalMemoryManager::instance();
957  m_PhysicalPML4 = physicalMemoryManager.allocatePage();
958 
959  // Initialise the page directory
960  ByteSet(
961  reinterpret_cast<void *>(physicalAddress(m_PhysicalPML4)), 0, 0x800);
962 
963  // Copy the kernel PageMapLevel4
964  MemoryCopy(
965  reinterpret_cast<void *>(physicalAddress(m_PhysicalPML4) + 0x800),
966  reinterpret_cast<void *>(
967  physicalAddress(m_KernelSpace.m_PhysicalPML4) + 0x800),
968  0x800);
969 }
970 
971 X64VirtualAddressSpace::X64VirtualAddressSpace(
972  void *Heap, physical_uintptr_t PhysicalPML4, void *VirtualStack)
973  : VirtualAddressSpace(Heap), m_PhysicalPML4(PhysicalPML4),
974  m_pStackTop(VirtualStack), m_freeStacks(), m_bKernelSpace(true),
975  m_Lock(false, false), m_StacksLock(false)
976 {
977 }
978 
979 bool X64VirtualAddressSpace::getPageTableEntry(
980  void *virtualAddress, uint64_t *&pageTableEntry) const
981 {
982  size_t pml4Index = PML4_INDEX(virtualAddress);
983  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
984 
985  // Is a page directory pointer table present?
986  if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
987  return false;
988 
989  size_t pageDirectoryPointerIndex =
990  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
991  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
992  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
993 
994  // Is a page directory present?
995  if ((*pageDirectoryPointerEntry & PAGE_PRESENT) != PAGE_PRESENT)
996  return false;
997 
998  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
999  uint64_t *pageDirectoryEntry = TABLE_ENTRY(
1000  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
1001  pageDirectoryIndex);
1002 
1003  // Is a page table or 2MB page present?
1004  if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
1005  return false;
1006  if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
1007  return false;
1008 
1009  size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
1010  pageTableEntry = TABLE_ENTRY(
1011  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);
1012 
1013  // Is a page present?
1014  if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT &&
1015  (*pageTableEntry & PAGE_SWAPPED) != PAGE_SWAPPED)
1016  return false;
1017 
1018  return true;
1019 }
1020 
1021 void X64VirtualAddressSpace::maybeFreeTables(void *virtualAddress)
1022 {
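 // Work bottom-up: free the page table if it no longer holds any present or
 // swapped entries, then check whether the page directory and the page
 // directory pointer table have become empty too, stopping at the first
 // level that is still in use.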
1023  bool bCanFreePageTable = true;
1024 
1025  uint64_t *pageDirectoryEntry = 0;
1026 
1027  size_t pml4Index = PML4_INDEX(virtualAddress);
1028  uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);
1029 
1030  // Is a page directory pointer table present?
1031  if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
1032  return;
1033 
1034  size_t pageDirectoryPointerIndex =
1035  PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
1036  uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
1037  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);
1038 
1039  if ((*pageDirectoryPointerEntry & PAGE_PRESENT) == PAGE_PRESENT)
1040  {
1041  size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
1042  pageDirectoryEntry = TABLE_ENTRY(
1043  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
1044  pageDirectoryIndex);
1045 
1046  if ((*pageDirectoryEntry & PAGE_PRESENT) == PAGE_PRESENT)
1047  {
1048  if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
1049  {
1050  bCanFreePageTable = false;
1051  }
1052  else
1053  {
1054  for (size_t i = 0; i < 0x200; ++i)
1055  {
1056  uint64_t *entry = TABLE_ENTRY(
1057  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), i);
1058  if ((*entry & PAGE_PRESENT) == PAGE_PRESENT ||
1059  (*entry & PAGE_SWAPPED) == PAGE_SWAPPED)
1060  {
1061  bCanFreePageTable = false;
1062  break;
1063  }
1064  }
1065  }
1066  }
1067  }
1068 
1069  if (bCanFreePageTable && pageDirectoryEntry)
1070  {
1071  PhysicalMemoryManager::instance().freePage(
1072  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry));
1073  *pageDirectoryEntry = 0;
1074  }
1075  else if (!bCanFreePageTable)
1076  {
1077  return;
1078  }
1079 
1080  // Now that we've cleaned up the page table, we can scan the parent tables.
1081 
1082  bool bCanFreeDirectory = true;
1083  for (size_t i = 0; i < 0x200; ++i)
1084  {
1085  uint64_t *entry = TABLE_ENTRY(
1086  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry), i);
1087  if ((*entry & PAGE_PRESENT) == PAGE_PRESENT)
1088  {
1089  bCanFreeDirectory = false;
1090  break;
1091  }
1092  }
1093 
1094  if (bCanFreeDirectory)
1095  {
1096  PhysicalMemoryManager::instance().freePage(
1097  PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry));
1098  *pageDirectoryPointerEntry = 0;
1099  }
1100  else
1101  {
1102  return;
1103  }
1104 
1105  bool bCanFreeDirectoryPointerTable = true;
1106  for (size_t i = 0; i < 0x200; ++i)
1107  {
1108  uint64_t *entry = TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), i);
1109  if ((*entry & PAGE_PRESENT) == PAGE_PRESENT)
1110  {
1111  bCanFreeDirectoryPointerTable = false;
1112  break;
1113  }
1114  }
1115 
1116  if (bCanFreeDirectoryPointerTable)
1117  {
1118  PhysicalMemoryManager::instance().freePage(
1119  PAGE_GET_PHYSICAL_ADDRESS(pml4Entry));
1120  *pml4Entry = 0;
1121  }
1122 }
1123 
1124 uint64_t X64VirtualAddressSpace::toFlags(size_t flags, bool bFinal) const
1125 {
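 // Translate the architecture-independent VirtualAddressSpace flags into
 // x86-64 page-table bits. Swapped pages intentionally stay non-present so
 // that any access raises a page fault.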
1126  uint64_t Flags = 0;
1127  if ((flags & KernelMode) == KernelMode)
1128  Flags |= PAGE_GLOBAL;
1129  else
1130  Flags |= PAGE_USER;
1131  if ((flags & Write) == Write)
1132  Flags |= PAGE_WRITE;
1133  if ((flags & WriteCombine) == WriteCombine)
1134  Flags |= PAGE_WRITE_COMBINE;
1135  if ((flags & CacheDisable) == CacheDisable)
1136  Flags |= PAGE_CACHE_DISABLE;
1137  if ((flags & Execute) != Execute)
1138  Flags |= PAGE_NX;
1139  if ((flags & Swapped) == Swapped)
1140  Flags |= PAGE_SWAPPED;
1141  else
1142  Flags |= PAGE_PRESENT;
1143  if ((flags & CopyOnWrite) == CopyOnWrite)
1144  Flags |= PAGE_COPY_ON_WRITE;
1145  if ((flags & Shared) == Shared)
1146  Flags |= PAGE_SHARED;
1147  if (bFinal)
1148  {
1149  if ((flags & WriteThrough) == WriteThrough)
1150  Flags |= PAGE_WRITE_THROUGH;
1151  if ((flags & Accessed) == Accessed)
1152  Flags |= PAGE_ACCESSED;
1153  if ((flags & Dirty) == Dirty)
1154  Flags |= PAGE_DIRTY;
1155  if ((flags & ClearDirty) == ClearDirty)
1156  Flags &= ~PAGE_DIRTY;
1157  }
1158  return Flags;
1159 }
1160 
1161 size_t X64VirtualAddressSpace::fromFlags(uint64_t Flags, bool bFinal) const
1162 {
1163  size_t flags = 0;
1164  if ((Flags & PAGE_USER) != PAGE_USER)
1165  flags |= KernelMode;
1166  if ((Flags & PAGE_WRITE) == PAGE_WRITE)
1167  flags |= Write;
1168  if ((Flags & PAGE_WRITE_COMBINE) == PAGE_WRITE_COMBINE)
1169  flags |= WriteCombine;
1170  if ((Flags & PAGE_CACHE_DISABLE) == PAGE_CACHE_DISABLE)
1171  flags |= CacheDisable;
1172  if ((Flags & PAGE_NX) != PAGE_NX)
1173  flags |= Execute;
1174  if ((Flags & PAGE_SWAPPED) == PAGE_SWAPPED)
1175  flags |= Swapped;
1176  if ((Flags & PAGE_COPY_ON_WRITE) == PAGE_COPY_ON_WRITE)
1177  flags |= CopyOnWrite;
1178  if ((Flags & PAGE_SHARED) == PAGE_SHARED)
1179  flags |= Shared;
1180  if (bFinal)
1181  {
1182  if ((Flags & PAGE_WRITE_THROUGH) == PAGE_WRITE_THROUGH)
1183  flags |= WriteThrough;
1184  if ((Flags & PAGE_ACCESSED) == PAGE_ACCESSED)
1185  flags |= Accessed;
1186  if ((Flags & PAGE_DIRTY) == PAGE_DIRTY)
1187  flags |= Dirty;
1188  }
1189  return flags;
1190 }
1191 
1192 bool X64VirtualAddressSpace::conditionalTableEntryAllocation(
1193  uint64_t *tableEntry, uint64_t flags)
1194 {
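 // Make sure *tableEntry points at a present next-level table: if it does
 // not, allocate and zero a fresh page for it and install it with WRITE and
 // USER set, leaving the restrictive permissions to the leaf page-table
 // entries.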
1195  // Convert VirtualAddressSpace::* flags to X64 flags.
1196  flags = toFlags(flags);
1197 
1198  if ((*tableEntry & PAGE_PRESENT) != PAGE_PRESENT)
1199  {
1200  // Allocate a page
1201  PhysicalMemoryManager &PMemoryManager =
1202  PhysicalMemoryManager::instance();
1203  uint64_t page = PMemoryManager.allocatePage();
1204  if (page == 0)
1205  {
1206  ERROR("OOM in "
1207  "X64VirtualAddressSpace::conditionalTableEntryAllocation!");
1208  return false;
1209  }
1210 
1211  // Add the WRITE and USER flags so that these can be controlled
1212  // on a page-granularity level.
1213  flags &= ~(PAGE_GLOBAL | PAGE_NX | PAGE_SWAPPED | PAGE_COPY_ON_WRITE);
1214  flags |= PAGE_WRITE | PAGE_USER;
1215 
1216  // Map the page.
1217  *tableEntry = page | flags;
1218 
1219  // Zero the page directory pointer table.
1220  ByteSet(
1221  physicalAddress(reinterpret_cast<void *>(page)), 0,
1222  PhysicalMemoryManager::getPageSize());
1223  }
1224  else if (((*tableEntry & PAGE_USER) != PAGE_USER) && (flags & PAGE_USER))
1225  {
1226  // Flags request user mapping, entry doesn't have that.
1227  *tableEntry |= PAGE_USER;
1228  }
1229 
1230  return true;
1231 }
1232 
1233 bool X64VirtualAddressSpace::conditionalTableEntryMapping(
1234  uint64_t *tableEntry, uint64_t physAddress, uint64_t flags)
1235 {
1236  // Convert VirtualAddressSpace::* flags to X64 flags.
1237  flags = toFlags(flags, true);
1238 
1239  if ((*tableEntry & PAGE_PRESENT) != PAGE_PRESENT)
1240  {
1241  // Map the page. Add the WRITE and USER flags so that these can be
1242  // controlled on a page-granularity level.
1243  *tableEntry =
1244  physAddress | ((flags & ~(PAGE_GLOBAL | PAGE_NX | PAGE_SWAPPED |
1245  PAGE_COPY_ON_WRITE)) |
1246  PAGE_WRITE | PAGE_USER);
1247 
1248  // Zero the page directory pointer table
1249  ByteSet(
1250  physicalAddress(reinterpret_cast<void *>(physAddress)), 0,
1251  PhysicalMemoryManager::getPageSize());
1252 
1253  return true;
1254  }
1255  else if (((*tableEntry & PAGE_USER) != PAGE_USER) && (flags & PAGE_USER))
1256  {
1257  // Flags request user mapping, entry doesn't have that.
1258  *tableEntry |= PAGE_USER;
1259  }
1260 
1261  return false;
1262 }