The Pedigree Project  0.1
x86/VirtualAddressSpace.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "VirtualAddressSpace.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Scheduler.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/utilities/utility.h"

#include "pedigree/kernel/Log.h"
//
// Page Table/Directory entry flags
//
#define PAGE_PRESENT 0x01
#define PAGE_WRITE 0x02
#define PAGE_USER 0x04
#define PAGE_WRITE_COMBINE 0x08
#define PAGE_CACHE_DISABLE 0x10
#define PAGE_ACCESSED 0x20
#define PAGE_DIRTY 0x40
#define PAGE_4MB 0x80
#define PAGE_PAT 0x80
#define PAGE_GLOBAL 0x100
#define PAGE_SWAPPED 0x200
#define PAGE_COPY_ON_WRITE 0x400
#define PAGE_SHARED 0x800
#define PAGE_WRITE_THROUGH (PAGE_PAT | PAGE_WRITE_COMBINE)
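
// Bits 0-8 of these values are hardware-defined x86 PTE/PDE bits. Bits 9-11
// (PAGE_SWAPPED, PAGE_COPY_ON_WRITE, PAGE_SHARED) sit in the "available"
// bits that the MMU ignores, so the kernel is free to use them for its own
// bookkeeping. Note that PAGE_4MB and PAGE_PAT share bit 7: it means "page
// size" in a directory entry but "PAT" in a 4 KB page-table entry, and
// PAGE_WRITE_THROUGH combines PAT with PWT (bit 3) to select a write-through
// memory type through the Page Attribute Table.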

//
// Macros
//
#define PAGE_DIRECTORY_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 22) & 0x3FF)
#define PAGE_TABLE_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 12) & 0x3FF)

#define PAGE_DIRECTORY_ENTRY(pageDir, index) \
    (&reinterpret_cast<uint32_t *>(pageDir)[index])
#define PAGE_TABLE_ENTRY(VirtualPageTables, pageDirectoryIndex, index) \
    (&reinterpret_cast<uint32_t *>(                                    \
        adjust_pointer(VirtualPageTables, pageDirectoryIndex * 4096))[index])

#define PAGE_GET_FLAGS(x) (*x & 0xFFF)
#define PAGE_SET_FLAGS(x, f) *x = (*x & ~0xFFF) | f
#define PAGE_GET_PHYSICAL_ADDRESS(x) (*x & ~0xFFF)
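
// Worked example: for a virtual address 0xC0100234,
//   PAGE_DIRECTORY_INDEX -> 0xC0100234 >> 22           = 0x300 (entry 768)
//   PAGE_TABLE_INDEX     -> (0xC0100234 >> 12) & 0x3FF = 0x100 (entry 256)
// The low 12 bits (0x234 here) are the offset inside the 4 KB page; in an
// entry those same bits hold the flags, which is why PAGE_GET_FLAGS masks
// with 0xFFF and PAGE_GET_PHYSICAL_ADDRESS masks them off.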

// Defined in boot-standalone.s
extern void *pagedirectory;

/** Array of free pages, used during the mapping algorithms in case a new
    page table needs to be mapped, which must be done without relinquishing
    the lock (which means we can't call the PMM!)

    There is one page per processor. */
physical_uintptr_t g_EscrowPages[256];

X86KernelVirtualAddressSpace X86KernelVirtualAddressSpace::m_Instance;

VirtualAddressSpace *g_pCurrentlyCloning = 0;

VirtualAddressSpace &VirtualAddressSpace::getKernelAddressSpace()
{
    return X86KernelVirtualAddressSpace::m_Instance;
}

VirtualAddressSpace *VirtualAddressSpace::create()
{
    return new X86VirtualAddressSpace();
}

bool X86VirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
        return false;
    else if (pMem >= getEndOfHeap())
        return false;
    else
        return true;
}
void *X86VirtualAddressSpace::getEndOfHeap()
{
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
        KERNEL_VIRTUAL_HEAP_SIZE);
}

bool X86VirtualAddressSpace::isAddressValid(void *virtualAddress)
{
    return true;
}
bool X86VirtualAddressSpace::isMapped(void *virtualAddress)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic(
            "VirtualAddressSpace::isMapped(): not in this VirtualAddressSpace");
#endif

    return doIsMapped(virtualAddress);
}
bool X86VirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::map(): not in this VirtualAddressSpace");
#endif

    return doMap(physicalAddress, virtualAddress, flags);
}
void X86VirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::getMapping(): not in this "
              "VirtualAddressSpace");
#endif

    doGetMapping(virtualAddress, physicalAddress, flags);
}
void X86VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic(
            "VirtualAddressSpace::setFlags(): not in this VirtualAddressSpace");
#endif

    doSetFlags(virtualAddress, newFlags);
}
void X86VirtualAddressSpace::unmap(void *virtualAddress)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::unmap(): not in this VirtualAddressSpace");
#endif

    doUnmap(virtualAddress);
}
void *X86VirtualAddressSpace::allocateStack()
{
    void *st = doAllocateStack(USERSPACE_VIRTUAL_MAX_STACK_SIZE);

    return st;
}
void *X86VirtualAddressSpace::allocateStack(size_t stackSz)
{
    if (stackSz == 0)
        stackSz = USERSPACE_VIRTUAL_MAX_STACK_SIZE;
    void *st = doAllocateStack(stackSz);

    return st;
}
void X86VirtualAddressSpace::freeStack(void *pStack)
{
    // Add the stack to the list of free stacks so it can be reused.
    m_freeStacks.pushBack(pStack);
}

bool X86VirtualAddressSpace::mapPageStructures(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Page table present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        // TODO: Map the page table into all other address spaces
        *pageDirectoryEntry = physicalAddress | toFlags(flags);

        // Zero the page table
        ByteSet(
            PAGE_TABLE_ENTRY(m_VirtualPageTables, pageDirectoryIndex, 0), 0,
            PhysicalMemoryManager::getPageSize());

        return true;
    }
    else
    {
        size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
        uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
            m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

        // Page frame present?
        if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
        {
            *pageTableEntry = physicalAddress | toFlags(flags);
            return true;
        }
    }
    return false;
}

X86VirtualAddressSpace::~X86VirtualAddressSpace()
{
    PhysicalMemoryManager &physicalMemoryManager =
        PhysicalMemoryManager::instance();
    VirtualAddressSpace &VAddressSpace =
        Processor::information().getVirtualAddressSpace();

    // Switch to this virtual address space
    Processor::switchAddressSpace(*this);

    // Get the page table used to map this page directory into the address
    // space
    physical_uintptr_t pageTable = PAGE_GET_PHYSICAL_ADDRESS(
        PAGE_DIRECTORY_ENTRY(VIRTUAL_PAGE_DIRECTORY, 0x3FE));

    // Switch to the original virtual address space
    Processor::switchAddressSpace(VAddressSpace);

    // TODO: Free other things, perhaps in VirtualAddressSpace
    // We can't do this in the VirtualAddressSpace destructor though!

    // Free the page table used to map the page directory into the address
    // space, and the page directory itself
    physicalMemoryManager.freePage(pageTable);
    physicalMemoryManager.freePage(m_PhysicalPageDirectory);
}

X86VirtualAddressSpace::X86VirtualAddressSpace()
    : VirtualAddressSpace(USERSPACE_VIRTUAL_HEAP), m_PhysicalPageDirectory(0),
      m_VirtualPageDirectory(VIRTUAL_PAGE_DIRECTORY),
      m_VirtualPageTables(VIRTUAL_PAGE_TABLES),
      m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(), m_Lock(false, true)
{
    // Allocate a new page directory
    PhysicalMemoryManager &physicalMemoryManager =
        PhysicalMemoryManager::instance();
    m_PhysicalPageDirectory = physicalMemoryManager.allocatePage();
    physical_uintptr_t pageTable = physicalMemoryManager.allocatePage();

    // Get the current address space
    VirtualAddressSpace &virtualAddressSpace =
        Processor::information().getVirtualAddressSpace();

    // Map the page directory and page table into the address space (at a
    // temporary location)
    virtualAddressSpace.map(
        m_PhysicalPageDirectory, KERNEL_VIRTUAL_TEMP1,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
    virtualAddressSpace.map(
        pageTable, KERNEL_VIRTUAL_TEMP2,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    // Initialise the page directory
    ByteSet(KERNEL_VIRTUAL_TEMP1, 0, 0xC00);

    // Initialise the page table
    ByteSet(KERNEL_VIRTUAL_TEMP2, 0, 0x1000);

    // Copy the kernel address space to the new address space
    MemoryCopy(
        adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xC00),
        adjust_pointer(
            X86KernelVirtualAddressSpace::m_Instance.m_VirtualPageDirectory,
            0xC00),
        0x3F8);

    // Map the page tables into the new address space
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xFFC)) =
        m_PhysicalPageDirectory | PAGE_PRESENT | PAGE_WRITE;

    // Map the page directory into the new address space
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xFF8)) =
        pageTable | PAGE_PRESENT | PAGE_WRITE;
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP2, 0xFFC)) =
        m_PhysicalPageDirectory | PAGE_PRESENT | PAGE_WRITE;
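
    // This is the classic recursive page-directory trick: the last directory
    // entry (offset 0xFFC, entry 1023) points back at the directory itself,
    // so every page table becomes visible through the 4 MB window at
    // m_VirtualPageTables. Entry 1022 (offset 0xFF8) points at 'pageTable',
    // whose own last entry maps the directory, which is what backs
    // m_VirtualPageDirectory.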

    // Unmap the page directory and page table from the address space (from
    // the temporary location)
    virtualAddressSpace.unmap(KERNEL_VIRTUAL_TEMP1);
    virtualAddressSpace.unmap(KERNEL_VIRTUAL_TEMP2);
}

X86VirtualAddressSpace::X86VirtualAddressSpace(
    void *Heap, physical_uintptr_t PhysicalPageDirectory,
    void *VirtualPageDirectory, void *VirtualPageTables, void *VirtualStack)
    : VirtualAddressSpace(Heap), m_PhysicalPageDirectory(PhysicalPageDirectory),
      m_VirtualPageDirectory(VirtualPageDirectory),
      m_VirtualPageTables(VirtualPageTables), m_pStackTop(VirtualStack),
      m_freeStacks(), m_Lock(false, true)
{
}
bool X86VirtualAddressSpace::doIsMapped(void *virtualAddress)
{
#ifndef TRACK_LOCKS
    LockGuard<Spinlock> guard(m_Lock);
#endif

    virtualAddress = reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(virtualAddress) & ~0xFFF);

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table or 4MB page present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    // Is it a 4MB page?
    if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
        return true;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page present?
    return ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT);
}
bool X86VirtualAddressSpace::doMap(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    // Check if we have an allocated escrow page - if we don't, allocate it.
    if (g_EscrowPages[Processor::id()] == 0)
    {
        g_EscrowPages[Processor::id()] =
            PhysicalMemoryManager::instance().allocatePage();
        if (g_EscrowPages[Processor::id()] == 0)
        {
            // Still 0, we have problems.
            FATAL("Out of memory");
        }
    }

    LockGuard<Spinlock> guard(m_Lock);

    size_t Flags = toFlags(flags, true);
    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        size_t PdeFlags = toFlags(flags);

        // We need a page, but calling the PMM could cause reentrancy issues.
        // We use our allotted page from the escrow cache, then set the cache
        // entry to zero so that it will be replenished the next time it
        // needs to be used.
        uint32_t page = g_EscrowPages[Processor::id()];
        g_EscrowPages[Processor::id()] = 0;

        // Map the page
        *pageDirectoryEntry =
            page | PAGE_USER |
            ((PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE)) |
             PAGE_WRITE);

        // Zero the page table
        ByteSet(
            PAGE_TABLE_ENTRY(m_VirtualPageTables, pageDirectoryIndex, 0), 0,
            PhysicalMemoryManager::getPageSize());

        // If we map within the kernel space, we need to add this page table
        // to the other address spaces!
        //
        // Also, we don't want to do this if the processor isn't initialised...
        VirtualAddressSpace &VAS =
            Processor::information().getVirtualAddressSpace();
        if (Processor::m_Initialised == 2 &&
            virtualAddress >= KERNEL_SPACE_START)
        {
            for (size_t i = 0; i < Scheduler::instance().getNumProcesses(); i++)
            {
                Process *p = Scheduler::instance().getProcess(i);

                X86VirtualAddressSpace *x86VAS =
                    reinterpret_cast<X86VirtualAddressSpace *>(
                        p->getAddressSpace());
                if (x86VAS == &VAS)
                    continue;

                Processor::switchAddressSpace(*p->getAddressSpace());

                pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                    x86VAS->m_VirtualPageDirectory, pageDirectoryIndex);
                *pageDirectoryEntry = page | PAGE_WRITE | PAGE_USER |
                                      (PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED |
                                                    PAGE_COPY_ON_WRITE));
            }
            if (g_pCurrentlyCloning)
            {
                X86VirtualAddressSpace *x86VAS =
                    reinterpret_cast<X86VirtualAddressSpace *>(
                        g_pCurrentlyCloning);
                if (x86VAS != &VAS)
                {
                    Processor::switchAddressSpace(*g_pCurrentlyCloning);

                    pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                        x86VAS->m_VirtualPageDirectory, pageDirectoryIndex);
                    *pageDirectoryEntry =
                        page | PAGE_WRITE | PAGE_USER |
                        (PdeFlags &
                         ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE));
                }
            }
            if (&VAS != &getKernelAddressSpace())
            {
                Processor::switchAddressSpace(getKernelAddressSpace());
                X86VirtualAddressSpace *x86VAS =
                    reinterpret_cast<X86VirtualAddressSpace *>(
                        &getKernelAddressSpace());

                pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                    x86VAS->m_VirtualPageDirectory, pageDirectoryIndex);
                *pageDirectoryEntry = page | PAGE_WRITE | PAGE_USER |
                                      (PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED |
                                                    PAGE_COPY_ON_WRITE));
            }
            Processor::switchAddressSpace(VAS);
        }
    }
    // The other corner case is when a table has been used for the kernel
    // before but is now being mapped as USER mode. We need to ensure that
    // the directory entry has the USER flag set.
    else if (
        (Flags & PAGE_USER) && ((*pageDirectoryEntry & PAGE_USER) != PAGE_USER))
    {
        *pageDirectoryEntry |= PAGE_USER;
    }

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page already present?
    if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
        return false;

    // Map the page
    *pageTableEntry = physicalAddress | Flags;

    // Flush the TLB
    Processor::invalidate(virtualAddress);

    return true;
}
void X86VirtualAddressSpace::doGetMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    // Get a pointer to the page-table entry (also checks whether the page is
    // actually present or marked swapped out)
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::getMapping(): function misused");

    // Extract the physical address and the flags
    physicalAddress = PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);
    flags = fromFlags(PAGE_GET_FLAGS(pageTableEntry), true);
}
void X86VirtualAddressSpace::doSetFlags(void *virtualAddress, size_t newFlags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry (also checks whether the page is
    // actually present or marked swapped out)
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::setFlags(): function misused");

    // Set the flags
    PAGE_SET_FLAGS(pageTableEntry, toFlags(newFlags, true));

    // Flush the TLB - we have modified the mapping for this address.
    Processor::invalidate(virtualAddress);
}
void X86VirtualAddressSpace::doUnmap(void *virtualAddress)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry (also checks whether the page is
    // actually present or marked swapped out)
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::unmap(): function misused");

    // Unmap the page
    *pageTableEntry = 0;

    // Invalidate the TLB entry
    Processor::invalidate(virtualAddress);
}
void *X86VirtualAddressSpace::doAllocateStack(size_t sSize)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a virtual address for the stack
    void *pStack = 0;
    if (m_freeStacks.count() != 0)
    {
        pStack = m_freeStacks.popBack();
    }
    else
    {
        pStack = m_pStackTop;
        m_pStackTop = adjust_pointer(m_pStackTop, -sSize);

        // Map the whole stack in up front (these pages are not demand-paged)
        uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
        for (size_t j = 0; j < sSize; j += 0x1000)
        {
            physical_uintptr_t phys =
                PhysicalMemoryManager::instance().allocatePage();
            bool b =
                map(phys, reinterpret_cast<void *>(stackBottom + j),
                    VirtualAddressSpace::Write);
            if (!b)
                WARNING("map() failed in doAllocateStack");
        }
    }
    return pStack;
}
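
// Illustrative usage (not from this file): stacks grow downwards, so the
// pointer returned by allocateStack() is the *top* of the stack, and
// freeStack() simply recycles it through m_freeStacks:
//
//   VirtualAddressSpace &vas =
//       Processor::information().getVirtualAddressSpace();
//   void *pStack = vas.allocateStack();  // top of a freshly mapped stack
//   // ... the thread runs with its stack pointer just below pStack ...
//   vas.freeStack(pStack);               // pushed onto m_freeStacks for reuse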

bool X86VirtualAddressSpace::getPageTableEntry(
    void *virtualAddress, uint32_t *&pageTableEntry)
{
    // Not a public-facing function - locking shouldn't be needed.

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table or 4MB page present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;
    if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
        return false;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page present (or at least swapped out)?
    if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT &&
        (*pageTableEntry & PAGE_SWAPPED) != PAGE_SWAPPED)
        return false;

    return true;
}

uint32_t X86VirtualAddressSpace::toFlags(size_t flags, bool bFinal)
{
    uint32_t Flags = 0;
    if ((flags & KernelMode) == KernelMode)
        Flags |= PAGE_GLOBAL;
    else
        Flags |= PAGE_USER;
    if ((flags & Write) == Write)
        Flags |= PAGE_WRITE;
    if ((flags & WriteCombine) == WriteCombine)
        Flags |= PAGE_WRITE_COMBINE;
    if ((flags & CacheDisable) == CacheDisable)
        Flags |= PAGE_CACHE_DISABLE;
    if ((flags & Swapped) == Swapped)
        Flags |= PAGE_SWAPPED;
    else
        Flags |= PAGE_PRESENT;
    if ((flags & CopyOnWrite) == CopyOnWrite)
        Flags |= PAGE_COPY_ON_WRITE;
    if ((flags & Shared) == Shared)
        Flags |= PAGE_SHARED;
    if (bFinal)
    {
        if ((flags & WriteThrough) == WriteThrough)
            Flags |= PAGE_WRITE_THROUGH;
        if ((flags & Accessed) == Accessed)
            Flags |= PAGE_ACCESSED;
        if ((flags & Dirty) == Dirty)
            Flags |= PAGE_DIRTY;
        if ((flags & ClearDirty) == ClearDirty)
            Flags &= ~PAGE_DIRTY;
    }
    return Flags;
}
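
// Worked example: toFlags(KernelMode | Write, true) returns
// PAGE_GLOBAL | PAGE_WRITE | PAGE_PRESENT (0x103) - KernelMode selects
// PAGE_GLOBAL instead of PAGE_USER, Write adds PAGE_WRITE, and PAGE_PRESENT
// appears because the page is not marked Swapped. fromFlags() below is the
// inverse mapping, reconstructing the generic flags from a raw entry.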
size_t X86VirtualAddressSpace::fromFlags(uint32_t Flags, bool bFinal)
{
    size_t flags = Execute;
    if ((Flags & PAGE_USER) != PAGE_USER)
        flags |= KernelMode;
    if ((Flags & PAGE_WRITE) == PAGE_WRITE)
        flags |= Write;
    if ((Flags & PAGE_WRITE_COMBINE) == PAGE_WRITE_COMBINE)
        flags |= WriteCombine;
    if ((Flags & PAGE_CACHE_DISABLE) == PAGE_CACHE_DISABLE)
        flags |= CacheDisable;
    if ((Flags & PAGE_SWAPPED) == PAGE_SWAPPED)
        flags |= Swapped;
    if ((Flags & PAGE_COPY_ON_WRITE) == PAGE_COPY_ON_WRITE)
        flags |= CopyOnWrite;
    if ((Flags & PAGE_SHARED) == PAGE_SHARED)
        flags |= Shared;
    if (bFinal)
    {
        if ((Flags & PAGE_WRITE_THROUGH) == PAGE_WRITE_THROUGH)
            flags |= WriteThrough;
        if ((Flags & PAGE_ACCESSED) == PAGE_ACCESSED)
            flags |= Accessed;
        if ((Flags & PAGE_DIRTY) == PAGE_DIRTY)
            flags |= Dirty;
    }
    return flags;
}

VirtualAddressSpace *X86VirtualAddressSpace::clone()
{
    VirtualAddressSpace &thisAddressSpace =
        Processor::information().getVirtualAddressSpace();

    // Create a new virtual address space
    VirtualAddressSpace *pClone = VirtualAddressSpace::create();
    if (pClone == 0)
    {
        WARNING("X86VirtualAddressSpace: Clone() failed!");
        return 0;
    }

    g_pCurrentlyCloning = pClone;

    uintptr_t v =
        beginCrossSpace(reinterpret_cast<X86VirtualAddressSpace *>(pClone));

    for (uintptr_t i = 0; i < 1024; i++)
    {
        uint32_t *pageDirectoryEntry =
            PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, i);

        if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        // Don't clone 4 MB pages.
        if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
            continue;

        for (uintptr_t j = 0; j < 1024; j++)
        {
            uint32_t *pageTableEntry =
                PAGE_TABLE_ENTRY(m_VirtualPageTables, i, j);

            if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            uint32_t flags = PAGE_GET_FLAGS(pageTableEntry);
            physical_uintptr_t physicalAddress =
                PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);

            void *virtualAddress =
                reinterpret_cast<void *>(((i * 1024) + j) * 4096);
            if ((virtualAddress < USERSPACE_VIRTUAL_START) ||
                (virtualAddress >= KERNEL_SPACE_START))
                continue;

            if (flags & PAGE_SHARED)
            {
                // Handle shared mappings - don't copy the original page.
                mapCrossSpace(
                    v, physicalAddress, virtualAddress, fromFlags(flags, true));
                continue;
            }

            // Map the new page in to the new address space for copy-on-write.
            // This implies read-only (so we #PF for copy on write).
            bool bWasCopyOnWrite = (flags & PAGE_COPY_ON_WRITE);
            flags |= PAGE_COPY_ON_WRITE;
            flags &= ~PAGE_WRITE;
            mapCrossSpace(
                v, physicalAddress, virtualAddress, fromFlags(flags, true));

            // We need to modify the entry in *this* address space as well, so
            // that it is also read-only and copy-on-write; otherwise writes
            // in the parent process would be visible to the child process
            // immediately.
            PAGE_SET_FLAGS(pageTableEntry, flags);
            Processor::invalidate(virtualAddress);

            // Pin the page twice - once for each side of the clone.
            // But only pin for the parent if the parent page is not already
            // copy on write. If we pin the CoW page, it'll be leaked when
            // both parent and child terminate if the parent clone()s again.
            if (!bWasCopyOnWrite)
                PhysicalMemoryManager::instance().pin(physicalAddress);
            PhysicalMemoryManager::instance().pin(physicalAddress);
        }
    }

    endCrossSpace();

    g_pCurrentlyCloning = 0;

    X86VirtualAddressSpace *pX86Clone =
        static_cast<X86VirtualAddressSpace *>(pClone);

    // Before returning the address space, bring across metadata.
    // Note though that if the parent of the clone (ie, this address space)
    // is the kernel address space, we mustn't copy metadata or else the
    // userspace defaults in the constructor get wiped out.

    if (m_pStackTop < KERNEL_SPACE_START)
    {
        pX86Clone->m_pStackTop = m_pStackTop;
        for (Vector<void *>::Iterator it = m_freeStacks.begin();
             it != m_freeStacks.end(); ++it)
        {
            pX86Clone->m_freeStacks.pushBack(*it);
        }
    }

    if (m_Heap < KERNEL_SPACE_START)
    {
        pX86Clone->m_Heap = m_Heap;
        pX86Clone->m_HeapEnd = m_HeapEnd;
    }

    return pClone;
}
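
// A note on the copy-on-write protocol above: after clone(), parent and
// child share every private page read-only with PAGE_COPY_ON_WRITE set.
// The first write on either side raises a page fault, and the fault handler
// (elsewhere in the kernel) is expected to copy the page and restore write
// access. The double pin keeps the physical page alive until each side has
// either copied or released it.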

void X86VirtualAddressSpace::revertToKernelAddressSpace()
{
    for (uintptr_t i = 0; i < 1024; i++)
    {
        uint32_t *pageDirectoryEntry =
            PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, i);

        if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        if (reinterpret_cast<void *>(i * 1024 * 4096) >= KERNEL_SPACE_START)
            continue;
        if (reinterpret_cast<void *>(i * 1024 * 4096) < USERSPACE_VIRTUAL_START)
            continue;

        bool bDidSkip = false;
        for (uintptr_t j = 0; j < 1024; j++)
        {
            uint32_t *pageTableEntry =
                PAGE_TABLE_ENTRY(m_VirtualPageTables, i, j);

            if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            size_t flags = PAGE_GET_FLAGS(pageTableEntry);

            // Grab the physical address for it.
            physical_uintptr_t physicalAddress =
                PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);

            // Unmap it.
            void *virtualAddress =
                reinterpret_cast<void *>(((i * 1024) + j) * 4096);
            unmap(virtualAddress);

            // And release the physical memory if it is not shared with
            // another process (eg, a memory mapped file).
            // Also avoid stumbling over a swapped out page.
            if ((flags & (PAGE_SHARED | PAGE_SWAPPED)) == 0)
                PhysicalMemoryManager::instance().freePage(physicalAddress);

            // This PTE is no longer valid
            *pageTableEntry = 0;
        }

        // Remove the page table from the directory
        PhysicalMemoryManager::instance().freePage(
            PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry));
        *pageDirectoryEntry = 0;

        // Invalidate the page table mapping
        uint32_t *pageTable = PAGE_TABLE_ENTRY(m_VirtualPageTables, i, 0);
        Processor::invalidate(pageTable);
    }
}

uintptr_t
X86VirtualAddressSpace::beginCrossSpace(X86VirtualAddressSpace *pOther)
{
    map(pOther->m_PhysicalPageDirectory, KERNEL_VIRTUAL_TEMP2,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    uint32_t *pDir = reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP2);
    map(pDir[0], KERNEL_VIRTUAL_TEMP3,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    return 0x00000000;
}
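
// Cross-space protocol: beginCrossSpace() maps the target's page directory
// at KERNEL_VIRTUAL_TEMP2 and its first page table at KERNEL_VIRTUAL_TEMP3,
// returning the directory index (0) currently visible through TEMP3.
// mapCrossSpace() below remaps TEMP3 whenever its cached index 'v' goes
// stale, and endCrossSpace() tears both temporary windows down.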

bool X86VirtualAddressSpace::mapCrossSpace(
    uintptr_t &v, physical_uintptr_t physicalAddress, void *virtualAddress,
    size_t flags)
{
    size_t Flags = toFlags(flags, true);
    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);

    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(KERNEL_VIRTUAL_TEMP2, pageDirectoryIndex);
    uint32_t *pDir = reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP2);

    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        size_t PdeFlags = toFlags(flags);

        uint32_t page = PhysicalMemoryManager::instance().allocatePage();
        // Set the page.
        *pageDirectoryEntry =
            page |
            ((PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE)) |
             PAGE_WRITE);

        // Map it in.
        v = pageDirectoryIndex;
        unmap(KERNEL_VIRTUAL_TEMP3);
        map(pDir[pageDirectoryIndex], KERNEL_VIRTUAL_TEMP3,
            VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

        // Zero.
        ByteSet(KERNEL_VIRTUAL_TEMP3, 0, PhysicalMemoryManager::getPageSize());
    }
    else if (
        (Flags & PAGE_USER) && ((*pageDirectoryEntry & PAGE_USER) != PAGE_USER))
    {
        *pageDirectoryEntry |= PAGE_USER;
    }

    // Check we now have the right page table mapped in. If not, map it in.
    if (v != pageDirectoryIndex)
    {
        v = pageDirectoryIndex;
        unmap(KERNEL_VIRTUAL_TEMP3);
        map(pDir[pageDirectoryIndex], KERNEL_VIRTUAL_TEMP3,
            VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
    }

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry =
        &(reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP3)[pageTableIndex]);

    // Is a page already present?
    if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
        return false;

    // Map the page
    *pageTableEntry = physicalAddress | Flags;

    return true;
}

void X86VirtualAddressSpace::endCrossSpace()
{
    unmap(KERNEL_VIRTUAL_TEMP2);
    unmap(KERNEL_VIRTUAL_TEMP3);
}

bool X86KernelVirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}
bool X86KernelVirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}
void X86KernelVirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
}
void X86KernelVirtualAddressSpace::setFlags(
    void *virtualAddress, size_t newFlags)
{
    doSetFlags(virtualAddress, newFlags);
}
void X86KernelVirtualAddressSpace::unmap(void *virtualAddress)
{
    doUnmap(virtualAddress);
}
void *X86KernelVirtualAddressSpace::allocateStack()
{
    void *pStack = doAllocateStack(KERNEL_STACK_SIZE + 0x1000);

    return pStack;
}
X86KernelVirtualAddressSpace::X86KernelVirtualAddressSpace()
    : X86VirtualAddressSpace(
          KERNEL_VIRTUAL_HEAP,
          reinterpret_cast<uintptr_t>(&pagedirectory) -
              reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
          KERNEL_VIRUTAL_PAGE_DIRECTORY, VIRTUAL_PAGE_TABLES,
          KERNEL_VIRTUAL_STACK)
{
    for (int i = 0; i < 256; i++)
    {
        g_EscrowPages[i] = 0;
    }
}
X86KernelVirtualAddressSpace::~X86KernelVirtualAddressSpace()
{
}