20 #include "VirtualAddressSpace.h" 21 #include "pedigree/kernel/LockGuard.h" 22 #include "pedigree/kernel/Log.h" 23 #include "pedigree/kernel/panic.h" 24 #include "pedigree/kernel/process/Process.h" 25 #include "pedigree/kernel/process/Scheduler.h" 26 #include "pedigree/kernel/process/Thread.h" 27 #include "pedigree/kernel/processor/PhysicalMemoryManager.h" 28 #include "pedigree/kernel/processor/Processor.h" 29 #include "pedigree/kernel/processor/ProcessorInformation.h" 30 #include "pedigree/kernel/utilities/utility.h" 36 #define PAGE_PRESENT 0x01 37 #define PAGE_WRITE 0x02 38 #define PAGE_USER 0x04 39 #define PAGE_WRITE_COMBINE 0x08 40 #define PAGE_CACHE_DISABLE 0x10 41 #define PAGE_ACCESSED 0x20 42 #define PAGE_DIRTY 0x40 45 #define PAGE_GLOBAL 0x100 46 #define PAGE_SWAPPED 0x200 47 #define PAGE_COPY_ON_WRITE 0x400 48 #define PAGE_SHARED 0x800 49 #define PAGE_NX 0x8000000000000000 50 #define PAGE_WRITE_THROUGH (PAGE_PAT | PAGE_WRITE_COMBINE) 55 #define PML4_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 39) & 0x1FF) 56 #define PAGE_DIRECTORY_POINTER_INDEX(x) \ 57 ((reinterpret_cast<uintptr_t>(x) >> 30) & 0x1FF) 58 #define PAGE_DIRECTORY_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 21) & 0x1FF) 59 #define PAGE_TABLE_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 12) & 0x1FF) 61 #define TABLE_ENTRY(table, index) \ 62 (&physicalAddress(reinterpret_cast<uint64_t *>(table))[index]) 64 #define PAGE_GET_FLAGS(x) (*x & 0x8000000000000FFFULL) 65 #define PAGE_SET_FLAGS(x, f) *x = (*x & ~0x8000000000000FFFULL) | f 66 #define PAGE_GET_PHYSICAL_ADDRESS(x) (*x & ~0x8000000000000FFFULL) 73 reinterpret_cast<uintptr_t>(&pml4) -
74 reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
75 KERNEL_VIRTUAL_STACK);
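// Worked example (illustrative): the canonical kernel address
// 0xFFFFFFFF80201000 decomposes via the macros above into PML4_INDEX = 511,
// PAGE_DIRECTORY_POINTER_INDEX = 510, PAGE_DIRECTORY_INDEX = 1 and
// PAGE_TABLE_INDEX = 1; each level of the four-level walk consumes 9 bits
// above the 12-bit page offset. TABLE_ENTRY turns the *physical* address of
// a table into a dereferenceable pointer via physicalAddress(), so the walk
// never needs the tables mapped at fixed virtual addresses.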
static void trackPages(ssize_t v, ssize_t p, ssize_t s)
{
    // Attribute the virtual/physical/swapped page deltas to the current
    // process, if there is one to track against.
    Thread *pThread = Processor::information().getCurrentThread();
    Process *pProcess = pThread ? pThread->getParent() : 0;
    if (pProcess)
        pProcess->trackPages(v, p, s);
}
bool X64VirtualAddressSpace::memIsInKernelHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
    {
        return false;
    }
    else if (
        pMem >= adjust_pointer(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_HEAP_SIZE))
    {
        return false;
    }

    return true;
}

bool X64VirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < m_Heap)
    {
        WARNING("memIsInHeap: " << pMem << " is below the kernel heap.");
        return false;
    }
    else if (pMem >= getEndOfHeap())
    {
        WARNING(
            "memIsInHeap: " << pMem << " is beyond the end of the heap ("
                            << getEndOfHeap() << ").");
        return false;
    }
    else
        return true;
}

void *X64VirtualAddressSpace::getEndOfHeap()
{
    if (m_Heap == KERNEL_VIRTUAL_HEAP)
    {
        return reinterpret_cast<void *>(
            reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
            KERNEL_VIRTUAL_HEAP_SIZE);
    }
    return m_HeapEnd;
}
bool X64VirtualAddressSpace::isAddressValid(void *virtualAddress)
{
    // Valid addresses lie outside the non-canonical gap in the middle of
    // the 64-bit space.
    if (reinterpret_cast<uint64_t>(virtualAddress) < 0x0008000000000000ULL ||
        reinterpret_cast<uint64_t>(virtualAddress) >= 0xFFF8000000000000ULL)
    {
        return true;
    }
    return false;
}
bool X64VirtualAddressSpace::isMapped(void *virtualAddress)
{
    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    // Is a page directory pointer table present?
    if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    // Is a page directory present?
    if ((*pageDirectoryPointerEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint64_t *pageDirectoryEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
        pageDirectoryIndex);

    // Is a page table or a 2 MB page present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    // A 2 MB page is a leaf: it is mapped, with no page table beneath it.
    if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
        return true;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint64_t *pageTableEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);

    // Is the page present?
    return ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT);
}
bool X64VirtualAddressSpace::map(
    physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    return mapUnlocked(physAddress, virtualAddress, flags, m_Lock.acquired());
}
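// The public map()/unmap() entry points acquire m_Lock and delegate to the
// mapUnlocked()/unmapUnlocked() variants; callers that already hold the
// lock (e.g. mapHuge() below, which unmaps via unmapUnlocked()) use the
// unlocked variants directly, keeping the spinlock non-recursive.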
bool X64VirtualAddressSpace::mapHuge(
    physical_uintptr_t physAddress, void *virtualAddress, size_t count,
    size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Does the CPU support 1 GB pages? (CPUID 0x80000001, EDX bit 26.)
    uint32_t eax, ebx, ecx, edx;
    Processor::cpuid(0x80000001, 0, eax, ebx, ecx, edx);

    size_t numHugePages = 0;
    bool hasHuge = edx & (1 << 26);
    if (hasHuge)
    {
        // How many 1 GB pages cover `count` small (4 KB) pages?
        numHugePages = count / (1 << (30UL - 12UL));
    }

    if (numHugePages == 0)
    {
        // 1 GB pages unsupported or region too small: try 2 MB pages.
        hasHuge = false;
        numHugePages = count / (1 << (21UL - 12UL));
    }

    if (numHugePages == 0)
    {
        // Too small for even one 2 MB page; fall back to the generic
        // small-page implementation.
        return VirtualAddressSpace::mapHuge(
            physAddress, virtualAddress, count, flags);
    }

    // Remove any existing small-page mappings in the target region.
    const size_t smallPageSize = PhysicalMemoryManager::getPageSize();
    for (size_t i = 0; i < count; ++i)
    {
        unmapUnlocked(
            adjust_pointer(virtualAddress, i * smallPageSize), false);
    }

    const size_t pageSize = hasHuge ? (1 << 30UL) : (1 << 21UL);

    size_t Flags = toFlags(flags, true);
    for (size_t i = 0; i < numHugePages; ++i)
    {
        size_t pml4Index = PML4_INDEX(virtualAddress);
        uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

        if (conditionalTableEntryAllocation(pml4Entry, flags) == false)
        {
            return false;
        }

        size_t pageDirectoryPointerIndex =
            PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
        uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
            PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

        if (hasHuge)
        {
            // 1 GB page: the leaf lives at the page directory pointer level.
            *pageDirectoryPointerEntry = physAddress | PAGE_2MB | Flags;
        }
        else
        {
            if (conditionalTableEntryAllocation(
                    pageDirectoryPointerEntry, flags) == false)
            {
                return false;
            }

            size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
            uint64_t *pageDirectoryEntry = TABLE_ENTRY(
                PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
                pageDirectoryIndex);

            // 2 MB page: the leaf lives at the page directory level (the
            // PS bit is the same bit as PAGE_2MB).
            *pageDirectoryEntry = physAddress | PAGE_2MB | Flags;
        }

        virtualAddress = adjust_pointer(virtualAddress, pageSize);
        physAddress += pageSize;
    }

    return true;
}
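// Sizing example (illustrative): count = 1024 small (4 KB) pages is 4 MB of
// memory. 1024 / 262144 = 0 one-gigabyte pages, so the code falls back to
// 1024 / 512 = 2 two-megabyte pages, each covering 512 small pages.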
bool X64VirtualAddressSpace::mapUnlocked(
    physical_uintptr_t physAddress, void *virtualAddress, size_t flags,
    bool locked)
{
    size_t Flags = toFlags(flags, true);
    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    // Note whether a page directory pointer table was present *before* the
    // PML4 entry is possibly created below.
    bool pdWasPresent = (*pml4Entry & PAGE_PRESENT) == PAGE_PRESENT;

    // Ensure a page directory pointer table exists.
    if (conditionalTableEntryAllocation(pml4Entry, flags) == false)
    {
        return false;
    }

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    // Ensure a page directory exists.
    if (conditionalTableEntryAllocation(pageDirectoryPointerEntry, flags) ==
        false)
    {
        return false;
    }

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint64_t *pageDirectoryEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
        pageDirectoryIndex);

    // Ensure a page table exists.
    if (conditionalTableEntryAllocation(pageDirectoryEntry, flags) == false)
    {
        return false;
    }

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint64_t *pageTableEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);

    // Refuse to overwrite an existing mapping.
    if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
    {
        return false;
    }

    // Map the page.
    *pageTableEntry = physAddress | Flags;

    // A brand-new kernel-space PML4 entry has to be mirrored into every
    // other address space, since the kernel half of the PML4 is shared.
    if (!pdWasPresent && Processor::m_Initialised == 2 &&
        virtualAddress >= KERNEL_VIRTUAL_HEAP)
    {
        uint64_t thisPml4Entry = *pml4Entry;
        for (size_t i = 0; i < Scheduler::instance().getNumProcesses(); ++i)
        {
            Process *pProcess = Scheduler::instance().getProcess(i);
            X64VirtualAddressSpace *pX64AddressSpace =
                static_cast<X64VirtualAddressSpace *>(
                    pProcess->getAddressSpace());
            uint64_t *otherPml4Entry =
                TABLE_ENTRY(pX64AddressSpace->m_PhysicalPML4, pml4Index);
            *otherPml4Entry = thisPml4Entry;
        }
    }

    return true;
}
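// Only brand-new PML4 entries need broadcasting: once every PML4 points at
// the same kernel page directory pointer table, later kernel mappings edit
// the shared lower-level tables and become visible to all address spaces
// without further synchronisation.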
void X64VirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry; calling this for an unmapped
    // address is a caller error.
    uint64_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
    {
        panic("VirtualAddressSpace::getMapping(): function misused");
    }

    // Extract the physical address and the flags.
    physAddress = PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);
    flags = fromFlags(PAGE_GET_FLAGS(pageTableEntry), true);
}
void X64VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry; calling this for an unmapped
    // address is a caller error.
    uint64_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
    {
        panic("VirtualAddressSpace::setFlags(): function misused");
    }

    // Rewrite only the flag bits, leaving the physical address intact.
    PAGE_SET_FLAGS(pageTableEntry, toFlags(newFlags, true));
}
void X64VirtualAddressSpace::unmap(void *virtualAddress)
{
    LockGuard<Spinlock> guard(m_Lock);

    unmapUnlocked(virtualAddress);
}

void X64VirtualAddressSpace::unmapUnlocked(
    void *virtualAddress, bool requireMapped)
{
    // Get a pointer to the page-table entry.
    uint64_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
    {
        // Some callers (e.g. mapHuge()) unmap speculatively and tolerate
        // unmapped addresses; only panic when a mapping was required.
        if (requireMapped)
        {
            panic("VirtualAddressSpace::unmap(): function misused");
        }
        return;
    }

    // Clear the page-table entry and invalidate the TLB entry.
    *pageTableEntry = 0;
    Processor::invalidate(virtualAddress);

    trackPages(-1, 0, 0);

    // Possibly clean up now-empty page tables above this entry.
    maybeFreeTables(virtualAddress);
}
VirtualAddressSpace *X64VirtualAddressSpace::clone(bool copyOnWrite)
{
    // Create a new virtual address space to clone into.
    X64VirtualAddressSpace *pClone =
        static_cast<X64VirtualAddressSpace *>(VirtualAddressSpace::create());
    if (pClone == 0)
    {
        WARNING("X64VirtualAddressSpace: Clone() failed!");
        return 0;
    }

    LockGuard<Spinlock> guard(m_Lock);

    // Walk only the lower half of the PML4 (the user half); the kernel half
    // is already shared with every address space.
    for (uint64_t i = 0; i < 256; i++)
    {
        uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, i);
        if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        for (uint64_t j = 0; j < 512; j++)
        {
            uint64_t *pdptEntry =
                TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), j);
            if ((*pdptEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            for (uint64_t k = 0; k < 512; k++)
            {
                uint64_t *pdEntry =
                    TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdptEntry), k);
                if ((*pdEntry & PAGE_PRESENT) != PAGE_PRESENT)
                    continue;

                // 2 MB pages have no page table to walk.
                if ((*pdEntry & PAGE_2MB) == PAGE_2MB)
                    continue;

                for (uint64_t l = 0; l < 512; l++)
                {
                    uint64_t *ptEntry =
                        TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdEntry), l);
                    if ((*ptEntry & PAGE_PRESENT) != PAGE_PRESENT)
                        continue;

                    uint64_t flags = PAGE_GET_FLAGS(ptEntry);
                    physical_uintptr_t physicalAddress =
                        PAGE_GET_PHYSICAL_ADDRESS(ptEntry);

                    // Rebuild the canonical virtual address from the four
                    // table indices.
                    void *virtualAddress = reinterpret_cast<void *>(
                        ((i & 0x100) ? (~0ULL << 48) : 0ULL) | (i << 39) |
                        (j << 30) | (k << 21) | (l << 12));

                    if (flags & PAGE_SHARED)
                    {
                        // Shared pages are mapped into the clone as-is,
                        // referencing the same physical frame.
                        pClone->map(
                            physicalAddress, virtualAddress,
                            fromFlags(flags, true));
                        continue;
                    }

                    // Mark writable pages copy-on-write in both spaces, so
                    // the first write in either space takes a fault.
                    bool bWasCopyOnWrite = (flags & PAGE_COPY_ON_WRITE);
                    if (copyOnWrite && (flags & PAGE_WRITE))
                    {
                        flags |= PAGE_COPY_ON_WRITE;
                        flags &= ~PAGE_WRITE;
                    }
                    pClone->map(
                        physicalAddress, virtualAddress,
                        fromFlags(flags, true));

                    // Apply the adjusted flags to this address space too.
                    PAGE_SET_FLAGS(ptEntry, flags);

                    // Take an extra reference on the frame, unless it was
                    // already copy-on-write.
                    if (!bWasCopyOnWrite)
                        PhysicalMemoryManager::instance().pin(physicalAddress);
                }
            }
        }
    }
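    // Copy-on-write sketch (assumed fault-path behaviour): after clone(),
    // parent and child both map the same frame read-only with
    // PAGE_COPY_ON_WRITE set. The first write in either space faults, and
    // the page-fault handler (elsewhere in the kernel) is then expected to
    // copy the frame and restore PAGE_WRITE for the faulting space only.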
    if (m_Heap < KERNEL_SPACE_START)
    {
        // A userspace heap exists; the clone inherits its bounds.
        pClone->m_Heap = m_Heap;
        pClone->m_HeapEnd = m_HeapEnd;
    }

    // Duplicate the userspace stack bookkeeping so stacks remain usable in
    // the new address space.
    m_StacksLock.acquire();
    if (m_pStackTop < KERNEL_SPACE_START)
    {
        pClone->m_pStackTop = m_pStackTop;
        for (Vector<Stack *>::Iterator it = m_freeStacks.begin();
             it != m_freeStacks.end(); ++it)
        {
            Stack *pNewStack = new Stack(**it);
            pClone->m_freeStacks.pushBack(pNewStack);
        }
    }
    m_StacksLock.release();

    return pClone;
}
void X64VirtualAddressSpace::revertToKernelAddressSpace()
{
    LockGuard<Spinlock> guard(m_Lock);

    // Walk the user half of the PML4, tearing down mappings and freeing the
    // paging structures beneath it.
    for (uint64_t i = 0; i < 256; i++)
    {
        uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, i);
        if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        for (uint64_t j = 0; j < 512; j++)
        {
            uint64_t *pdptEntry =
                TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), j);
            if ((*pdptEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            for (uint64_t k = 0; k < 512; k++)
            {
                uint64_t *pdEntry =
                    TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdptEntry), k);
                if ((*pdEntry & PAGE_PRESENT) != PAGE_PRESENT)
                    continue;

                void *regionVirtualAddress = reinterpret_cast<void *>(
                    ((i & 0x100) ? (~0ULL << 48) : 0ULL) | (i << 39) |
                    (j << 30) | (k << 21));

                // Leave anything outside the userspace region alone.
                if (regionVirtualAddress < USERSPACE_VIRTUAL_START)
                    continue;
                if (regionVirtualAddress > KERNEL_SPACE_START)
                    break;

                // 2 MB pages have no page table beneath them.
                if ((*pdEntry & PAGE_2MB) == PAGE_2MB)
                    continue;

                for (uint64_t l = 0; l < 512; l++)
                {
                    uint64_t *ptEntry =
                        TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pdEntry), l);
                    if ((*ptEntry & PAGE_PRESENT) != PAGE_PRESENT)
                        continue;

                    void *virtualAddress = reinterpret_cast<void *>(
                        reinterpret_cast<uintptr_t>(regionVirtualAddress) |
                        (l << 12));

                    size_t flags = PAGE_GET_FLAGS(ptEntry);
                    physical_uintptr_t physicalAddress =
                        PAGE_GET_PHYSICAL_ADDRESS(ptEntry);

                    // Free the frame, unless it is shared or swapped out;
                    // this address space does not own those frames.
                    if ((flags & (PAGE_SHARED | PAGE_SWAPPED)) == 0)
                    {
                        PhysicalMemoryManager::instance().freePage(
                            physicalAddress);
                    }

                    *ptEntry = 0;
                    Processor::invalidate(virtualAddress);
                    trackPages(-1, 0, 0);
                }

                // Free the (now unreferenced) page table itself.
                PhysicalMemoryManager::instance().freePage(
                    PAGE_GET_PHYSICAL_ADDRESS(pdEntry));
                *pdEntry = 0;
            }

            // Free the page directory.
            PhysicalMemoryManager::instance().freePage(
                PAGE_GET_PHYSICAL_ADDRESS(pdptEntry));
            *pdptEntry = 0;
        }

        // Free the page directory pointer table.
        PhysicalMemoryManager::instance().freePage(
            PAGE_GET_PHYSICAL_ADDRESS(pml4Entry));
        *pml4Entry = 0;
    }
}
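// Note the asymmetry with maybeFreeTables() below: this teardown frees
// paging structures unconditionally as it walks, because the entire user
// half is being discarded, whereas shared and swapped frames are skipped
// here precisely because this address space does not own them.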
bool X64VirtualAddressSpace::mapPageStructures(
    physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    size_t Flags = toFlags(flags);
    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    // At each level, conditionalTableEntryMapping() consumes physAddress
    // for the missing table and reports true; otherwise keep walking down.
    if (conditionalTableEntryMapping(pml4Entry, physAddress, Flags) == true)
        return true;

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    if (conditionalTableEntryMapping(
            pageDirectoryPointerEntry, physAddress, Flags) == true)
        return true;

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint64_t *pageDirectoryEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
        pageDirectoryIndex);

    if (conditionalTableEntryMapping(pageDirectoryEntry, physAddress, Flags) ==
        true)
        return true;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint64_t *pageTableEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);

    // All levels already existed: map the page itself.
    if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        *pageTableEntry = physAddress | Flags;
        return true;
    }
    return false;
}
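// Unlike map(), which allocates fresh frames for missing tables,
// mapPageStructures() installs the caller-supplied frame at the first
// missing level, so a caller can invoke it repeatedly, one frame per
// missing structure, until the page itself is finally mapped.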
bool X64VirtualAddressSpace::mapPageStructuresAbove4GB(
    physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    size_t Flags = toFlags(flags);
    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    // Here missing tables are allocated rather than consuming physAddress.
    if (conditionalTableEntryAllocation(pml4Entry, Flags) == false)
        return false;

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    if (conditionalTableEntryAllocation(pageDirectoryPointerEntry, Flags) ==
        false)
        return false;

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint64_t *pageDirectoryEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
        pageDirectoryIndex);

    if (conditionalTableEntryAllocation(pageDirectoryEntry, Flags) == false)
        return false;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint64_t *pageTableEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);

    if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        *pageTableEntry = physAddress | Flags;
        return true;
    }
    return false;
}
VirtualAddressSpace::Stack *X64VirtualAddressSpace::allocateStack()
{
    size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
    if (this == &m_KernelSpace)
        sz = KERNEL_STACK_SIZE;
    return doAllocateStack(sz);
}

VirtualAddressSpace::Stack *
X64VirtualAddressSpace::allocateStack(size_t stackSz)
{
    // An explicit size overrides the per-space default.
    return doAllocateStack(stackSz);
}
VirtualAddressSpace::Stack *
X64VirtualAddressSpace::doAllocateStack(size_t sSize)
{
    size_t flags = 0;
    bool bMapAll = false;
    if (this == &m_KernelSpace)
    {
        // Kernel stacks are kernel-only and are fully backed up front.
        flags = KernelMode;
        bMapAll = true;
    }

    const size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Reuse a previously freed stack if a large enough one is available.
    void *pStack = 0;
    m_StacksLock.acquire();
    if (m_freeStacks.count() != 0)
    {
        Stack *poppedStack = m_freeStacks.popBack();
        if (poppedStack->getSize() >= sSize)
        {
            pStack = poppedStack->getTop();
        }
        delete poppedStack;
    }
    m_StacksLock.release();

    if (!pStack)
    {
        // No free stack to reuse: carve a new one below the current stack
        // top, leaving an unmapped guard page between stacks.
        pStack = m_pStackTop;
        m_pStackTop = adjust_pointer(m_pStackTop, -(sSize + pageSz));
    }

    // Map the top page of the stack immediately; it is used as soon as the
    // stack is handed out.
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(pStack) - pageSz;
    physical_uintptr_t phys = PhysicalMemoryManager::instance().allocatePage();
    if (!map(
            phys, reinterpret_cast<void *>(firstPage),
            flags | VirtualAddressSpace::Write))
        WARNING("map() failed in doAllocateStack");

    // Map the remainder of the stack.
    uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
    for (uintptr_t addr = stackBottom; addr < firstPage; addr += pageSz)
    {
        size_t map_flags = 0;

        if (!bMapAll)
        {
            // Userspace stacks are demand-committed: back each page
            // copy-on-write so a private frame only appears on first write.
            PhysicalMemoryManager::instance().pin(phys);
            map_flags = VirtualAddressSpace::CopyOnWrite;
        }
        else
        {
            phys = PhysicalMemoryManager::instance().allocatePage();
            map_flags = VirtualAddressSpace::Write;
        }

        if (!map(phys, reinterpret_cast<void *>(addr), flags | map_flags))
            WARNING("CoW map() failed in doAllocateStack");
    }

    return new Stack(pStack, sSize);
}
void X64VirtualAddressSpace::freeStack(Stack *pStack)
{
    const size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Unmap and free every page of the stack, walking down from the top.
    uintptr_t stackTop = reinterpret_cast<uintptr_t>(pStack->getTop());
    for (size_t i = 0; i < pStack->getSize(); i += pageSz)
    {
        stackTop -= pageSz;
        void *v = reinterpret_cast<void *>(stackTop);
        if (!isMapped(v))
            continue;

        physical_uintptr_t phys = 0;
        size_t flags = 0;
        getMapping(v, phys, flags);

        unmap(v);
        PhysicalMemoryManager::instance().freePage(phys);
    }

    // Remember the stack region so doAllocateStack() can reuse it.
    m_StacksLock.acquire();
    m_freeStacks.pushBack(pStack);
    m_StacksLock.release();
}
X64VirtualAddressSpace::~X64VirtualAddressSpace()
{
    PhysicalMemoryManager &physicalMemoryManager =
        PhysicalMemoryManager::instance();

    // Drop all userspace mappings, then the PML4 itself.
    revertToKernelAddressSpace();
    physicalMemoryManager.freePage(m_PhysicalPML4);
}

X64VirtualAddressSpace::X64VirtualAddressSpace()
    : VirtualAddressSpace(USERSPACE_VIRTUAL_HEAP), m_PhysicalPML4(0),
      m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(),
      m_bKernelSpace(false), m_Lock(false, false), m_StacksLock(false)
{
    // Allocate a fresh PML4 for this address space.
    m_PhysicalPML4 = PhysicalMemoryManager::instance().allocatePage();

    // Zero the user half, then share the kernel half of the kernel PML4
    // (the upper 256 entries, i.e. 0x800 bytes of the 4 KB table).
    ByteSet(
        reinterpret_cast<void *>(physicalAddress(m_PhysicalPML4)), 0, 0x800);
    MemoryCopy(
        reinterpret_cast<void *>(physicalAddress(m_PhysicalPML4) + 0x800),
        reinterpret_cast<void *>(
            physicalAddress(m_KernelSpace.m_PhysicalPML4) + 0x800),
        0x800);
}

X64VirtualAddressSpace::X64VirtualAddressSpace(
    void *Heap, physical_uintptr_t PhysicalPML4, void *VirtualStack)
    : VirtualAddressSpace(Heap), m_PhysicalPML4(PhysicalPML4),
      m_pStackTop(VirtualStack), m_freeStacks(), m_bKernelSpace(true),
      m_Lock(false, true), m_StacksLock(false)
{
}
bool X64VirtualAddressSpace::getPageTableEntry(
    void *virtualAddress, uint64_t *&pageTableEntry) const
{
    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    // Is a page directory pointer table present?
    if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    // Is a page directory present?
    if ((*pageDirectoryPointerEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint64_t *pageDirectoryEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
        pageDirectoryIndex);

    // Is a page table present? A 2 MB page has no page-table level at all.
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;
    if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
        return false;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    pageTableEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), pageTableIndex);

    // Is the page present, or at least swapped out (which still leaves a
    // meaningful entry behind)?
    if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT &&
        (*pageTableEntry & PAGE_SWAPPED) != PAGE_SWAPPED)
    {
        return false;
    }

    return true;
}
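// getPageTableEntry() deliberately also succeeds for PAGE_SWAPPED entries:
// a swapped-out page has no PAGE_PRESENT bit, but its entry still carries
// flags that getMapping(), setFlags() and unmap() need to see.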
void X64VirtualAddressSpace::maybeFreeTables(void *virtualAddress)
{
    // Walk down to the page table covering this address, checking whether
    // each level has become completely empty.
    bool bCanFreePageTable = true;

    uint64_t *pageDirectoryEntry = 0;

    size_t pml4Index = PML4_INDEX(virtualAddress);
    uint64_t *pml4Entry = TABLE_ENTRY(m_PhysicalPML4, pml4Index);

    if ((*pml4Entry & PAGE_PRESENT) != PAGE_PRESENT)
        return;

    size_t pageDirectoryPointerIndex =
        PAGE_DIRECTORY_POINTER_INDEX(virtualAddress);
    uint64_t *pageDirectoryPointerEntry = TABLE_ENTRY(
        PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), pageDirectoryPointerIndex);

    if ((*pageDirectoryPointerEntry & PAGE_PRESENT) == PAGE_PRESENT)
    {
        size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
        pageDirectoryEntry = TABLE_ENTRY(
            PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry),
            pageDirectoryIndex);

        if ((*pageDirectoryEntry & PAGE_PRESENT) == PAGE_PRESENT)
        {
            if ((*pageDirectoryEntry & PAGE_2MB) == PAGE_2MB)
            {
                // A 2 MB page: there is no page table to free.
                bCanFreePageTable = false;
            }
            else
            {
                for (size_t i = 0; i < 0x200; ++i)
                {
                    uint64_t *entry = TABLE_ENTRY(
                        PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry), i);
                    if ((*entry & PAGE_PRESENT) == PAGE_PRESENT ||
                        (*entry & PAGE_SWAPPED) == PAGE_SWAPPED)
                    {
                        bCanFreePageTable = false;
                        break;
                    }
                }
            }
        }
    }

    if (bCanFreePageTable && pageDirectoryEntry)
    {
        PhysicalMemoryManager::instance().freePage(
            PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry));
        *pageDirectoryEntry = 0;
    }
    else if (!bCanFreePageTable)
    {
        // The page table is still in use; nothing above it can be freed.
        return;
    }

    // Is the page directory empty now?
    bool bCanFreeDirectory = true;
    for (size_t i = 0; i < 0x200; ++i)
    {
        uint64_t *entry = TABLE_ENTRY(
            PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry), i);
        if ((*entry & PAGE_PRESENT) == PAGE_PRESENT)
        {
            bCanFreeDirectory = false;
            break;
        }
    }

    if (bCanFreeDirectory)
    {
        PhysicalMemoryManager::instance().freePage(
            PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryPointerEntry));
        *pageDirectoryPointerEntry = 0;
    }
    else
    {
        return;
    }

    // Is the page directory pointer table empty now?
    bool bCanFreeDirectoryPointerTable = true;
    for (size_t i = 0; i < 0x200; ++i)
    {
        uint64_t *entry = TABLE_ENTRY(PAGE_GET_PHYSICAL_ADDRESS(pml4Entry), i);
        if ((*entry & PAGE_PRESENT) == PAGE_PRESENT)
        {
            bCanFreeDirectoryPointerTable = false;
            break;
        }
    }

    if (bCanFreeDirectoryPointerTable)
    {
        PhysicalMemoryManager::instance().freePage(
            PAGE_GET_PHYSICAL_ADDRESS(pml4Entry));
        *pml4Entry = 0;
    }
}
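// The cleanup is strictly bottom-up: a page table is freed only when none
// of its 512 entries is present or swapped, and each higher level is only
// considered once the level beneath it was actually freed. An early return
// at any level therefore leaves everything above it untouched.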
uint64_t X64VirtualAddressSpace::toFlags(size_t flags, bool bFinal) const
{
    uint64_t Flags = 0;
    if ((flags & KernelMode) == KernelMode)
        Flags |= PAGE_GLOBAL;
    else
        Flags |= PAGE_USER;
    if ((flags & Write) == Write)
        Flags |= PAGE_WRITE;
    if ((flags & WriteCombine) == WriteCombine)
        Flags |= PAGE_WRITE_COMBINE;
    if ((flags & CacheDisable) == CacheDisable)
        Flags |= PAGE_CACHE_DISABLE;
    if ((flags & Execute) != Execute)
        Flags |= PAGE_NX;
    if ((flags & Swapped) == Swapped)
        Flags |= PAGE_SWAPPED;
    else
        Flags |= PAGE_PRESENT;
    if ((flags & CopyOnWrite) == CopyOnWrite)
        Flags |= PAGE_COPY_ON_WRITE;
    if ((flags & Shared) == Shared)
        Flags |= PAGE_SHARED;
    if (bFinal)
    {
        if ((flags & WriteThrough) == WriteThrough)
            Flags |= PAGE_WRITE_THROUGH;
        if ((flags & Accessed) == Accessed)
            Flags |= PAGE_ACCESSED;
        if ((flags & Dirty) == Dirty)
            Flags |= PAGE_DIRTY;
        if ((flags & ClearDirty) == ClearDirty)
            Flags &= ~PAGE_DIRTY;
    }
    return Flags;
}
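// Translation example (illustrative): toFlags(KernelMode | Write, true)
// yields PAGE_GLOBAL | PAGE_WRITE | PAGE_NX | PAGE_PRESENT. Note that NX is
// the default and is only omitted when Execute is requested, and that
// PAGE_PRESENT appears because the page is not Swapped.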
size_t X64VirtualAddressSpace::fromFlags(uint64_t Flags, bool bFinal) const
{
    size_t flags = 0;
    if ((Flags & PAGE_USER) != PAGE_USER)
        flags |= KernelMode;
    if ((Flags & PAGE_WRITE) == PAGE_WRITE)
        flags |= Write;
    if ((Flags & PAGE_WRITE_COMBINE) == PAGE_WRITE_COMBINE)
        flags |= WriteCombine;
    if ((Flags & PAGE_CACHE_DISABLE) == PAGE_CACHE_DISABLE)
        flags |= CacheDisable;
    if ((Flags & PAGE_NX) != PAGE_NX)
        flags |= Execute;
    if ((Flags & PAGE_SWAPPED) == PAGE_SWAPPED)
        flags |= Swapped;
    if ((Flags & PAGE_COPY_ON_WRITE) == PAGE_COPY_ON_WRITE)
        flags |= CopyOnWrite;
    if ((Flags & PAGE_SHARED) == PAGE_SHARED)
        flags |= Shared;
    if (bFinal)
    {
        if ((Flags & PAGE_WRITE_THROUGH) == PAGE_WRITE_THROUGH)
            flags |= WriteThrough;
        if ((Flags & PAGE_ACCESSED) == PAGE_ACCESSED)
            flags |= Accessed;
        if ((Flags & PAGE_DIRTY) == PAGE_DIRTY)
            flags |= Dirty;
    }
    return flags;
}
bool X64VirtualAddressSpace::conditionalTableEntryAllocation(
    uint64_t *tableEntry, uint64_t flags)
{
    // Allocate a backing table for this entry if it is not yet present.
    if ((*tableEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        physical_uintptr_t page =
            PhysicalMemoryManager::instance().allocatePage();
        if (page == 0)
        {
            ERROR("Out of memory in "
                  "X64VirtualAddressSpace::conditionalTableEntryAllocation!");
            return false;
        }

        // Map the table. Intermediate levels always get write and user
        // access; the leaf entry enforces the real permissions.
        flags &= ~(PAGE_GLOBAL | PAGE_NX | PAGE_SWAPPED | PAGE_COPY_ON_WRITE);
        flags |= PAGE_WRITE | PAGE_USER;

        *tableEntry = page | flags;

        // Zero the new table so no stale entries leak through.
        ByteSet(
            reinterpret_cast<void *>(physicalAddress(page)), 0,
            PhysicalMemoryManager::getPageSize());
    }
    else if (((*tableEntry & PAGE_USER) != PAGE_USER) && (flags & PAGE_USER))
    {
        // The table exists but was kernel-only; widen it to user access as
        // the new mapping requires.
        *tableEntry |= PAGE_USER;
    }

    return true;
}
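// Intermediate paging structures are deliberately mapped PAGE_WRITE |
// PAGE_USER: on x86-64 the effective permission of a translation is the
// intersection of the permissions along the whole walk, so restricting
// access at the leaf entry alone is sufficient, and permissive upper levels
// let a single page table serve both kernel and user mappings.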
bool X64VirtualAddressSpace::conditionalTableEntryMapping(
    uint64_t *tableEntry, uint64_t physAddress, uint64_t flags)
{
    // If this entry is empty, install the caller's frame as the table.
    if ((*tableEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        *tableEntry =
            physAddress | ((flags & ~(PAGE_GLOBAL | PAGE_NX | PAGE_SWAPPED |
                                      PAGE_COPY_ON_WRITE)) |
                           PAGE_WRITE | PAGE_USER);

        // Zero the newly installed table.
        ByteSet(
            reinterpret_cast<void *>(physicalAddress(physAddress)), 0,
            PhysicalMemoryManager::getPageSize());

        return true;
    }
    else if (((*tableEntry & PAGE_USER) != PAGE_USER) && (flags & PAGE_USER))
    {
        // Table already present but kernel-only; widen it to user access.
        *tableEntry |= PAGE_USER;
    }

    return false;
}
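// The boolean return distinguishes the two outcomes for callers such as
// mapPageStructures(): true means the supplied frame was consumed to back
// this level (so the caller must stop and obtain a new frame), false means
// the level already existed and the walk can continue downwards.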