#include "VirtualAddressSpace.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Scheduler.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/utilities/utility.h"

#include "pedigree/kernel/Log.h"

#define PAGE_PRESENT 0x01
#define PAGE_WRITE 0x02
#define PAGE_USER 0x04
#define PAGE_WRITE_COMBINE 0x08
#define PAGE_CACHE_DISABLE 0x10
#define PAGE_ACCESSED 0x20
#define PAGE_DIRTY 0x40
// Bit 7 is the PAT bit in a page table entry, but the PS (4 MiB page) bit in
// a page directory entry.
#define PAGE_PAT 0x80
#define PAGE_4MB 0x80
#define PAGE_GLOBAL 0x100
#define PAGE_SWAPPED 0x200
#define PAGE_COPY_ON_WRITE 0x400
#define PAGE_SHARED 0x800
#define PAGE_WRITE_THROUGH (PAGE_PAT | PAGE_WRITE_COMBINE)

#define PAGE_DIRECTORY_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 22) & 0x3FF)
#define PAGE_TABLE_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 12) & 0x3FF)

#define PAGE_DIRECTORY_ENTRY(pageDir, index) \
    (&reinterpret_cast<uint32_t *>(pageDir)[index])
#define PAGE_TABLE_ENTRY(VirtualPageTables, pageDirectoryIndex, index) \
    (&reinterpret_cast<uint32_t *>( \
        adjust_pointer(VirtualPageTables, pageDirectoryIndex * 4096))[index])

#define PAGE_GET_FLAGS(x) (*x & 0xFFF)
#define PAGE_SET_FLAGS(x, f) *x = (*x & ~0xFFF) | f
#define PAGE_GET_PHYSICAL_ADDRESS(x) (*x & ~0xFFF)

extern void *pagedirectory;
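/*
 * Layout notes (an explanatory sketch, not from the original source):
 *
 * A 32-bit x86 virtual address splits into a 10-bit page directory index, a
 * 10-bit page table index and a 12-bit page offset, which is exactly what
 * PAGE_DIRECTORY_INDEX and PAGE_TABLE_INDEX compute. For example, address
 * 0xC0001234 gives directory index 0x300, table index 0x001, offset 0x234.
 *
 * The last page directory entry (index 0x3FF) points back at the page
 * directory itself. Through that recursive entry the directory appears in
 * virtual memory at VIRTUAL_PAGE_DIRECTORY, and all page tables appear, in
 * index order, in the 4 MiB window at VIRTUAL_PAGE_TABLES; PAGE_TABLE_ENTRY
 * exploits this by finding page table N at byte offset N * 4096 inside that
 * window.
 */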
/** Pre-allocated ("escrow") pages, one slot per processor, kept in reserve so
    that doMap() can install a new page table without calling the physical
    memory manager while the address space lock is held. */
physical_uintptr_t g_EscrowPages[256];

/** The address space currently being cloned, if any (see clone() and the
    kernel page table propagation in doMap()). */
VirtualAddressSpace *g_pCurrentlyCloning = 0;
bool X86VirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
        return false;
    else if (pMem >= getEndOfHeap())
        return false;
    else
        return true;
}

void *X86VirtualAddressSpace::getEndOfHeap()
{
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
        KERNEL_VIRTUAL_HEAP_SIZE);
}
bool X86VirtualAddressSpace::isMapped(void *virtualAddress)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic(
            "VirtualAddressSpace::isMapped(): not in this VirtualAddressSpace");
#endif

    return doIsMapped(virtualAddress);
}
bool X86VirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::map(): not in this VirtualAddressSpace");
#endif

    return doMap(physicalAddress, virtualAddress, flags);
}
void X86VirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::getMapping(): not in this "
              "VirtualAddressSpace");
#endif

    doGetMapping(virtualAddress, physicalAddress, flags);
}
void X86VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic(
            "VirtualAddressSpace::setFlags(): not in this VirtualAddressSpace");
#endif

    doSetFlags(virtualAddress, newFlags);
}
void X86VirtualAddressSpace::unmap(void *virtualAddress)
{
#if defined(ADDITIONAL_CHECKS)
    if (Processor::readCr3() != m_PhysicalPageDirectory)
        panic("VirtualAddressSpace::unmap(): not in this VirtualAddressSpace");
#endif

    doUnmap(virtualAddress);
}
void *X86VirtualAddressSpace::allocateStack()
{
    void *st = doAllocateStack(USERSPACE_VIRTUAL_MAX_STACK_SIZE);
    return st;
}

void *X86VirtualAddressSpace::allocateStack(size_t stackSz)
{
    if (stackSz == 0)
        stackSz = USERSPACE_VIRTUAL_MAX_STACK_SIZE;
    void *st = doAllocateStack(stackSz);
    return st;
}
void X86VirtualAddressSpace::freeStack(void *pStack)
{
    // Add the stack to the list of free stacks
    m_freeStacks.pushBack(pStack);
}
bool X86VirtualAddressSpace::mapPageStructures(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        // No: use the supplied physical page as the new page table
        *pageDirectoryEntry = physicalAddress | toFlags(flags);

        // Zero the newly mapped page table
        ByteSet(
            PAGE_TABLE_ENTRY(m_VirtualPageTables, pageDirectoryIndex, 0), 0,
            0x1000);
        return true;
    }
    else
    {
        size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
        uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
            m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

        // Is a page frame present?
        if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
        {
            *pageTableEntry = physicalAddress | toFlags(flags);
            return true;
        }
    }
    return false;
}
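/*
 * Explanatory note (not from the original source): unlike doMap(),
 * mapPageStructures() never allocates memory itself. The caller supplies one
 * physical page, which is consumed either as a fresh page table (when the
 * directory entry was empty) or as the final page mapping. The return value
 * reports whether the supplied page was consumed, so a caller that needs
 * both a new page table and a page mapping can simply call it twice with two
 * pages.
 */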
X86VirtualAddressSpace::~X86VirtualAddressSpace()
{
    PhysicalMemoryManager &physicalMemoryManager =
        PhysicalMemoryManager::instance();

    // Get the page table that maps this page directory into the address
    // space (installed at directory entry 0x3FE by the constructor), while
    // we can still see it
    physical_uintptr_t pageTable = PAGE_GET_PHYSICAL_ADDRESS(
        PAGE_DIRECTORY_ENTRY(VIRTUAL_PAGE_DIRECTORY, 0x3FE));

    // Switch to the kernel's address space before freeing the structures
    Processor::switchAddressSpace(VirtualAddressSpace::getKernelAddressSpace());

    // Free the page table used to map the page directory into the address
    // space, and the page directory itself
    physicalMemoryManager.freePage(pageTable);
    physicalMemoryManager.freePage(m_PhysicalPageDirectory);
}
X86VirtualAddressSpace::X86VirtualAddressSpace()
    : VirtualAddressSpace(USERSPACE_VIRTUAL_HEAP), m_PhysicalPageDirectory(0),
      m_VirtualPageDirectory(VIRTUAL_PAGE_DIRECTORY),
      m_VirtualPageTables(VIRTUAL_PAGE_TABLES),
      m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(), m_Lock(false, true)
{
    // Allocate a page directory, plus a page table with which to map it
    PhysicalMemoryManager &physicalMemoryManager =
        PhysicalMemoryManager::instance();
    m_PhysicalPageDirectory = physicalMemoryManager.allocatePage();
    physical_uintptr_t pageTable = physicalMemoryManager.allocatePage();

    // Map both pages temporarily into the current address space
    VirtualAddressSpace &virtualAddressSpace =
        Processor::information().getVirtualAddressSpace();
    virtualAddressSpace.map(
        m_PhysicalPageDirectory, KERNEL_VIRTUAL_TEMP1,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
    virtualAddressSpace.map(
        pageTable, KERNEL_VIRTUAL_TEMP2,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    // Zero the user-space part of the page directory, and the whole page
    // table
    ByteSet(KERNEL_VIRTUAL_TEMP1, 0, 0xC00);
    ByteSet(KERNEL_VIRTUAL_TEMP2, 0, 0x1000);

    // Copy the kernel-space directory entries from the current page
    // directory (the last two entries are set up separately below)
    MemoryCopy(
        adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xC00),
        adjust_pointer(VIRTUAL_PAGE_DIRECTORY, 0xC00), 0x3F8);

    // Install the recursive mapping: directory entry 0x3FF points back at
    // the page directory, and entry 0x3FE at the page table that maps the
    // directory at VIRTUAL_PAGE_DIRECTORY
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xFFC)) =
        m_PhysicalPageDirectory | PAGE_PRESENT | PAGE_WRITE;
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP1, 0xFF8)) =
        pageTable | PAGE_PRESENT | PAGE_WRITE;
    *reinterpret_cast<uint32_t *>(adjust_pointer(KERNEL_VIRTUAL_TEMP2, 0xFFC)) =
        m_PhysicalPageDirectory | PAGE_PRESENT | PAGE_WRITE;

    // Remove the temporary mappings again
    virtualAddressSpace.unmap(KERNEL_VIRTUAL_TEMP1);
    virtualAddressSpace.unmap(KERNEL_VIRTUAL_TEMP2);
}
X86VirtualAddressSpace::X86VirtualAddressSpace(
    void *Heap, physical_uintptr_t PhysicalPageDirectory,
    void *VirtualPageDirectory, void *VirtualPageTables, void *VirtualStack)
    : VirtualAddressSpace(Heap),
      m_PhysicalPageDirectory(PhysicalPageDirectory),
      m_VirtualPageDirectory(VirtualPageDirectory),
      m_VirtualPageTables(VirtualPageTables), m_pStackTop(VirtualStack),
      m_freeStacks(), m_Lock(false, true)
{
}
bool X86VirtualAddressSpace::doIsMapped(void *virtualAddress)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Align the address down to its page boundary
    virtualAddress = reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(virtualAddress) & ~0xFFF);

    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table or 4 MiB page present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;

    // A 4 MiB page is mapped directly by the directory entry
    if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
        return true;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page present?
    return ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT);
}
bool X86VirtualAddressSpace::doMap(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    // Check if we have an allocated escrow page - if not, allocate it now,
    // because the PMM cannot safely be called once m_Lock is held
    if (g_EscrowPages[Processor::id()] == 0)
    {
        g_EscrowPages[Processor::id()] =
            PhysicalMemoryManager::instance().allocatePage();
        if (g_EscrowPages[Processor::id()] == 0)
        {
            // Still 0, we have problems.
            FATAL("Out of memory");
        }
    }

    LockGuard<Spinlock> guard(m_Lock);

    size_t Flags = toFlags(flags, true);
    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        size_t PdeFlags = toFlags(flags);

        // Consume the escrow page as the new page table
        uint32_t page = g_EscrowPages[Processor::id()];
        g_EscrowPages[Processor::id()] = 0;

        *pageDirectoryEntry =
            page |
            ((PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE)) |
             PAGE_WRITE);

        // Zero the newly created page table
        ByteSet(
            PAGE_TABLE_ENTRY(m_VirtualPageTables, pageDirectoryIndex, 0), 0,
            0x1000);

        // If we map within the kernel space, we need to add this page table
        // to the other address spaces as well, because each page directory
        // carries its own copy of the kernel directory entries
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        if (Processor::m_Initialised == 2 &&
            virtualAddress >= KERNEL_SPACE_START)
        {
            for (size_t i = 0; i < Scheduler::instance().getNumProcesses();
                 i++)
            {
                Process *p = Scheduler::instance().getProcess(i);

                X86VirtualAddressSpace *x86VAS =
                    reinterpret_cast<X86VirtualAddressSpace *>(
                        p->getAddressSpace());
                if (x86VAS == this)
                    continue;

                Processor::switchAddressSpace(*p->getAddressSpace());
                pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                    x86VAS->m_VirtualPageDirectory, pageDirectoryIndex);
                *pageDirectoryEntry = page | PAGE_WRITE | PAGE_USER |
                                      (PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED |
                                                    PAGE_COPY_ON_WRITE));
            }

            if (g_pCurrentlyCloning)
            {
                X86VirtualAddressSpace *x86VAS =
                    reinterpret_cast<X86VirtualAddressSpace *>(
                        g_pCurrentlyCloning);
                if (x86VAS != this)
                {
                    Processor::switchAddressSpace(*g_pCurrentlyCloning);
                    pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                        x86VAS->m_VirtualPageDirectory, pageDirectoryIndex);
                    *pageDirectoryEntry =
                        page | PAGE_WRITE | PAGE_USER |
                        (PdeFlags &
                         ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE));
                }
            }

            X86VirtualAddressSpace *x86KernelVAS =
                reinterpret_cast<X86VirtualAddressSpace *>(
                    &VirtualAddressSpace::getKernelAddressSpace());
            if (x86KernelVAS != this)
            {
                Processor::switchAddressSpace(
                    VirtualAddressSpace::getKernelAddressSpace());
                pageDirectoryEntry = PAGE_DIRECTORY_ENTRY(
                    x86KernelVAS->m_VirtualPageDirectory, pageDirectoryIndex);
                *pageDirectoryEntry = page | PAGE_WRITE | PAGE_USER |
                                      (PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED |
                                                    PAGE_COPY_ON_WRITE));
            }

            Processor::switchAddressSpace(virtualAddressSpace);
        }
    }
    else if (
        (Flags & PAGE_USER) && ((*pageDirectoryEntry & PAGE_USER) != PAGE_USER))
    {
        // The page table exists, but must become user-accessible
        *pageDirectoryEntry |= PAGE_USER;
    }

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page already mapped here?
    if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
        return false;

    // Map the page and flush its TLB entry
    *pageTableEntry = physicalAddress | Flags;
    Processor::invalidate(virtualAddress);

    return true;
}
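/*
 * Explanatory note (not from the original source): every page directory in
 * the system carries its own copy of the kernel-space directory entries,
 * taken when the address space was created. When doMap() creates a page
 * table for a kernel-space address later on, the new directory entry only
 * appears in the current directory, so the code above patches the same
 * entry into every other address space (including one that is mid-clone,
 * and the kernel's own), switching CR3 to each one to reach its directory
 * through the recursive mapping. The escrow page exists because the new
 * page table must be produced while m_Lock is held, where calling back into
 * the physical memory manager would be unsafe; the escrow slot is refilled
 * before the lock is taken.
 */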
void X86VirtualAddressSpace::doGetMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::getMapping(): function misused");

    // Extract the physical address and the flags
    physicalAddress = PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);
    flags = fromFlags(PAGE_GET_FLAGS(pageTableEntry), true);
}
void X86VirtualAddressSpace::doSetFlags(void *virtualAddress, size_t newFlags)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::setFlags(): function misused");

    // Set the new flags and flush the TLB entry
    PAGE_SET_FLAGS(pageTableEntry, toFlags(newFlags, true));
    Processor::invalidate(virtualAddress);
}
void X86VirtualAddressSpace::doUnmap(void *virtualAddress)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Get a pointer to the page-table entry
    uint32_t *pageTableEntry = 0;
    if (getPageTableEntry(virtualAddress, pageTableEntry) == false)
        panic("VirtualAddressSpace::unmap(): function misused");

    // Clear the mapping and flush the TLB entry
    *pageTableEntry = 0;
    Processor::invalidate(virtualAddress);
}
void *X86VirtualAddressSpace::doAllocateStack(size_t sSize)
{
    m_Lock.acquire();

    // Reuse a previously freed stack if one is available
    void *pStack = 0;
    if (m_freeStacks.count() != 0)
    {
        pStack = m_freeStacks.popBack();
        m_Lock.release();
    }
    else
    {
        // Carve a new stack out of the stack area, leaving an unmapped page
        // between consecutive stacks
        pStack = m_pStackTop;
        m_pStackTop =
            adjust_pointer(m_pStackTop, -static_cast<ssize_t>(sSize + 0x1000));
        m_Lock.release();

        // Map the stack, page by page
        uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
        for (size_t j = 0; j < sSize; j += 0x1000)
        {
            physical_uintptr_t phys =
                PhysicalMemoryManager::instance().allocatePage();
            if (!map(phys, reinterpret_cast<void *>(stackBottom + j),
                     VirtualAddressSpace::Write))
                WARNING("map() failed in doAllocateStack");
        }
    }

    return pStack;
}
bool X86VirtualAddressSpace::getPageTableEntry(
    void *virtualAddress, uint32_t *&pageTableEntry)
{
    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);
    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, pageDirectoryIndex);

    // Is a page table present, and is it a real page table rather than a
    // 4 MiB page?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
        return false;
    if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
        return false;

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    pageTableEntry = PAGE_TABLE_ENTRY(
        m_VirtualPageTables, pageDirectoryIndex, pageTableIndex);

    // Is a page present, or at least swapped out?
    if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT &&
        (*pageTableEntry & PAGE_SWAPPED) != PAGE_SWAPPED)
        return false;

    return true;
}
uint32_t X86VirtualAddressSpace::toFlags(size_t flags, bool bFinal)
{
    uint32_t Flags = 0;
    if ((flags & KernelMode) == KernelMode)
        Flags |= PAGE_GLOBAL;
    else
        Flags |= PAGE_USER;
    if ((flags & Write) == Write)
        Flags |= PAGE_WRITE;
    if ((flags & WriteCombine) == WriteCombine)
        Flags |= PAGE_WRITE_COMBINE;
    if ((flags & CacheDisable) == CacheDisable)
        Flags |= PAGE_CACHE_DISABLE;
    if ((flags & Swapped) == Swapped)
        Flags |= PAGE_SWAPPED;
    else
        Flags |= PAGE_PRESENT;
    if ((flags & CopyOnWrite) == CopyOnWrite)
        Flags |= PAGE_COPY_ON_WRITE;
    if ((flags & Shared) == Shared)
        Flags |= PAGE_SHARED;
    if (bFinal)
    {
        if ((flags & WriteThrough) == WriteThrough)
            Flags |= PAGE_WRITE_THROUGH;
        if ((flags & Accessed) == Accessed)
            Flags |= PAGE_ACCESSED;
        if ((flags & Dirty) == Dirty)
            Flags |= PAGE_DIRTY;
        if ((flags & ClearDirty) == ClearDirty)
            Flags &= ~PAGE_DIRTY;
    }
    return Flags;
}
size_t X86VirtualAddressSpace::fromFlags(uint32_t Flags, bool bFinal)
{
    size_t flags = Execute;
    if ((Flags & PAGE_USER) != PAGE_USER)
        flags |= KernelMode;
    if ((Flags & PAGE_WRITE) == PAGE_WRITE)
        flags |= Write;
    if ((Flags & PAGE_WRITE_COMBINE) == PAGE_WRITE_COMBINE)
        flags |= WriteCombine;
    if ((Flags & PAGE_CACHE_DISABLE) == PAGE_CACHE_DISABLE)
        flags |= CacheDisable;
    if ((Flags & PAGE_SWAPPED) == PAGE_SWAPPED)
        flags |= Swapped;
    if ((Flags & PAGE_COPY_ON_WRITE) == PAGE_COPY_ON_WRITE)
        flags |= CopyOnWrite;
    if ((Flags & PAGE_SHARED) == PAGE_SHARED)
        flags |= Shared;
    if (bFinal)
    {
        if ((Flags & PAGE_WRITE_THROUGH) == PAGE_WRITE_THROUGH)
            flags |= WriteThrough;
        if ((Flags & PAGE_ACCESSED) == PAGE_ACCESSED)
            flags |= Accessed;
        if ((Flags & PAGE_DIRTY) == PAGE_DIRTY)
            flags |= Dirty;
    }
    return flags;
}
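/*
 * Explanatory note (not from the original source): toFlags()/fromFlags()
 * translate between the architecture-independent VirtualAddressSpace flags
 * (Write, Swapped, CopyOnWrite, ...) and raw x86 page entry bits. Swapped,
 * CopyOnWrite and Shared occupy bits 9-11, which the hardware ignores and
 * leaves available to the operating system. The bFinal parameter marks a
 * final page-table entry as opposed to a page-directory entry: bit 7 means
 * PAT in a PTE but "4 MiB page" in a PDE, so PAT-based attributes such as
 * PAGE_WRITE_THROUGH may only be applied when bFinal is true.
 */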
VirtualAddressSpace *X86VirtualAddressSpace::clone()
{
    // Create a new address space to populate
    X86VirtualAddressSpace *pClone =
        static_cast<X86VirtualAddressSpace *>(VirtualAddressSpace::create());
    if (!pClone)
    {
        WARNING("X86VirtualAddressSpace: Clone() failed!");
        return 0;
    }

    // Let doMap() propagate new kernel page tables into the clone as well
    g_pCurrentlyCloning = pClone;

    uintptr_t v = beginCrossSpace(pClone);

    for (uintptr_t i = 0; i < 1024; i++)
    {
        uint32_t *pageDirectoryEntry =
            PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, i);

        if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        // 4 MiB pages are not cloned
        if ((*pageDirectoryEntry & PAGE_4MB) == PAGE_4MB)
            continue;

        for (uintptr_t j = 0; j < 1024; j++)
        {
            uint32_t *pageTableEntry =
                PAGE_TABLE_ENTRY(m_VirtualPageTables, i, j);

            if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            uint32_t flags = PAGE_GET_FLAGS(pageTableEntry);
            physical_uintptr_t physicalAddress =
                PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);

            // Only clone the user-space area
            void *virtualAddress =
                reinterpret_cast<void *>(((i * 1024) + j) * 4096);
            if ((virtualAddress < USERSPACE_VIRTUAL_START) ||
                (virtualAddress >= KERNEL_SPACE_START))
                continue;

            if (flags & PAGE_SHARED)
            {
                // Shared pages are mapped into the clone unchanged
                mapCrossSpace(
                    v, physicalAddress, virtualAddress,
                    fromFlags(flags, true));
                continue;
            }

            // Mark the page copy-on-write and read-only in both spaces
            bool bWasCopyOnWrite = (flags & PAGE_COPY_ON_WRITE);
            flags |= PAGE_COPY_ON_WRITE;
            flags &= ~PAGE_WRITE;
            mapCrossSpace(
                v, physicalAddress, virtualAddress,
                fromFlags(flags, true));

            // The parent's own entry must become copy-on-write as well
            PAGE_SET_FLAGS(pageTableEntry, flags);

            // The frame is now referenced twice, so pin it (unless it was
            // already copy-on-write, in which case it is pinned already)
            if (!bWasCopyOnWrite)
                PhysicalMemoryManager::instance().pin(physicalAddress);
        }
    }

    endCrossSpace();

    g_pCurrentlyCloning = 0;

    return pClone;
}
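/*
 * Explanatory note (not from the original source): clone() walks every
 * user-space page of this address space. Pages marked shared keep their
 * flags and are mapped into the clone as-is. Every other page is switched
 * to copy-on-write in both parent and clone: write permission is removed
 * and PAGE_COPY_ON_WRITE set, so whichever side writes first takes a page
 * fault, giving the fault handler the chance to copy the frame and restore
 * write access. Pinning the frame tells the physical allocator that a
 * second address space now references it, so it is only freed once both
 * sides have released it.
 */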
void X86VirtualAddressSpace::revertToKernelAddressSpace()
{
    // Unmap and free every user-space page and page table, leaving only the
    // kernel mappings behind
    if (m_Heap < KERNEL_SPACE_START)
    {
        // The heap was in user space and goes away with the rest of it
        // ...
    }

    for (uintptr_t i = 0; i < 1024; i++)
    {
        uint32_t *pageDirectoryEntry =
            PAGE_DIRECTORY_ENTRY(m_VirtualPageDirectory, i);

        if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
            continue;

        // Only touch the user-space area
        if (reinterpret_cast<void *>(i * 1024 * 4096) >= KERNEL_SPACE_START)
            continue;
        if (reinterpret_cast<void *>(i * 1024 * 4096) < USERSPACE_VIRTUAL_START)
            continue;

        bool bDidSkip = false;
        for (uintptr_t j = 0; j < 1024; j++)
        {
            uint32_t *pageTableEntry =
                PAGE_TABLE_ENTRY(m_VirtualPageTables, i, j);

            if ((*pageTableEntry & PAGE_PRESENT) != PAGE_PRESENT)
                continue;

            size_t flags = PAGE_GET_FLAGS(pageTableEntry);
            physical_uintptr_t physicalAddress =
                PAGE_GET_PHYSICAL_ADDRESS(pageTableEntry);

            void *virtualAddress =
                reinterpret_cast<void *>(((i * 1024) + j) * 4096);
            unmap(virtualAddress);

            // Free the frame, unless another address space may still be
            // referencing it
            if ((flags & (PAGE_SHARED | PAGE_SWAPPED)) == 0)
                PhysicalMemoryManager::instance().freePage(physicalAddress);
            else
                bDidSkip = true;
        }

        // Free the page table itself, unless entries had to be left behind
        if (!bDidSkip)
        {
            PhysicalMemoryManager::instance().freePage(
                PAGE_GET_PHYSICAL_ADDRESS(pageDirectoryEntry));
            *pageDirectoryEntry = 0;
        }
    }
}
uintptr_t X86VirtualAddressSpace::beginCrossSpace(X86VirtualAddressSpace *pOther)
{
    // Map the other address space's page directory at a temporary location
    map(pOther->m_PhysicalPageDirectory, KERNEL_VIRTUAL_TEMP2,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    // Map its first page table at a second temporary location; mapCrossSpace
    // swaps this mapping as it moves between 4 MiB regions
    uint32_t *pDir = reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP2);
    map(pDir[0], KERNEL_VIRTUAL_TEMP3,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);

    return 0;
}
bool X86VirtualAddressSpace::mapCrossSpace(
    uintptr_t &v, physical_uintptr_t physicalAddress, void *virtualAddress,
    size_t flags)
{
    size_t Flags = toFlags(flags, true);
    size_t pageDirectoryIndex = PAGE_DIRECTORY_INDEX(virtualAddress);

    uint32_t *pageDirectoryEntry =
        PAGE_DIRECTORY_ENTRY(KERNEL_VIRTUAL_TEMP2, pageDirectoryIndex);
    uint32_t *pDir = reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP2);

    // Is a page table present?
    if ((*pageDirectoryEntry & PAGE_PRESENT) != PAGE_PRESENT)
    {
        size_t PdeFlags = toFlags(flags);

        // Allocate and install a new page table
        uint32_t page = PhysicalMemoryManager::instance().allocatePage();
        *pageDirectoryEntry =
            page |
            ((PdeFlags & ~(PAGE_GLOBAL | PAGE_SWAPPED | PAGE_COPY_ON_WRITE)) |
             PAGE_WRITE);

        // Map the new page table at the temporary location and zero it
        v = pageDirectoryIndex;
        unmap(KERNEL_VIRTUAL_TEMP3);
        map(pDir[pageDirectoryIndex], KERNEL_VIRTUAL_TEMP3,
            VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
        ByteSet(KERNEL_VIRTUAL_TEMP3, 0, 0x1000);
    }
    else if (
        (Flags & PAGE_USER) && ((*pageDirectoryEntry & PAGE_USER) != PAGE_USER))
    {
        *pageDirectoryEntry |= PAGE_USER;
    }

    // Make sure the right page table is mapped at KERNEL_VIRTUAL_TEMP3
    if (v != pageDirectoryIndex)
    {
        v = pageDirectoryIndex;
        unmap(KERNEL_VIRTUAL_TEMP3);
        map(pDir[pageDirectoryIndex], KERNEL_VIRTUAL_TEMP3,
            VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
    }

    size_t pageTableIndex = PAGE_TABLE_INDEX(virtualAddress);
    uint32_t *pageTableEntry =
        &(reinterpret_cast<uint32_t *>(KERNEL_VIRTUAL_TEMP3)[pageTableIndex]);

    // Is a page already present?
    if ((*pageTableEntry & PAGE_PRESENT) == PAGE_PRESENT)
        return false;

    // Map the page
    *pageTableEntry = physicalAddress | Flags;
    return true;
}

void X86VirtualAddressSpace::endCrossSpace()
{
    unmap(KERNEL_VIRTUAL_TEMP2);
    unmap(KERNEL_VIRTUAL_TEMP3);
}
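/*
 * Explanatory note (not from the original source): the cross-space helpers
 * let the kernel edit an address space other than the active one without
 * switching CR3 for every page. beginCrossSpace() maps the target's page
 * directory at KERNEL_VIRTUAL_TEMP2 and one of its page tables at
 * KERNEL_VIRTUAL_TEMP3; the cursor v remembers which directory index TEMP3
 * currently backs, so consecutive mapCrossSpace() calls that hit the same
 * 4 MiB region skip the unmap/remap cycle. endCrossSpace() tears the two
 * temporary mappings down again.
 */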
bool X86KernelVirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}

bool X86KernelVirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}

void X86KernelVirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
}

void X86KernelVirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
    doSetFlags(virtualAddress, newFlags);
}

void X86KernelVirtualAddressSpace::unmap(void *virtualAddress)
{
    doUnmap(virtualAddress);
}
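/*
 * Explanatory note (not from the original source): the kernel address space
 * overrides deliberately skip the ADDITIONAL_CHECKS CR3 assertion performed
 * by the generic versions above. Kernel page tables are mirrored into every
 * page directory (see the propagation logic in doMap()), so these operations
 * are valid no matter which address space is currently active.
 */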
void *X86KernelVirtualAddressSpace::allocateStack()
{
    void *pStack = doAllocateStack(KERNEL_STACK_SIZE + 0x1000);
    return pStack;
}
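/*
 * Explanatory note (not from the original source): x86 stacks grow
 * downwards, so doAllocateStack() hands back the top of the allocated region
 * and maps pages from (top - size) upwards. The extra 0x1000 bytes requested
 * here, like the page-sized gap left between consecutive stacks, presumably
 * serve as an unmapped guard page, so running off the bottom of a stack
 * faults immediately instead of corrupting its neighbour.
 */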
X86KernelVirtualAddressSpace::X86KernelVirtualAddressSpace()
    : X86VirtualAddressSpace(
          KERNEL_VIRTUAL_HEAP,
          reinterpret_cast<uintptr_t>(&pagedirectory) -
              reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
          KERNEL_VIRUTAL_PAGE_DIRECTORY, VIRTUAL_PAGE_TABLES,
          KERNEL_VIRTUAL_STACK)
{
    // No escrow pages have been allocated yet
    for (int i = 0; i < 256; i++)
    {
        g_EscrowPages[i] = 0;
    }
}