#ifndef KERNEL_PROCESSOR_X64_VIRTUALADDRESSSPACE_H
#define KERNEL_PROCESSOR_X64_VIRTUALADDRESSSPACE_H

#include "pedigree/kernel/Spinlock.h"
#include "pedigree/kernel/compiler.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/processor/types.h"
#include "pedigree/kernel/utilities/Vector.h"
#include "pedigree/kernel/utilities/utility.h"

#define USERSPACE_DYNAMIC_LINKER_LOCATION reinterpret_cast<void *>(0x4FA00000)

#define USERSPACE_VIRTUAL_START reinterpret_cast<void *>(0x400000)
#define USERSPACE_VIRTUAL_HEAP reinterpret_cast<void *>(0x50000000)
#define USERSPACE_RESERVED_START USERSPACE_DYNAMIC_LINKER_LOCATION
#define USERSPACE_VIRTUAL_STACK_SIZE 0x100000
#define USERSPACE_VIRTUAL_MAX_STACK_SIZE 0x100000
#define USERSPACE_DYNAMIC_START reinterpret_cast<void *>(0x100000000)
#define USERSPACE_DYNAMIC_END reinterpret_cast<void *>(0x00000FFFFFFFFFFF)
#define USERSPACE_VIRTUAL_LOWEST_STACK \
    reinterpret_cast<void *>( \
        USERSPACE_DYNAMIC_END + USERSPACE_VIRTUAL_MAX_STACK_SIZE)
#define USERSPACE_VIRTUAL_STACK \
    reinterpret_cast<void *>(0x00007FFFEFFFF000)  // right below the Event base
#define KERNEL_VIRTUAL_EVENT_BASE \
    reinterpret_cast<void *>(0x00007FFFF0000000)  // right above the stacks

#define KERNEL_SPACE_START reinterpret_cast<void *>(0xFFFF800000000000)
#define KERNEL_VIRTUAL_PAGESTACK_ABV4GB1 \
    reinterpret_cast<void *>(0xFFFF800100000000)
#define KERNEL_VIRTUAL_PAGESTACK_ABV4GB2 \
    reinterpret_cast<void *>(0xFFFF801000000000)
#define KERNEL_VIRTUAL_HEAP reinterpret_cast<void *>(0xFFFF900000000000)
#define KERNEL_VIRTUAL_CACHE reinterpret_cast<void *>(0xFFFFB00000000000)
#define KERNEL_VIRTUAL_MEMORYREGION_ADDRESS \
    reinterpret_cast<void *>(0xFFFFF00000000000)
#define KERNEL_VIRTUAL_PAGESTACK_4GB \
    reinterpret_cast<void *>(0xFFFFFFFF7FC00000)
#define KERNEL_VIRTUAL_ADDRESS reinterpret_cast<void *>(0xFFFFFFFF7FF00000)
#define KERNEL_VIRTUAL_INFO_BLOCK reinterpret_cast<void *>(0xFFFFFFFF8FFF0000)
#define KERNEL_VIRTUAL_MODULE_BASE reinterpret_cast<void *>(0xFFFFFFFF90000000)
#define KERNEL_VIRTUAL_LOWEST_STACK reinterpret_cast<void *>(0xFFFFFFFFE0000000)
#define KERNEL_VIRTUAL_STACK reinterpret_cast<void *>(0xFFFFFFFFFFFF7000)

#define KERNEL_VIRTUAL_MODULE_SIZE \
    pointer_diff_const(KERNEL_VIRTUAL_MODULE_BASE, KERNEL_VIRTUAL_LOWEST_STACK)
#define KERNEL_VIRTUAL_HEAP_SIZE \
    pointer_diff_const(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_CACHE)
#define KERNEL_VIRTUAL_CACHE_SIZE \
    pointer_diff_const( \
        KERNEL_VIRTUAL_CACHE, KERNEL_VIRTUAL_MEMORYREGION_ADDRESS)
#define KERNEL_VIRTUAL_MEMORYREGION_SIZE \
    pointer_diff_const( \
        KERNEL_VIRTUAL_MEMORYREGION_ADDRESS, KERNEL_VIRTUAL_PAGESTACK_4GB)
#define KERNEL_STACK_SIZE 0x8000
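// Worked example (not from the original header): the region size macros above
// are plain pointer differences, so the layout can be sanity-checked by hand:
//
//   KERNEL_VIRTUAL_HEAP_SIZE   = 0xFFFFB00000000000 - 0xFFFF900000000000
//                              = 0x200000000000 bytes (32 TiB of kernel heap)
//   KERNEL_VIRTUAL_CACHE_SIZE  = 0xFFFFF00000000000 - 0xFFFFB00000000000
//                              = 0x400000000000 bytes (64 TiB of kernel cache)
//   KERNEL_VIRTUAL_MODULE_SIZE = 0xFFFFFFFFE0000000 - 0xFFFFFFFF90000000
//                              = 0x50000000 bytes (1.25 GiB for modules)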
/** The X64VirtualAddressSpace implements the VirtualAddressSpace interface
 *  for the x64 processor architecture. */
class X64VirtualAddressSpace : public VirtualAddressSpace
{
  public:
    /** Get the kernel virtual address space */
    static EXPORTED_PUBLIC VirtualAddressSpace &getKernelAddressSpace();

    /** Create a new virtual address space */
    static VirtualAddressSpace *create();

    /** The destructor cleans up the address space */
    virtual ~X64VirtualAddressSpace();

    virtual bool isAddressValid(void *virtualAddress);
    virtual bool isMapped(void *virtualAddress);
    virtual bool map(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags);
    virtual bool mapHuge(
        physical_uintptr_t physAddress, void *virtualAddress, size_t count,
        size_t flags);
    virtual void getMapping(
        void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags);
    virtual void setFlags(void *virtualAddress, size_t newFlags);
    virtual void unmap(void *virtualAddress);

    virtual VirtualAddressSpace *clone(bool copyOnWrite = true);
    virtual void revertToKernelAddressSpace();

    virtual Stack *allocateStack();
    virtual void freeStack(Stack *pStack);

    virtual bool memIsInHeap(void *pMem);
    virtual bool memIsInKernelHeap(void *pMem);
    virtual void *getEndOfHeap();
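    /** Usage sketch (illustrative only, not part of the original header):
     *  mapping one physical frame, inspecting it, then unmapping it. The
     *  physical address, virtual address and the use of the base class's
     *  generic Write flag are assumptions for the example.
     *  \code
     *  VirtualAddressSpace &vas = VirtualAddressSpace::getKernelAddressSpace();
     *  void *vaddr = reinterpret_cast<void *>(0xFFFF900000001000);  // hypothetical
     *  if (vas.map(0x200000, vaddr, VirtualAddressSpace::Write))
     *  {
     *      physical_uintptr_t phys = 0;
     *      size_t flags = 0;
     *      vas.getMapping(vaddr, phys, flags);  // phys == 0x200000 here
     *      vas.unmap(vaddr);
     *  }
     *  \endcode
     */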
    bool mapPageStructures(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags);
    bool mapPageStructuresAbove4GB(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags);
    virtual uintptr_t getKernelStart() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_SPACE_START);
    }
    virtual uintptr_t getUserStart() const
    {
        return reinterpret_cast<uintptr_t>(USERSPACE_VIRTUAL_START);
    }
    virtual uintptr_t getUserReservedStart() const
    {
        return reinterpret_cast<uintptr_t>(USERSPACE_RESERVED_START);
    }
    virtual uintptr_t getDynamicLinkerAddress() const
    {
        return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_LINKER_LOCATION);
    }
    virtual uintptr_t getKernelHeapStart() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP);
    }
    virtual uintptr_t getKernelHeapEnd() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
               KERNEL_VIRTUAL_HEAP_SIZE;
    }
    virtual uintptr_t getDynamicStart() const
    {
        return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_START);
    }
    virtual uintptr_t getDynamicEnd() const
    {
        return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_END);
    }
    virtual uintptr_t getGlobalInfoBlock() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_INFO_BLOCK);
    }
    virtual uintptr_t getKernelCacheStart() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_CACHE);
    }
    virtual uintptr_t getKernelCacheEnd() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_CACHE) +
               KERNEL_VIRTUAL_CACHE_SIZE;
    }
    virtual uintptr_t getKernelEventBlockStart() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_EVENT_BASE);
    }
    virtual uintptr_t getKernelModulesStart() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MODULE_BASE);
    }
    virtual uintptr_t getKernelModulesEnd() const
    {
        return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MODULE_BASE) +
               KERNEL_VIRTUAL_MODULE_SIZE;
    }
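    /** Illustrative sketch (not the actual implementation): a range check
     *  such as memIsInKernelHeap() can be phrased directly in terms of the
     *  getters above.
     *  \code
     *  bool inKernelHeap(VirtualAddressSpace &vas, void *pMem)
     *  {
     *      uintptr_t addr = reinterpret_cast<uintptr_t>(pMem);
     *      return addr >= vas.getKernelHeapStart() &&
     *             addr < vas.getKernelHeapEnd();
     *  }
     *  \endcode
     */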
  private:
    /** The constructor wrapping already existing paging structures */
    X64VirtualAddressSpace(
        void *Heap, physical_uintptr_t PhysicalPML4, void *VirtualStack);

    /** The assignment operator is not implemented */
    X64VirtualAddressSpace &operator=(const X64VirtualAddressSpace &);
    /** Translate processor-independent VirtualAddressSpace flags into x64
     *  page-table entry flags. */
    uint64_t toFlags(size_t flags, bool bFinal = false) const PURE;
    /** Translate x64 page-table entry flags back into processor-independent
     *  flags. */
    size_t fromFlags(uint64_t Flags, bool bFinal = false) const PURE;
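    /** Illustrative sketch (not the actual toFlags() implementation): on
     *  x86-64 such a translation maps generic flags onto architectural
     *  page-table bits (present = bit 0, writable = bit 1, user = bit 2,
     *  no-execute = bit 63). The generic flag names used here are assumed
     *  from the VirtualAddressSpace base class.
     *  \code
     *  uint64_t exampleToFlags(size_t flags)
     *  {
     *      uint64_t result = 0x1;  // bit 0: present
     *      if (flags & VirtualAddressSpace::Write)
     *          result |= 0x2;  // bit 1: writable
     *      if (!(flags & VirtualAddressSpace::KernelMode))
     *          result |= 0x4;  // bit 2: user-accessible
     *      if (!(flags & VirtualAddressSpace::Execute))
     *          result |= 0x8000000000000000ULL;  // bit 63: no-execute (NX)
     *      return result;
     *  }
     *  \endcode
     */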
    /** Allocate a backing frame for the given table entry if none is mapped
     *  yet. */
    bool conditionalTableEntryAllocation(uint64_t *tableEntry, uint64_t flags);

    /** Point the given table entry at physAddress if none is mapped yet. */
    bool conditionalTableEntryMapping(
        uint64_t *tableEntry, uint64_t physAddress, uint64_t flags);
    /** Variant of map() that assumes any needed lock is already held. */
    bool mapUnlocked(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags,
        bool locked = false);
    /** Variant of unmap() that assumes any needed lock is already held. */
    void unmapUnlocked(void *virtualAddress, bool requireMapped = true);
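    /** Illustrative sketch (assumed pattern, not taken from the source): the
     *  public map()/unmap() entry points would typically acquire a lock and
     *  delegate to the *Unlocked variants, so compound operations can hold
     *  the lock across several page manipulations. The m_Lock member below
     *  is hypothetical.
     *  \code
     *  bool X64VirtualAddressSpace::map(
     *      physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
     *  {
     *      LockGuard<Spinlock> guard(m_Lock);  // hypothetical lock member
     *      return mapUnlocked(physAddress, virtualAddress, flags);
     *  }
     *  \endcode
     */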
    /** Retrieve the page-table entry for a virtual address, if present. */
    bool getPageTableEntry(
        void *virtualAddress, uint64_t *&pageTableEntry) const;

    /** Possibly cleans up (frees) now-empty paging structures for the given
     *  address. */
    void maybeFreeTables(void *virtualAddress);

    /** Back-end for allocateStack(), allocating a stack of the given size. */
    Stack *doAllocateStack(size_t sSize);

    /** Physical address of the PML4 (top-level paging structure). */
    physical_uintptr_t m_PhysicalPML4;

    /** Previously freed stacks, kept for reuse. */
    Vector<Stack *> m_freeStacks;

    /** The kernel virtual address space. */
    static X64VirtualAddressSpace m_KernelSpace;
};

#endif