The Pedigree Project  0.1
kernel/core/processor/x64/VirtualAddressSpace.h
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef KERNEL_PROCESSOR_X64_VIRTUALADDRESSSPACE_H
21 #define KERNEL_PROCESSOR_X64_VIRTUALADDRESSSPACE_H
22 
23 #include "pedigree/kernel/Spinlock.h"
24 #include "pedigree/kernel/compiler.h"
25 #include "pedigree/kernel/processor/VirtualAddressSpace.h"
26 #include "pedigree/kernel/processor/types.h"
27 #include "pedigree/kernel/utilities/Vector.h"
28 #include "pedigree/kernel/utilities/utility.h"
29 
/** Fixed load address of the userspace dynamic linker. */
#define USERSPACE_DYNAMIC_LINKER_LOCATION reinterpret_cast<void *>(0x4FA00000)

/** Lowest mappable userspace virtual address. */
#define USERSPACE_VIRTUAL_START reinterpret_cast<void *>(0x400000)
/** Base of the userspace heap region. */
#define USERSPACE_VIRTUAL_HEAP reinterpret_cast<void *>(0x50000000)
/** Start of the reserved userspace region (the dynamic linker slot). */
#define USERSPACE_RESERVED_START USERSPACE_DYNAMIC_LINKER_LOCATION
/** Initial size of a userspace stack, in bytes (1 MiB). */
#define USERSPACE_VIRTUAL_STACK_SIZE 0x100000
/** Maximum size a userspace stack may grow to, in bytes (1 MiB). */
#define USERSPACE_VIRTUAL_MAX_STACK_SIZE 0x100000
/** Start of the userspace dynamic (mmap-style) allocation region. */
#define USERSPACE_DYNAMIC_START reinterpret_cast<void *>(0x100000000)
/** End of the userspace dynamic allocation region. */
#define USERSPACE_DYNAMIC_END reinterpret_cast<void *>(0x00000FFFFFFFFFFF)
/** Lowest address at which a userspace stack may be placed: one maximum
 *  stack size above the end of the dynamic region.
 *  NOTE: arithmetic goes through uintptr_t — adding directly to a void *
 *  is a non-standard GNU extension and fails under strict ISO C++. */
#define USERSPACE_VIRTUAL_LOWEST_STACK \
    reinterpret_cast<void *>( \
        reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_END) + \
        USERSPACE_VIRTUAL_MAX_STACK_SIZE)
/** Top stack location for userspace. */
#define USERSPACE_VIRTUAL_STACK \
    reinterpret_cast<void *>(0x00007FFFEFFFF000)  // right below the Event base
/** Base address of the per-process Event handler block. */
#define KERNEL_VIRTUAL_EVENT_BASE \
    reinterpret_cast<void *>(0x00007FFFF0000000)  // right above the stacks
/** Base of the kernel half of the address space (canonical upper half on x64). */
#define KERNEL_SPACE_START reinterpret_cast<void *>(0xFFFF800000000000)
/** First page-stack region for physical pages above 4 GiB. */
#define KERNEL_VIRTUAL_PAGESTACK_ABV4GB1 \
    reinterpret_cast<void *>(0xFFFF800100000000)
/** Second page-stack region for physical pages above 4 GiB. */
#define KERNEL_VIRTUAL_PAGESTACK_ABV4GB2 \
    reinterpret_cast<void *>(0xFFFF801000000000)
/** Base of the kernel heap. */
#define KERNEL_VIRTUAL_HEAP reinterpret_cast<void *>(0xFFFF900000000000)
/** Base of the kernel page cache region. */
#define KERNEL_VIRTUAL_CACHE reinterpret_cast<void *>(0xFFFFB00000000000)
/** Base of the MemoryRegion allocation area. */
#define KERNEL_VIRTUAL_MEMORYREGION_ADDRESS \
    reinterpret_cast<void *>(0xFFFFF00000000000)
/** Page stack for physical pages below 4 GiB. */
#define KERNEL_VIRTUAL_PAGESTACK_4GB \
    reinterpret_cast<void *>(0xFFFFFFFF7FC00000)
/** Virtual address the kernel image itself is mapped at. */
#define KERNEL_VIRTUAL_ADDRESS reinterpret_cast<void *>(0xFFFFFFFF7FF00000)
/** Location of the global kernel info block. */
#define KERNEL_VIRTUAL_INFO_BLOCK reinterpret_cast<void *>(0xFFFFFFFF8FFF0000)
/** Base address at which kernel modules are loaded. */
#define KERNEL_VIRTUAL_MODULE_BASE reinterpret_cast<void *>(0xFFFFFFFF90000000)
/** Lowest address usable for kernel stacks. */
#define KERNEL_VIRTUAL_LOWEST_STACK reinterpret_cast<void *>(0xFFFFFFFFE0000000)
/** Topmost kernel stack location. */
#define KERNEL_VIRTUAL_STACK reinterpret_cast<void *>(0xFFFFFFFFFFFF7000)

// Region sizes, derived from the layout above. pointer_diff_const (from
// utility.h) presumably yields the byte distance between the two addresses
// — confirm argument order (start, end) against its declaration.
#define KERNEL_VIRTUAL_MODULE_SIZE \
    pointer_diff_const(KERNEL_VIRTUAL_MODULE_BASE, KERNEL_VIRTUAL_LOWEST_STACK)
#define KERNEL_VIRTUAL_HEAP_SIZE \
    pointer_diff_const(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_CACHE)
#define KERNEL_VIRTUAL_CACHE_SIZE \
    pointer_diff_const( \
        KERNEL_VIRTUAL_CACHE, KERNEL_VIRTUAL_MEMORYREGION_ADDRESS)
#define KERNEL_VIRTUAL_MEMORYREGION_SIZE \
    pointer_diff_const( \
        KERNEL_VIRTUAL_MEMORYREGION_ADDRESS, KERNEL_VIRTUAL_PAGESTACK_4GB)
/** Size of each kernel stack, in bytes (32 KiB). */
#define KERNEL_STACK_SIZE 0x8000
83 
91 {
    // NOTE(review): friendship presumably grants Processor/Multiprocessor
    // direct access to the physical PML4 during address-space switches and
    // AP bring-up — confirm against their implementations.
    friend class Processor;
    friend class Multiprocessor;

  public:
    //
    // VirtualAddressSpace Interface
    //
    /** True if virtualAddress is valid (canonical) for this address space. */
    virtual bool isAddressValid(void *virtualAddress);
    /** True if a physical page is currently mapped at virtualAddress. */
    virtual bool isMapped(void *virtualAddress);

    /** Map one physical page at virtualAddress with the given flags.
     *  \return true on success. */
    virtual bool
    map(physical_uintptr_t physAddress, void *virtualAddress, size_t flags);
    /** Map `count` pages starting at physAddress/virtualAddress, using
     *  huge pages (exact page size not visible here — confirm). */
    virtual bool mapHuge(
        physical_uintptr_t physAddress, void *virtualAddress, size_t count,
        size_t flags);
    /** Retrieve the physical address and flags mapped at virtualAddress. */
    virtual void getMapping(
        void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags);
    /** Replace the mapping flags for virtualAddress with newFlags. */
    virtual void setFlags(void *virtualAddress, size_t newFlags);
    /** Remove the mapping at virtualAddress. */
    virtual void unmap(void *virtualAddress);
    /** Allocate a stack of the default size. */
    virtual Stack *allocateStack();
    /** Allocate a stack of stackSz bytes. */
    virtual Stack *allocateStack(size_t stackSz);
    /** Release a stack previously obtained from allocateStack(). */
    virtual void freeStack(Stack *pStack);

    /** True if pMem lies within the kernel heap region. */
    virtual bool memIsInKernelHeap(void *pMem);
    /** True if pMem lies within this address space's heap region. */
    virtual bool memIsInHeap(void *pMem);
    /** Current end of the heap region. */
    virtual void *getEndOfHeap();

    /** Create a copy of this address space (copy-on-write by default). */
    virtual VirtualAddressSpace *clone(bool copyOnWrite = true);
    /** Revert to the kernel address space layout (per name — confirm
     *  whether userspace mappings are freed or merely dropped). */
    virtual void revertToKernelAddressSpace();

    //
    // Needed for the PhysicalMemoryManager
    //
    /** Map paging structures themselves, for use by the physical memory
     *  manager before/outside the normal map() path. */
    bool mapPageStructures(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags);
    /** As mapPageStructures(), for the page stacks above 4 GiB. */
    bool mapPageStructuresAbove4GB(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags);

    /** Destructor — virtual, as this class is used polymorphically. */
    virtual ~X64VirtualAddressSpace();
149 
151  virtual uintptr_t getKernelStart() const
152  {
153  return reinterpret_cast<uintptr_t>(KERNEL_SPACE_START);
154  }
155 
157  virtual uintptr_t getUserStart() const
158  {
159  return reinterpret_cast<uintptr_t>(USERSPACE_VIRTUAL_START);
160  }
161 
163  virtual uintptr_t getUserReservedStart() const
164  {
165  return reinterpret_cast<uintptr_t>(USERSPACE_RESERVED_START);
166  }
167 
169  virtual uintptr_t getDynamicLinkerAddress() const
170  {
171  return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_LINKER_LOCATION);
172  }
173 
175  virtual uintptr_t getKernelHeapStart() const
176  {
177  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP);
178  }
179 
181  virtual uintptr_t getKernelHeapEnd() const
182  {
183  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
184  KERNEL_VIRTUAL_HEAP_SIZE;
185  }
186 
188  virtual uintptr_t getDynamicStart() const
189  {
190  return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_START);
191  }
192 
194  virtual uintptr_t getDynamicEnd() const
195  {
196  return reinterpret_cast<uintptr_t>(USERSPACE_DYNAMIC_END);
197  }
198 
200  virtual uintptr_t getGlobalInfoBlock() const
201  {
202  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_INFO_BLOCK);
203  }
204 
206  virtual uintptr_t getKernelCacheStart() const
207  {
208  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_CACHE);
209  }
210 
212  virtual uintptr_t getKernelCacheEnd() const
213  {
214  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_CACHE) +
215  KERNEL_VIRTUAL_CACHE_SIZE;
216  }
217 
219  virtual uintptr_t getKernelEventBlockStart() const
220  {
221  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_EVENT_BASE);
222  }
223 
225  virtual uintptr_t getKernelModulesStart() const
226  {
227  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MODULE_BASE);
228  }
229 
231  virtual uintptr_t getKernelModulesEnd() const
232  {
233  return reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MODULE_BASE) +
234  KERNEL_VIRTUAL_MODULE_SIZE;
235  }
236 
237  private:
247  void *Heap, physical_uintptr_t PhysicalPML4, void *VirtualStack);
248 
255 
    /** Locate the page-table entry covering virtualAddress; on success,
     *  pageTableEntry is set to point at the live entry.
     *  \return true if the entry exists. */
    bool
    getPageTableEntry(void *virtualAddress, uint64_t *&pageTableEntry) const;
    /** Possibly cleans up tables for the given address (frees paging
     *  structures that have become empty). */
    void maybeFreeTables(void *virtualAddress);
    /** Convert generic VirtualAddressSpace flags to x64 page-entry flags.
     *  bFinal presumably distinguishes last-level (PTE) entries — confirm. */
    uint64_t toFlags(size_t flags, bool bFinal = false) const PURE;
    /** Convert x64 page-entry flags back to generic flags (inverse of
     *  toFlags). */
    size_t fromFlags(uint64_t Flags, bool bFinal = false) const PURE;
    /** If *tableEntry is not yet present, allocate a new table and install
     *  it with the given flags.
     *  \return true on success. */
    bool conditionalTableEntryAllocation(uint64_t *tableEntry, uint64_t flags);
295  uint64_t *tableEntry, uint64_t physAddress, uint64_t flags);
296 
    /** map() implementation used when the address-space lock is already
     *  held; `locked` presumably indicates whether the lock must be
     *  released on a slow path — confirm against the .cc file. */
    bool mapUnlocked(
        physical_uintptr_t physAddress, void *virtualAddress, size_t flags,
        bool locked = false);

    /** unmap() implementation without taking the lock; when requireMapped
     *  is true the address is expected to already be mapped. */
    void unmapUnlocked(void *virtualAddress, bool requireMapped = true);

    /** Shared implementation behind both allocateStack() overloads. */
    Stack *doAllocateStack(size_t sSize);

    /** Physical address of this space's top-level page table (PML4). */
    physical_uintptr_t m_PhysicalPML4;
    /** Current top of the stack area — presumably the next stack
     *  allocation position; confirm in doAllocateStack(). */
    void *m_pStackTop;
325 
328 };
329 
332 #endif
virtual bool mapHuge(physical_uintptr_t physAddress, void *virtualAddress, size_t count, size_t flags)
bool mapUnlocked(physical_uintptr_t physAddress, void *virtualAddress, size_t flags, bool locked=false)
Stack * doAllocateStack(size_t sSize)
virtual void getMapping(void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags)
X64VirtualAddressSpace & operator=(const X64VirtualAddressSpace &)
virtual void unmap(void *virtualAddress)
static EXPORTED_PUBLIC VirtualAddressSpace & getKernelAddressSpace()
virtual bool map(physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
virtual void freeStack(Stack *pStack)
virtual void setFlags(void *virtualAddress, size_t newFlags)
static VirtualAddressSpace * create()
virtual bool memIsInHeap(void *pMem)
virtual bool memIsInKernelHeap(void *pMem)
void unmapUnlocked(void *virtualAddress, bool requireMapped=true)
bool getPageTableEntry(void *virtualAddress, uint64_t *&pageTableEntry) const
The exception was caused by a hardware task switch.
Definition: Processor.h:80
virtual VirtualAddressSpace * clone(bool copyOnWrite=true)
bool conditionalTableEntryAllocation(uint64_t *tableEntry, uint64_t flags)
size_t fromFlags(uint64_t Flags, bool bFinal=false) const PURE
bool conditionalTableEntryMapping(uint64_t *tableEntry, uint64_t physAddress, uint64_t flags)
bool mapPageStructures(physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
virtual bool isMapped(void *virtualAddress)
virtual bool isAddressValid(void *virtualAddress)
void maybeFreeTables(void *virtualAddress)
Possibly cleans up tables for the given address.
uint64_t toFlags(size_t flags, bool bFinal=false) const PURE