The Pedigree Project  0.1
ppc32/VirtualAddressSpace.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "VirtualAddressSpace.h"
21 #include "HashedPageTable.h"
22 #include "pedigree/kernel/Log.h"
23 #include "pedigree/kernel/machine/openfirmware/Device.h"
24 #include "pedigree/kernel/machine/openfirmware/OpenFirmware.h"
25 #include "pedigree/kernel/panic.h"
26 #include "pedigree/kernel/processor/PhysicalMemoryManager.h"
27 #include "pedigree/kernel/processor/Processor.h"
28 #include "pedigree/kernel/processor/types.h"
29 #include "pedigree/kernel/utilities/utility.h"
30 
// Index into the 1024-entry shadow page directory: top 10 bits of the address.
31 #define PAGE_DIRECTORY_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 22) & 0x3FF)
// Index into a 1024-entry shadow page table: bits 12-21 of the address.
32 #define PAGE_TABLE_INDEX(x) ((reinterpret_cast<uintptr_t>(x) >> 12) & 0x3FF)
33 
35 
37 {
39 }
40 
// create(): builds a fresh userspace address space (the allocation of `p` is
// on a line missing from this extract) with an empty heap —
// m_Heap == m_HeapEnd means no heap pages have been committed yet.
42 {
44  p->m_Heap = reinterpret_cast<void *>(USERSPACE_VIRTUAL_HEAP);
45  p->m_HeapEnd = reinterpret_cast<void *>(USERSPACE_VIRTUAL_HEAP);
46  return p;
47 }
48 
// Constructor: every space starts with its heap at KERNEL_VIRTUAL_HEAP and a
// placeholder VSID of 0; non-kernel spaces then obtain real VSIDs (the
// VsidManager::instance().obtainVsid() call is on a line missing from this
// extract).  The shadow page directory starts fully zeroed — no page tables.
50  : VirtualAddressSpace(reinterpret_cast<void *>(KERNEL_VIRTUAL_HEAP)),
51  m_Vsid(0)
52 {
53  if (this != &m_KernelSpace)
54  {
55  // Grab some VSIDs.
57  }
58 
59  ByteSet(
60  reinterpret_cast<uint8_t *>(m_pPageDirectory), 0,
61  sizeof(m_pPageDirectory));
62 }
63 
// Destructor: frees any allocated shadow page tables, then returns this
// space's VSIDs to the VsidManager (the returnVsid call is on a line missing
// from this extract).
65 {
66  for (int i = 0; i < 1024; i++)
67  if (m_pPageDirectory[i])
// NOTE(review): this deletes the directory pointer itself every iteration,
// not the i-th table — almost certainly should be
// `delete m_pPageDirectory[i];`.  Deleting the same pointer 1024 times (and
// m_pPageDirectory looks like a member array, not a heap pointer) is UB.
68  delete m_pPageDirectory;
69  // Return the VSIDs
71 }
72 
// initialise(): carves out 1MB of free physical RAM for the kernel's initial
// page tables, records a translation for it (so the hashed page table maps it
// when it comes up), asks the OpenFirmware MMU package to map it at
// KERNEL_INITIAL_PAGE_TABLES, zeroes it, and points the top 0x100 page
// directory slots (the uppermost 256MB of the address space) at those tables.
74 {
75  // We need to map our page tables in.
76  OFDevice chosen(OpenFirmware::instance().findDevice("/chosen"));
77  OFDevice mmu(chosen.getProperty("mmu"));
78 
79  // Try and find some free physical memory to put the initial page tables in.
80  uint32_t phys = translations.findFreePhysicalMemory(0x100000);
81  if (phys == 0)
82  panic("Couldn't find anywhere to load the initial page tables!");
83  // We've got some physical RAM - now add a translation for it so that
84  // when the hashed page table initialises it maps us in.
// NOTE(review): 0x6a is a raw translation mode bitmask — confirm its meaning
// against the Translations mode encoding used by initialRoster() (0x20/0x10).
87  translations.addTranslation(
88  KERNEL_INITIAL_PAGE_TABLES, phys, 0x100000, 0x6a);
89 
// OF "map" takes (mode, size, virt, phys); mode -1 means firmware default.
90  OFParam ret = mmu.executeMethod(
91  "map", 4, reinterpret_cast<OFParam>(-1),
92  reinterpret_cast<OFParam>(0x100000),
93  reinterpret_cast<OFParam>(KERNEL_INITIAL_PAGE_TABLES),
94  reinterpret_cast<OFParam>(phys));
95  if (ret == reinterpret_cast<OFParam>(-1))
96  panic("Kernel page table mapping failed");
97 
98  // Now make sure they're all invalid.
99  ByteSet(
100  reinterpret_cast<uint8_t *>(KERNEL_INITIAL_PAGE_TABLES), 0, 0x100000);
101  // Map them.
// 0x100 tables of 0x1000 bytes each cover the top 256 directory entries.
104  for (int i = 0; i < 0x100; i++)
105  m_pPageDirectory[1023 - i] = reinterpret_cast<ShadowPageTable *>(
106  KERNEL_INITIAL_PAGE_TABLES + i * 0x1000);
107 
108  return true;
109 }
110 
// initialRoster(): seeds the kernel's shadow page tables from the firmware
// translation list.  Only legal on the kernel address space — panics
// otherwise.  Each translation is walked 4KB page by 4KB page.
112 {
113  if (this != &m_KernelSpace)
114  panic("initialRoster() called on a VA space that is not the kernel "
115  "space!");
116 
117  // For every translation...
118  for (unsigned int i = 0; i < translations.getNumTranslations(); i++)
119  {
120  Translations::Translation t = translations.getTranslation(i);
121  // For every page in this translation...
122  for (unsigned int j = 0; j < t.size; j += 0x1000)
123  {
124  void *virtualAddress = reinterpret_cast<void *>(t.virt + j);
125  uint32_t physicalAddress = t.phys + j;
126 
// Convert the firmware mode bits into VirtualAddressSpace flags; pages
// default to writable.
127  uint32_t mode = t.mode;
128  uint32_t newMode = VirtualAddressSpace::Write;
// NOTE(review): the bodies of these two conditionals are on lines missing
// from this extract — presumably they OR extra flags into newMode for the
// 0x20 and 0x10 mode bits; confirm against upstream before relying on this.
129  if (mode & 0x20)
131  if (mode & 0x10)
133 
134  // Grab the page directory entry.
135  ShadowPageTable *pTable =
136  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
137 
138  // Sanity check.
139  if (pTable == 0)
140  {
// No table yet for this 4MB region — allocate and zero one.
141  pTable = new ShadowPageTable;
142 
143  ByteSet(
144  reinterpret_cast<uint8_t *>(pTable), 0,
145  sizeof(ShadowPageTable));
146  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)] = pTable;
147  }
148  // Grab the page table entry.
// PTE layout: top 20 bits = physical frame, low 12 bits = flags.
149  pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] =
150  (physicalAddress & 0xFFFFF000) | newMode;
151  }
152  }
153 }
154 
// isAddressValid(): unconditionally true — on PPC32 the whole 32-bit range
// is representable, so there are no architecturally invalid addresses here.
156 {
157  return true;
158 }
159 
160 bool PPC32VirtualAddressSpace::isMapped(void *virtualAddress)
161 {
162  // Firstly check if this is an access to kernel space.
163  // If so, redirect to the kernel address space.
164  uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
165  if (addr >= KERNEL_SPACE_START && this != &m_KernelSpace)
166  return m_KernelSpace.isMapped(virtualAddress);
167 
168  // Grab the page directory entry.
169  ShadowPageTable *pTable =
170  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
171 
172  // Sanity check.
173  if (pTable == 0)
174  return false;
175 
176  // Grab the page table entry.
177  uint32_t pte = pTable->entries[PAGE_TABLE_INDEX(virtualAddress)];
178 
179  // Valid?
180  if (pte == 0)
181  return false;
182  else
183  return true;
184 }
185 
// map(): records virtualAddress -> physicalAddress in this space's shadow
// page directory (allocating a page table on demand) and mirrors the mapping
// into the hashed page table under this space's VSID (the
// HashedPageTable::addMapping call head is on a line missing from this
// extract).  Returns false if the page is already mapped.
187  physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
188 {
189  // Firstly check if this is an access to kernel space.
190  // If so, redirect to the kernel address space.
191  uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
192  if (addr >= KERNEL_SPACE_START && this != &m_KernelSpace)
// NOTE(review): the delegated result is discarded and execution falls
// through to map the address in this space as well — compare isMapped(),
// which returns.  This should probably be
// `return m_KernelSpace.map(physicalAddress, virtualAddress, flags);`.
193  m_KernelSpace.map(physicalAddress, virtualAddress, flags);
194 
195  // Grab the page directory entry.
196  ShadowPageTable *pTable =
197  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
198 
199  // Sanity check.
200  if (pTable == 0)
201  {
202  // New page table.
203  pTable = new ShadowPageTable;
204  ByteSet(
205  reinterpret_cast<uint8_t *>(pTable), 0, sizeof(ShadowPageTable));
206  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)] = pTable;
207  }
208 
209  // Check we're not already mapped.
210  if (pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] != 0)
211  return false;
212 
213  // Grab the page table entry.
// PTE layout: top 20 bits = physical frame, low 12 bits = flags.
214  pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] =
215  (physicalAddress & 0xFFFFF000) | flags;
216 
217  // Put it in the hash table.
// Each VSID covers one 256MB segment: segment = addr >> 28.
219  addr, physicalAddress, flags, m_Vsid * 8 + (addr >> 28));
220 
221  return true;
222 }
223 
225  void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
226 {
227  // Firstly check if this is an access to kernel space.
228  // If so, redirect to the kernel address space.
229  uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
230  if (addr >= KERNEL_SPACE_START && this != &m_KernelSpace)
231  m_KernelSpace.getMapping(virtualAddress, physicalAddress, flags);
232 
233  // Grab the page directory entry.
234  ShadowPageTable *pTable =
235  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
236 
237  // Sanity check.
238  if (pTable == 0)
239  return;
240 
241  // Grab the page table entry.
242  uint32_t pte = pTable->entries[PAGE_TABLE_INDEX(virtualAddress)];
243 
244  physicalAddress = pte & 0xFFF;
245  flags = pte & 0xFFFFF000;
246 }
247 
248 void PPC32VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
249 {
250  // Firstly check if this is an access to kernel space.
251  // If so, redirect to the kernel address space.
252  uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
253  if (addr >= KERNEL_SPACE_START && this != &m_KernelSpace)
254  m_KernelSpace.setFlags(virtualAddress, newFlags);
255 
256  // Grab the page directory entry.
257  ShadowPageTable *pTable =
258  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
259 
260  // Sanity check.
261  if (pTable == 0)
262  return;
263 
264  // Grab the page table entry.
265  pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] &= 0xFFFFF000;
266  pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] |= newFlags & 0xFFF;
267 }
268 
// unmap(): clears the shadow PTE for virtualAddress and removes the mapping
// from the hashed page table under this space's VSID (the
// HashedPageTable::removeMapping call head is on a line missing from this
// extract).  No-op if nothing is mapped.
269 void PPC32VirtualAddressSpace::unmap(void *virtualAddress)
270 {
271  // Firstly check if this is an access to kernel space.
272  // If so, redirect to the kernel address space.
273  uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
274  if (addr >= KERNEL_SPACE_START && this != &m_KernelSpace)
// NOTE(review): no return after delegating — execution falls through and
// also walks this space's tables for the kernel address (harmless only if
// the local table slot is empty).  Should probably be
// `return m_KernelSpace.unmap(virtualAddress);`.
275  m_KernelSpace.unmap(virtualAddress);
276 
277  // Grab the page directory entry.
278  ShadowPageTable *pTable =
279  m_pPageDirectory[PAGE_DIRECTORY_INDEX(virtualAddress)];
280 
281  // Sanity check.
282  if (pTable == 0)
283  return;
284 
285  // Grab the PTE.
286  pTable->entries[PAGE_TABLE_INDEX(virtualAddress)] = 0;
287 
288  // Unmap from the hash table.
// Each VSID covers one 256MB segment: segment = addr >> 28.
290  reinterpret_cast<uint32_t>(virtualAddress), m_Vsid * 8 + (addr >> 28));
291 }
292 
// allocateStack(): hands out a 0x4000-byte (16KB) heap-allocated stack and
// returns a pointer 4 bytes below its top (stacks grow downwards; the -4
// keeps the first pushed word inside the allocation).  The allocation base
// is not recorded anywhere, so freeStack() currently cannot release it.
294 {
295  uint8_t *pStackTop = new uint8_t[0x4000];
296 
297  return pStackTop + 0x4000 - 4;
298 }
// freeStack(): intentionally empty — stacks from allocateStack() are leaked.
299 void PPC32VirtualAddressSpace::freeStack(void *pStack)
300 {
301  // TODO: allocateStack() returns base + 0x4000 - 4, so the allocation
// base must be recovered from pStack (subtract 0x4000 - 4) before it can
// be delete[]d; until then every stack leaks.
302 }
303 
// clone(): deep-copies this address space's userspace mappings into a newly
// created space (the create() call and the PhysicalMemoryManager
// allocatePage / map-flags lines are missing from this extract).  For every
// mapped user page: allocate a fresh frame, map it at KERNEL_VIRTUAL_TEMP1,
// copy the page contents across, unmap the temp, then map the new frame at
// the same virtual address (with the same flags) in the clone.
305 {
306  PPC32VirtualAddressSpace *newAS =
307  reinterpret_cast<PPC32VirtualAddressSpace *>(
// Stop as soon as the directory index reaches kernel space — only
// userspace mappings are cloned; kernel mappings are shared.
309  for (uint64_t i = 0; i < 1024; i++)
310  {
311  if (i * 1024ULL * 4096ULL >= KERNEL_SPACE_START)
312  break;
313 
314  if (m_pPageDirectory[i])
315  {
316  ShadowPageTable *pPageTable = m_pPageDirectory[i];
317  for (int j = 0; j < 1024; j++)
318  {
319  if (pPageTable->entries[j] == 0)
320  continue;
321 
// Reconstruct the virtual address from directory/table indices, and pull
// the flags out of the low 12 PTE bits.
322  void *virtualAddress =
323  reinterpret_cast<void *>(((i * 1024) + j) * 4096);
324  uint32_t flags = pPageTable->entries[j] & 0xFFF;
325 
326  // Page mapped in source address space, but not in kernel.
328  physical_uintptr_t newFrame =
330 
331  // Temporarily map in.
332  map(newFrame, KERNEL_VIRTUAL_TEMP1,
335 
336  // Copy across.
337  MemoryCopy(KERNEL_VIRTUAL_TEMP1, virtualAddress, 0x1000);
338 
339  // Unmap.
340  unmap(KERNEL_VIRTUAL_TEMP1);
341 
342  // Map in.
343  newAS->map(newFrame, virtualAddress, flags);
344  }
345  }
346  }
347  return newAS;
348 }
349 
// revert(): tears down every userspace mapping in this address space,
// except the low region holding the interrupt handlers and symbol table.
351 {
352  // This is easy - we just unmap everything!
353  for (uint64_t i = 0; i < 1024; i++)
354  {
// Stop at kernel space — kernel mappings are never reverted.
355  if (i * 1024ULL * 4096ULL >= KERNEL_SPACE_START)
356  break;
357 
358  if (m_pPageDirectory[i])
359  {
360  ShadowPageTable *pPageTable = m_pPageDirectory[i];
361  for (int j = 0; j < 1024; j++)
362  {
// NOTE(review): two oddities here — (1) `i <= 3` preserves directories
// 0-3, i.e. the first 16MB, while the comment says 12MB (that would be
// `i <= 2`); (2) the check depends only on i, so it belongs outside the
// j loop (as written it spins through 1024 no-op iterations per table).
363  if (i <= 3)
364  continue; // Don't unmap the first 12MB - contains our
365  // interrupt handlers and symtab!
366 
367  if (pPageTable->entries[j] == 0)
368  continue;
369 
370  unmap(reinterpret_cast<void *>(((i * 1024) + j) * 4096));
371  }
372  }
373  }
374 }
static HashedPageTable & instance()
static PhysicalMemoryManager & instance()
virtual void setFlags(void *virtualAddress, size_t newFlags)
static OpenFirmware & instance()
Definition: OpenFirmware.h:45
void returnVsid(Vsid vsid)
Definition: VsidManager.cc:58
virtual physical_uintptr_t allocatePage(size_t pageConstraints=0)=0
static EXPORTED_PUBLIC VirtualAddressSpace & getKernelAddressSpace()
virtual bool isAddressValid(void *virtualAddress)
void addMapping(uint32_t effectiveAddress, uint32_t physicalAddress, uint32_t mode, uint32_t vsid)
void removeMapping(uint32_t effectiveAddress, uint32_t vsid)
static VirtualAddressSpace * create()
uintptr_t physicalAddress(physical_uintptr_t address) PURE
Definition: utils.h:38
virtual VirtualAddressSpace * clone()
bool initialise(Translations &translations)
virtual void unmap(void *virtualAddress)
Translation getTranslation(size_t n)
Definition: Translation.cc:57
static VsidManager & instance()
Definition: VsidManager.cc:25
virtual void getMapping(void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
virtual bool isMapped(void *virtualAddress)
void EXPORTED_PUBLIC panic(const char *msg) NORETURN
Definition: panic.cc:121
virtual bool map(physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
Vsid obtainVsid()
Definition: VsidManager.cc:38
void addTranslation(uint32_t virt, uint32_t phys, uint32_t size, uint32_t mode)
Definition: Translation.cc:67
void initialRoster(Translations &translations)
uint32_t findFreePhysicalMemory(uint32_t size, uint32_t align=0x100000)
Definition: Translation.cc:80
size_t getNumTranslations()
Definition: Translation.cc:62