The Pedigree Project  0.1
armv7/VirtualAddressSpace.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "VirtualAddressSpace.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/machine/Machine.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/types.h"
#include "pedigree/kernel/utilities/utility.h"

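// Per-processor escrow pages, indexed by Processor::id(). Each CPU keeps one
// physical page pre-allocated here so that doMap() already has backing memory
// in hand when it finds a missing second-level page table; the page is
// consumed there and replenished on a later doMap() call.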
physical_uintptr_t g_EscrowPages[256];

VirtualAddressSpace &VirtualAddressSpace::getKernelAddressSpace()
{
    return ArmV7KernelVirtualAddressSpace::m_Instance;
}

VirtualAddressSpace *VirtualAddressSpace::create()
{
    return new ArmV7VirtualAddressSpace();
}

ArmV7VirtualAddressSpace::ArmV7VirtualAddressSpace()
    : VirtualAddressSpace(reinterpret_cast<void *>(0))
{
}

ArmV7VirtualAddressSpace::~ArmV7VirtualAddressSpace()
{
}

bool ArmV7VirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
        return false;
    else if (pMem >= getEndOfHeap())
        return false;
    else
        return true;
}
void *ArmV7VirtualAddressSpace::getEndOfHeap()
{
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
        KERNEL_VIRTUAL_HEAP_SIZE);
}

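// toFlags()/fromFlags() translate between the generic VirtualAddressSpace
// flags and the ARMv7 short-descriptor AP[1:0] access-permission field used
// below (AP[2] is left clear): 0 = no access, 1 = privileged read/write,
// 2 = privileged read/write with user read-only, 3 = read/write for all.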
uint32_t ArmV7VirtualAddressSpace::toFlags(size_t flags)
{
    uint32_t ret = 0; // No access.
    if (flags & KernelMode)
    {
        ret = 1; // Read/write in privileged mode, not usable in user mode
    }
    else
    {
        if (flags & Write)
            ret = 3; // Read/write all
        else
            ret = 2; // Read/write privileged, read-only user
    }
    return ret;
}

size_t ArmV7VirtualAddressSpace::fromFlags(uint32_t flags)
{
    switch (flags)
    {
        case 0:
            return 0; // Zero permissions
        case 1:
            return (Write | KernelMode);
        case 2:
            return 0; // Read-only by user mode... how to represent that?
        case 3:
            return Write; // Read/write all
        default:
            return 0;
    }
}

bool ArmV7VirtualAddressSpace::initialise()
{
    return true;
}

bool ArmV7VirtualAddressSpace::isAddressValid(void *virtualAddress)
{
    // No address is "invalid" in the sense that we're looking for here.
    return true;
}

bool ArmV7VirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}

void ArmV7VirtualAddressSpace::unmap(void *virtualAddress)
{
    return doUnmap(virtualAddress);
}

bool ArmV7VirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}

void ArmV7VirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
}

void ArmV7VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
    return doSetFlags(virtualAddress, newFlags);
}

bool ArmV7VirtualAddressSpace::doIsMapped(void *virtualAddress)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    addr &= ~0xFFF;
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;

    // Grab the entry in the page directory
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        if (pdir[pdir_offset].descriptor.fault.type == 2)
            return true; // No second-level table walk

        // Knowing if a page is mapped is a global thing
        SecondLevelDescriptor *ptbl = reinterpret_cast<SecondLevelDescriptor *>(
            reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
            (pdir_offset * 0x400));
        if (ptbl[ptab_offset].descriptor.fault.type)
            return true;
    }

    return false;
}

extern "C" void writeHex(unsigned int n);
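// doMap() installs a single 4 KB small-page mapping. Each first-level entry
// covers 1 MB but a second-level table is only 1 KB, so when no table exists
// yet the current CPU's escrow page is split into four 1 KB tables backing
// four consecutive first-level entries (4 MB of address space).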
bool ArmV7VirtualAddressSpace::doMap(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    // Determine which range of page tables to use
    void *pageTables = 0;
    if (reinterpret_cast<uintptr_t>(virtualAddress) < 0x40000000)
        pageTables = USERSPACE_PAGETABLES;
    else
        pageTables = KERNEL_PAGETABLES;

    // Check if we have an allocated escrow page - if we don't, allocate it.
    // The kernel address space already has all page tables pre-allocated.
    if ((g_EscrowPages[Processor::id()] == 0) &&
        (pageTables == USERSPACE_PAGETABLES))
    {
        g_EscrowPages[Processor::id()] =
            PhysicalMemoryManager::instance().allocatePage();
        if (g_EscrowPages[Processor::id()] == 0)
        {
            // Still 0, we have problems.
            FATAL("Out of memory");
        }
    }

    // Grab offsets for the virtual address
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptbl_offset = (addr >> 12) & 0xFF;

    // Is there a page table for this page yet?
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);

    if (!pdir[pdir_offset].descriptor.entry)
    {
        // There isn't - allocate one.
        uintptr_t page = g_EscrowPages[Processor::id()];
        g_EscrowPages[Processor::id()] = 0;

        // Point to the page, which defines a page table, is not shareable,
        // and is in domain 0. Note that there's 4 page tables in a 4K page,
        // so we handle that here.
        for (int i = 0; i < 4; i++)
        {
            pdir[pdir_offset + i].descriptor.entry = page + (i * 1024);
            pdir[pdir_offset + i].descriptor.pageTable.type = 1;
            pdir[pdir_offset + i].descriptor.pageTable.sbz1 =
                pdir[pdir_offset + i].descriptor.pageTable.sbz2 = 0;
            pdir[pdir_offset + i].descriptor.pageTable.ns = 1;
            pdir[pdir_offset + i].descriptor.pageTable.domain =
                0; // DOMAIN0: Main memory
            pdir[pdir_offset + i].descriptor.pageTable.imp = 0;

            // Map in the page table we've just created so we can zero it.
            // mapaddr is the virtual address of the page table we just
            // allocated physical space for.
            uintptr_t mapaddr =
                reinterpret_cast<uintptr_t>(m_VirtualPageTables);
            mapaddr += ((pdir_offset + i) * 0x400);
            uint32_t ptbl_offset2 = (mapaddr >> 12) & 0xFF;

            // Grab the right page table for this new page
            uintptr_t ptbl_addr =
                reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                (mapaddr >> 20);
            SecondLevelDescriptor *ptbl =
                reinterpret_cast<SecondLevelDescriptor *>(ptbl_addr);
            ptbl[ptbl_offset2].descriptor.entry = page + (i * 1024);
            ptbl[ptbl_offset2].descriptor.smallpage.type = 2;
            ptbl[ptbl_offset2].descriptor.smallpage.b = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.c = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.ap1 =
                3; // Page table, give it READ/WRITE
            ptbl[ptbl_offset2].descriptor.smallpage.sbz = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.ap2 = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.s = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.nG = 1;

            // Mapped, so clear the page now
            ByteSet(reinterpret_cast<void *>(mapaddr), 0, 1024);
        }
    }

    // Grab the virtual address for the page table for this page
    uintptr_t mapaddr = reinterpret_cast<uintptr_t>(pageTables);
    mapaddr += pdir_offset * 0x400;

    // Perform the mapping, if necessary
    SecondLevelDescriptor *ptbl =
        reinterpret_cast<SecondLevelDescriptor *>(mapaddr);
    if (ptbl[ptbl_offset].descriptor.entry & 0x3)
    {
        return false; // Already mapped.
    }
    else
    {
        ptbl[ptbl_offset].descriptor.entry = physicalAddress;
        ptbl[ptbl_offset].descriptor.smallpage.type = 2;
        ptbl[ptbl_offset].descriptor.smallpage.b = 0;
        ptbl[ptbl_offset].descriptor.smallpage.c = 0;
        ptbl[ptbl_offset].descriptor.smallpage.ap1 = toFlags(flags);
        ptbl[ptbl_offset].descriptor.smallpage.sbz = 0;
        ptbl[ptbl_offset].descriptor.smallpage.ap2 = 0;
        ptbl[ptbl_offset].descriptor.smallpage.s = 0;
        ptbl[ptbl_offset].descriptor.smallpage.nG = 1;
    }

    return true;
}

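// doGetMapping() decodes the first-level descriptor for the address: type 1
// points to a second-level table (walked for a 4 KB small page), type 2 maps
// a 1 MB section or a 16 MB supersection directly from the first level.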
void ArmV7VirtualAddressSpace::doGetMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    addr &= ~0xFFF;
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;

    // Grab the entry in the page directory
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        // What type is the entry?
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:
            {
                // Page table walk.
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                if (!ptbl[ptab_offset].descriptor.fault.type)
                    return;
                else
                {
                    physicalAddress =
                        ptbl[ptab_offset].descriptor.smallpage.base << 12;
                    flags =
                        fromFlags(ptbl[ptab_offset].descriptor.smallpage.ap1);
                }
                break;
            }
            case 2:
            {
                // Section or supersection
                if (pdir[pdir_offset].descriptor.section.sectiontype == 0)
                {
                    uintptr_t offset = addr % 0x100000;
                    physicalAddress =
                        (pdir[pdir_offset].descriptor.section.base << 20) +
                        offset;
                    flags = fromFlags(pdir[pdir_offset].descriptor.section.ap1);
                }
                else if (pdir[pdir_offset].descriptor.section.sectiontype == 1)
                {
                    uintptr_t offset = addr % 0x1000000;
                    physicalAddress =
                        (pdir[pdir_offset].descriptor.section.base << 20) +
                        offset;
                    flags = fromFlags(pdir[pdir_offset].descriptor.section.ap1);
                }
                else
                    ERROR("doGetMapping: who knows what the hell this paging "
                          "structure is");
                break;
            }
            default:
                return;
        }
    }
}

void ArmV7VirtualAddressSpace::doSetFlags(void *virtualAddress, size_t newFlags)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;

    // Grab the entry in the page directory
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        // What type is the entry?
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:
            {
                // Page table walk.
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                ptbl[ptab_offset].descriptor.smallpage.ap1 = toFlags(newFlags);
                break;
            }
            case 2:
            {
                // Section or supersection
                if (pdir[pdir_offset].descriptor.section.sectiontype == 0)
                    pdir[pdir_offset].descriptor.section.ap1 =
                        toFlags(newFlags);
                else if (pdir[pdir_offset].descriptor.section.sectiontype == 1)
                {
                    WARNING("doSetFlags: supersections not handled yet");
                }
                else
                    ERROR("doSetFlags: who knows what the hell this paging "
                          "structure is");
                break;
            }
            default:
                return;
        }
    }
}

void ArmV7VirtualAddressSpace::doUnmap(void *virtualAddress)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;

    // Grab the entry in the page directory
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        // What type is the entry?
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:
            {
                // Page table walk.
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                ptbl[ptab_offset].descriptor.fault.type = 0; // Unmap.
                break;
            }
            case 2:
                // Section or supersection
                pdir[pdir_offset].descriptor.fault.type = 0;
                break;
            default:
                return;
        }
    }
}

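// Stack allocation: stacks are carved downwards from m_pStackTop, and stacks
// returned through freeStack() are recycled from m_freeStacks before any new
// virtual range is consumed.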
void *ArmV7VirtualAddressSpace::allocateStack()
{
    size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
    if (this == &getKernelAddressSpace())
        sz = KERNEL_STACK_SIZE;
    return doAllocateStack(sz);
}

void *ArmV7VirtualAddressSpace::allocateStack(size_t stackSz)
{
    if (stackSz == 0)
        return allocateStack();
    return doAllocateStack(stackSz);
}

void *ArmV7VirtualAddressSpace::doAllocateStack(size_t sSize)
{
    size_t flags = 0;
    if (this == &getKernelAddressSpace())
    {
        flags = KernelMode;
    }

    m_Lock.acquire();

    // Get a virtual address for the stack
    void *pStack = 0;
    if (m_freeStacks.count() != 0)
    {
        pStack = m_freeStacks.popBack();
        m_Lock.release();
    }
    else
    {
        pStack = m_pStackTop;
        m_pStackTop = adjust_pointer(m_pStackTop, -sSize);

        m_Lock.release();

        // Map it in
        uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
        for (size_t j = 0; j < sSize; j += PhysicalMemoryManager::getPageSize())
        {
            physical_uintptr_t phys =
                PhysicalMemoryManager::instance().allocatePage();
            bool b =
                map(phys, reinterpret_cast<void *>(j + stackBottom),
                    VirtualAddressSpace::Write | flags);
            if (!b)
                WARNING("map() failed in doAllocateStack");
        }
    }
    return pStack;
}

void ArmV7VirtualAddressSpace::freeStack(void *pStack)
{
    // Add the stack to the list
    m_freeStacks.pushBack(pStack);
}

extern char __start, __end;

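// Kernel address space bring-up. The physical layout assumed here (and by the
// ArmV7KernelVirtualAddressSpace constructor below) is a 16 KB first-level
// page directory at 0x8FAFC000 and a 4 MB block of second-level page tables
// at 0x8FB00000, whose last 1 KB serves as the page table that maps the page
// directory itself into virtual memory.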
bool ArmV7KernelVirtualAddressSpace::initialiseKernelAddressSpace()
{
    // The kernel address space is initialised by this function. We don't have
    // the MMU on yet, so we can modify the page directory to our heart's
    // content. We'll enable the MMU after the page directory is set up - only
    // ever use this function to construct the kernel address space, and only
    // ever once.

    // Map in the 4 MB we'll use for page tables - this region is pinned in
    // PhysicalMemoryManager
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_PhysicalPageDirectory);
    ByteSet(pdir, 0, 0x4000);

    uint32_t pdir_offset = 0, ptbl_offset = 0;
    uintptr_t vaddr = 0, paddr = 0;

    // Page table for mapping in the page directory. This table will cover the
    // last MB of the address space.
    physical_uintptr_t ptbl_paddr = 0x8FB00000 + (0x400000 - 0x400);
    ByteSet(reinterpret_cast<void *>(0x8FB00000), 0, 0x400000);

    // Map in the page directory
    SecondLevelDescriptor *ptbl =
        reinterpret_cast<SecondLevelDescriptor *>(ptbl_paddr);
    vaddr = reinterpret_cast<uintptr_t>(m_VirtualPageDirectory);
    pdir_offset = vaddr >> 20;

    pdir[pdir_offset].descriptor.entry =
        ptbl_paddr; // Last page table in the 4K block.
    pdir[pdir_offset].descriptor.pageTable.type = 1;
    pdir[pdir_offset].descriptor.pageTable.sbz1 =
        pdir[pdir_offset].descriptor.pageTable.sbz2 = 0;
    pdir[pdir_offset].descriptor.pageTable.ns = 0; // Shareable
    pdir[pdir_offset].descriptor.pageTable.domain =
        1; // Paging structures = DOMAIN1
    pdir[pdir_offset].descriptor.pageTable.imp = 0;
    for (int i = 0; i < 4; i++) // 4 pages in the page directory
    {
        ptbl_offset = ((vaddr + (i * 0x1000)) >> 12) & 0xFF;
        ptbl[ptbl_offset].descriptor.entry =
            m_PhysicalPageDirectory + (i * 0x1000);
        ptbl[ptbl_offset].descriptor.smallpage.type = 2;
        ptbl[ptbl_offset].descriptor.smallpage.b = 0;
        ptbl[ptbl_offset].descriptor.smallpage.c = 0;
        ptbl[ptbl_offset].descriptor.smallpage.ap1 = 3;
        ptbl[ptbl_offset].descriptor.smallpage.ap2 = 0;
        ptbl[ptbl_offset].descriptor.smallpage.sbz = 0;
        ptbl[ptbl_offset].descriptor.smallpage.s = 1; // Shareable
        ptbl[ptbl_offset].descriptor.smallpage.nG = 0; // Global, hint to MMU
    }

    // Identity-map the kernel
    size_t kernelSize = reinterpret_cast<uintptr_t>(&__end) - 0x80000000;
    for (size_t offset = 0; offset < kernelSize; offset += 0x100000)
    {
        uintptr_t baseAddr = 0x80000000 + offset;
        pdir_offset = baseAddr >> 20;

        // Map this block
        pdir[pdir_offset].descriptor.entry = baseAddr;
        pdir[pdir_offset].descriptor.section.type = 2;
        pdir[pdir_offset].descriptor.section.b = 0;
        pdir[pdir_offset].descriptor.section.c = 0;
        pdir[pdir_offset].descriptor.section.xn = 0;
        pdir[pdir_offset].descriptor.section.domain = 2; // Kernel = DOMAIN2
        pdir[pdir_offset].descriptor.section.imp = 0;
        pdir[pdir_offset].descriptor.section.ap1 = 3;
        pdir[pdir_offset].descriptor.section.ap2 = 0;
        pdir[pdir_offset].descriptor.section.tex = 0;
        pdir[pdir_offset].descriptor.section.s = 1;
        pdir[pdir_offset].descriptor.section.nG = 0;
        pdir[pdir_offset].descriptor.section.sectiontype = 0;
        pdir[pdir_offset].descriptor.section.ns = 0;
    }

    // Pre-allocate and define all the remaining kernel page tables.
    vaddr = reinterpret_cast<uintptr_t>(KERNEL_PAGETABLES);
    paddr = 0x8FB00000;
    for (size_t offset = 0; offset < 0x400000; offset += 0x100000)
    {
        uintptr_t baseAddr = vaddr + offset;
        pdir_offset = baseAddr >> 20;

        // Map this block
        pdir[pdir_offset].descriptor.entry = paddr + offset;
        pdir[pdir_offset].descriptor.section.type = 2;
        pdir[pdir_offset].descriptor.section.b = 0;
        pdir[pdir_offset].descriptor.section.c = 0;
        pdir[pdir_offset].descriptor.section.xn = 0;
        pdir[pdir_offset].descriptor.section.domain =
            1; // Paging structures = DOMAIN1
        pdir[pdir_offset].descriptor.section.imp = 0;
        pdir[pdir_offset].descriptor.section.ap1 = 3;
        pdir[pdir_offset].descriptor.section.ap2 = 0;
        pdir[pdir_offset].descriptor.section.tex = 0;
        pdir[pdir_offset].descriptor.section.s = 1;
        pdir[pdir_offset].descriptor.section.nG = 0;
        pdir[pdir_offset].descriptor.section.sectiontype = 0;
        pdir[pdir_offset].descriptor.section.ns = 0;

        // Virtual address base of the region this 1 MB of page tables maps
        uintptr_t blockVBase = offset << 10;

        // 1024 page tables in this 1 MB region
        for (int i = 0; i < 1024; i++)
        {
            // First virtual address mapped by this page table
            uintptr_t firstVaddr = blockVBase + (i * 0x100000);

            // Physical address of the page table (as mapped above)
            uintptr_t ptbl_paddr = paddr + offset + (i * 0x400);

            // Do NOT overwrite existing mappings. That'll negate the above.
            pdir_offset = firstVaddr >> 20;
            if (pdir[pdir_offset].descriptor.entry)
                continue;
            pdir[pdir_offset].descriptor.entry = ptbl_paddr;
            pdir[pdir_offset].descriptor.pageTable.type = 1;
            pdir[pdir_offset].descriptor.pageTable.sbz1 =
                pdir[pdir_offset].descriptor.pageTable.sbz2 = 0;
            pdir[pdir_offset].descriptor.pageTable.ns = 0; // Shareable
            pdir[pdir_offset].descriptor.pageTable.domain =
                1; // Paging structures = DOMAIN1
            pdir[pdir_offset].descriptor.pageTable.imp = 0;
        }
    }

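    // Register programming below: TTBR1 holds the kernel page directory, and
    // TTBCR.N = 2 means TTBR0 translates only the bottom 1 GB (user space)
    // while everything above goes through TTBR1. The MCR to c3,c0,0 writes
    // DACR, granting manager access to every domain, and SCTLR bit 0 (M)
    // enables the MMU.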
    // Set up the required control registers before turning on the MMU
    Processor::writeTTBR0(0);
    Processor::writeTTBR1(m_PhysicalPageDirectory);
    Processor::writeTTBCR(
        2); // 0b010 = 4 KB TTBR0 directory, 1/3 GB split for user/kernel
    asm volatile("MCR p15,0,%0,c3,c0,0"
                 :
                 : "r"(0xFFFFFFFF)); // Manager access to all domains for now

    // Switch on the MMU
    uint32_t sctlr = 0;
    asm volatile("MRC p15,0,%0,c1,c0,0" : "=r"(sctlr));
    if (!(sctlr & 1))
        sctlr |= 1;
    asm volatile("MCR p15,0,%0,c1,c0,0" : : "r"(sctlr));

    return true;
}

ArmV7VirtualAddressSpace::ArmV7VirtualAddressSpace(
    void *Heap, physical_uintptr_t PhysicalPageDirectory,
    void *VirtualPageDirectory, void *VirtualPageTables, void *VirtualStack)
    : VirtualAddressSpace(Heap), m_PhysicalPageDirectory(PhysicalPageDirectory),
      m_VirtualPageDirectory(VirtualPageDirectory),
      m_VirtualPageTables(VirtualPageTables), m_pStackTop(VirtualStack),
      m_freeStacks(), m_Lock(false, true)
{
}

ArmV7KernelVirtualAddressSpace::ArmV7KernelVirtualAddressSpace()
    : ArmV7VirtualAddressSpace(
          KERNEL_VIRTUAL_HEAP, 0x8FAFC000, KERNEL_PAGEDIR, KERNEL_PAGETABLES,
          KERNEL_VIRTUAL_STACK)
{
    for (int i = 0; i < 256; i++)
        g_EscrowPages[i] = 0;
}

ArmV7KernelVirtualAddressSpace::~ArmV7KernelVirtualAddressSpace()
{
}

bool ArmV7KernelVirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}

bool ArmV7KernelVirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}

void ArmV7KernelVirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
    if (!(flags & KernelMode))
        flags |= KernelMode;
}

void ArmV7KernelVirtualAddressSpace::setFlags(
    void *virtualAddress, size_t newFlags)
{
    doSetFlags(virtualAddress, newFlags);
}

void ArmV7KernelVirtualAddressSpace::unmap(void *virtualAddress)
{
    doUnmap(virtualAddress);
}