#include "VirtualAddressSpace.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/machine/Machine.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/types.h"
#include "pedigree/kernel/utilities/utility.h"

physical_uintptr_t g_EscrowPages[256];
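// Presumably one pre-reserved ("escrow") physical page per processor,
// used as a last-resort allocation when the physical allocator runs dry;
// the original comment for this array is elided in this listing.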
bool ArmV7VirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
        return false;
    else if (pMem >= getEndOfHeap())
        return false;
    else
        return true;
}

void *ArmV7VirtualAddressSpace::getEndOfHeap()
{
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_HEAP) +
        KERNEL_VIRTUAL_HEAP_SIZE);
}
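// Worked example (values illustrative, not from this file): with a heap
// at 0xC0000000 and KERNEL_VIRTUAL_HEAP_SIZE of 0x10000000, memIsInHeap()
// accepts exactly [0xC0000000, 0xD0000000); getEndOfHeap() is one byte
// past the last valid heap address.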
bool ArmV7VirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}

void ArmV7VirtualAddressSpace::unmap(void *virtualAddress)
{
    return doUnmap(virtualAddress);
}

bool ArmV7VirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}

void ArmV7VirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
}

void ArmV7VirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
    return doSetFlags(virtualAddress, newFlags);
}
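// Note the pattern: the public virtual interface simply forwards to
// non-virtual do*() helpers. ArmV7KernelVirtualAddressSpace at the end of
// this file overrides the same virtuals and forwards to the same helpers,
// so both address-space classes share a single implementation of the
// actual table walks.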
bool ArmV7VirtualAddressSpace::doIsMapped(void *virtualAddress)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        // Type 2 marks a section or supersection: mapped, no second level.
        if (pdir[pdir_offset].descriptor.fault.type == 2)
            return true;
        // Otherwise check the 0x400-byte second-level table for this 1 MB.
        SecondLevelDescriptor *ptbl = reinterpret_cast<SecondLevelDescriptor *>(
            reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
            (pdir_offset * 0x400));
        if (ptbl[ptab_offset].descriptor.fault.type)
            return true;
    }
    return false;
}
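// A minimal sketch (not part of the original file) of the address split
// used by the table walkers above. Under the ARMv7-A short-descriptor
// format, the top 12 bits index the 4096-entry first-level table (1 MB
// per entry), the next 8 bits index a 256-entry second-level table
// (4 KB per entry), and the low 12 bits fall within the page.
static inline void exampleSplit(uintptr_t addr)
{
    uint32_t pdir_offset = addr >> 20;          // 0x80012345 -> 0x800
    uint32_t ptab_offset = (addr >> 12) & 0xFF; // 0x80012345 -> 0x12
    uint32_t page_offset = addr & 0xFFF;        // 0x80012345 -> 0x345
    (void) pdir_offset; (void) ptab_offset; (void) page_offset;
}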
extern "C" void writeHex(unsigned int n);
bool ArmV7VirtualAddressSpace::doMap(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    // Pick the page-table window for the target address: userspace below
    // 0x40000000, kernel otherwise.
    void *pageTables = 0;
    if (reinterpret_cast<uintptr_t>(virtualAddress) < 0x40000000)
        pageTables = USERSPACE_PAGETABLES;
    else
        pageTables = KERNEL_PAGETABLES;

    // ... (obtain a physical page, `page`, to hold new second-level
    // tables; if no page can be obtained:)
        FATAL("Out of memory");
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptbl_offset = (addr >> 12) & 0xFF;

    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (!pdir[pdir_offset].descriptor.entry)
    {
        // No first-level entry yet: use the freshly allocated 4 KB page as
        // four consecutive 1 KB second-level tables, covering 4 MB.
        for (int i = 0; i < 4; i++)
        {
            pdir[pdir_offset + i].descriptor.entry = page + (i * 1024);
            pdir[pdir_offset + i].descriptor.pageTable.type = 1;
            pdir[pdir_offset + i].descriptor.pageTable.sbz1 =
                pdir[pdir_offset + i].descriptor.pageTable.sbz2 = 0;
            pdir[pdir_offset + i].descriptor.pageTable.ns = 1;
            pdir[pdir_offset + i].descriptor.pageTable.domain = /* ... */;
            pdir[pdir_offset + i].descriptor.pageTable.imp = 0;

            // Map the new table into the page-table window so it can be
            // zeroed below.
            uintptr_t mapaddr = reinterpret_cast<uintptr_t>(pageTables);
            mapaddr += ((pdir_offset + i) * 0x400);
            uint32_t ptbl_offset2 = (mapaddr >> 12) & 0xFF;
            // ...
            uintptr_t ptbl_addr = /* ... */;
            SecondLevelDescriptor *ptbl =
                reinterpret_cast<SecondLevelDescriptor *>(ptbl_addr);
            ptbl[ptbl_offset2].descriptor.entry = page + (i * 1024);
            ptbl[ptbl_offset2].descriptor.smallpage.type = 2;
            ptbl[ptbl_offset2].descriptor.smallpage.b = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.c = 0;
            // ...
            ptbl[ptbl_offset2].descriptor.smallpage.sbz = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.ap2 = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.s = 0;
            ptbl[ptbl_offset2].descriptor.smallpage.nG = 1;
            // ...
            ByteSet(reinterpret_cast<void *>(mapaddr), 0, 1024);
        }
    }
    // Fill in the actual mapping through the page-table window.
    uintptr_t mapaddr = reinterpret_cast<uintptr_t>(pageTables);
    mapaddr += pdir_offset * 0x400;
    SecondLevelDescriptor *ptbl =
        reinterpret_cast<SecondLevelDescriptor *>(mapaddr);
    if (ptbl[ptbl_offset].descriptor.entry & 0x3)
        return false;  // already mapped
    // ... (set the physical base and permissions from physicalAddress and
    // toFlags(flags))
    ptbl[ptbl_offset].descriptor.smallpage.type = 2;
    // ...
    ptbl[ptbl_offset].descriptor.smallpage.sbz = 0;
    ptbl[ptbl_offset].descriptor.smallpage.ap2 = 0;
    // ...
    ptbl[ptbl_offset].descriptor.smallpage.nG = 1;
    // ...
    return true;
}
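// For reference, the smallpage fields set above pack into one 32-bit
// second-level descriptor. A hedged sketch of that encoding (bit layout
// per the ARMv7-A short-descriptor format; the helper itself is
// illustrative, not part of the original file):
static inline uint32_t exampleSmallPage(uint32_t physBase, uint32_t ap, bool nG)
{
    return (physBase & 0xFFFFF000) |  // bits [31:12]: physical page base
           ((nG ? 1u : 0u) << 11) |   // bit  [11]:    nG (not global)
           ((ap & 0x3u) << 4) |       // bits [5:4]:   AP[1:0] permissions
           0x2u;                      // bits [1:0]=10: small page, XN clear
}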
void ArmV7VirtualAddressSpace::doGetMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:  // points at a second-level table
            {
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                if (!ptbl[ptab_offset].descriptor.fault.type)
                    return;  // not mapped at the second level
                physicalAddress =
                    ptbl[ptab_offset].descriptor.smallpage.base << 12;
                flags =
                    fromFlags(ptbl[ptab_offset].descriptor.smallpage.ap1);
                break;
            }
            case 2:  // section or supersection
                if (pdir[pdir_offset].descriptor.section.sectiontype == 0)
                {
                    // Standard 1 MB section.
                    uintptr_t offset = addr % 0x100000;
                    physicalAddress =
                        (pdir[pdir_offset].descriptor.section.base << 20) +
                        offset;
                    flags =
                        fromFlags(pdir[pdir_offset].descriptor.section.ap1);
                }
                else if (
                    pdir[pdir_offset].descriptor.section.sectiontype == 1)
                {
                    // 16 MB supersection.
                    uintptr_t offset = addr % 0x1000000;
                    physicalAddress =
                        (pdir[pdir_offset].descriptor.section.base << 20) +
                        offset;
                    flags =
                        fromFlags(pdir[pdir_offset].descriptor.section.ap1);
                }
                else
                    ERROR("doGetMapping: who knows what the hell this paging "
                          "structure is.");
                break;
        }
    }
}
void ArmV7VirtualAddressSpace::doSetFlags(void *virtualAddress, size_t newFlags)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:  // second-level table
            {
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                // ... (apply toFlags(newFlags) to ptbl[ptab_offset])
                break;
            }
            case 2:  // section or supersection
                if (pdir[pdir_offset].descriptor.section.sectiontype == 0)
                {
                    // ... (apply toFlags(newFlags) to the section entry)
                }
                else if (
                    pdir[pdir_offset].descriptor.section.sectiontype == 1)
                {
                    WARNING("doSetFlags: supersections not handled yet");
                }
                else
                    ERROR("doSetFlags: who knows what the hell this paging "
                          "structure is.");
                break;
        }
    }
}
void ArmV7VirtualAddressSpace::doUnmap(void *virtualAddress)
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(virtualAddress);
    uint32_t pdir_offset = addr >> 20;
    uint32_t ptab_offset = (addr >> 12) & 0xFF;
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_VirtualPageDirectory);
    if (pdir[pdir_offset].descriptor.entry)
    {
        switch (pdir[pdir_offset].descriptor.fault.type)
        {
            case 1:  // second-level table: clear the small-page entry
            {
                SecondLevelDescriptor *ptbl =
                    reinterpret_cast<SecondLevelDescriptor *>(
                        reinterpret_cast<uintptr_t>(m_VirtualPageTables) +
                        (pdir_offset * 0x400));
                // ... (clear ptbl[ptab_offset])
                break;
            }
            // ...
        }
    }
}
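// Stacks grow downward: allocateStack() hands out the *top* of the stack,
// and doAllocateStack() maps sSize bytes below that top, one 4 KB page at
// a time.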
void *ArmV7VirtualAddressSpace::allocateStack()
{
    size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
    if (this == &getKernelAddressSpace())
        sz = KERNEL_STACK_SIZE;
    return doAllocateStack(sz);
}

void *ArmV7VirtualAddressSpace::allocateStack(size_t stackSz)
{
    return doAllocateStack(stackSz);
}
void *ArmV7VirtualAddressSpace::doAllocateStack(size_t sSize)
{
    // ... (take a previously freed stack from m_freeStacks if available,
    // otherwise carve a new region; pStack becomes the top of the stack)
    void *pStack = 0;
    // ...

    // Map every page of the new stack, from the bottom up.
    uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
    for (size_t j = 0; j < sSize; j += 0x1000)
    {
        physical_uintptr_t phys =
            PhysicalMemoryManager::instance().allocatePage();
        if (!map(phys, reinterpret_cast<void *>(j + stackBottom),
                 VirtualAddressSpace::Write))
            WARNING("map() failed in doAllocateStack");
    }
    return pStack;
}
void ArmV7VirtualAddressSpace::freeStack(void *pStack)
{
    // ... (push pStack onto m_freeStacks for later reuse)
}

extern char __start, __end;
bool ArmV7KernelVirtualAddressSpace::initialiseKernelAddressSpace()
{
    // The MMU is still off here, so the page directory is addressed by
    // its physical address directly.
    FirstLevelDescriptor *pdir =
        reinterpret_cast<FirstLevelDescriptor *>(m_PhysicalPageDirectory);

    // Clear the 16 KB first-level table (4096 four-byte entries).
    ByteSet(pdir, 0, 0x4000);
    uint32_t pdir_offset = 0, ptbl_offset = 0;
    uintptr_t vaddr = 0, paddr = 0;

    // Reserve a 4 MB block of second-level tables at physical 0x8FB00000,
    // zero it, and take its last 1 KB slot for the first mapping below.
    physical_uintptr_t ptbl_paddr = 0x8FB00000 + (0x400000 - 0x400);
    ByteSet(reinterpret_cast<void *>(0x8FB00000), 0, 0x400000);
    // ... (vaddr selects the region this first table will map)
    pdir_offset = vaddr >> 20;
    // First-level entry for this region: point it at the reserved table.
    pdir[pdir_offset].descriptor.entry = ptbl_paddr;
    pdir[pdir_offset].descriptor.pageTable.type = 1;
    // ...
    pdir[pdir_offset].descriptor.pageTable.sbz2 = 0;
    pdir[pdir_offset].descriptor.pageTable.ns = 0;
    pdir[pdir_offset].descriptor.pageTable.domain = /* ... */;
    pdir[pdir_offset].descriptor.pageTable.imp = 0;
    SecondLevelDescriptor *ptbl =
        reinterpret_cast<SecondLevelDescriptor *>(ptbl_paddr);
    for (int i = 0; i < 4; i++)
    {
        ptbl_offset = ((vaddr + (i * 0x1000)) >> 12) & 0xFF;
        // ... (set the physical base for this page)
        ptbl[ptbl_offset].descriptor.smallpage.type = 2;
        // ...
        ptbl[ptbl_offset].descriptor.smallpage.ap1 = 3;  // full access
        ptbl[ptbl_offset].descriptor.smallpage.ap2 = 0;
        ptbl[ptbl_offset].descriptor.smallpage.sbz = 0;
        // ...
        ptbl[ptbl_offset].descriptor.smallpage.nG = 0;  // global mapping
    }
    // Map the kernel image (loaded at physical 0x80000000) with 1 MB
    // sections.
    size_t kernelSize = reinterpret_cast<uintptr_t>(&__end) - 0x80000000;
    for (size_t offset = 0; offset < kernelSize; offset += 0x100000)
    {
        uintptr_t baseAddr = 0x80000000 + offset;
        pdir_offset = baseAddr >> 20;

        pdir[pdir_offset].descriptor.entry = baseAddr;
        pdir[pdir_offset].descriptor.section.type = 2;
        // ...
        pdir[pdir_offset].descriptor.section.domain = 2;
        // ...
        pdir[pdir_offset].descriptor.section.sectiontype = 0;
    }
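    // Because each descriptor.entry above is set to baseAddr itself, the
    // kernel ends up mapped virtual == physical across [0x80000000,
    // 0x80000000 + kernelSize), one 1 MB section per directory slot.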
    // Map the 4 MB block of second-level tables itself with sections, so
    // the tables appear in the KERNEL_PAGETABLES window once the MMU is on.
    vaddr = reinterpret_cast<uintptr_t>(KERNEL_PAGETABLES);
    paddr = 0x8FB00000;  // physical base of the block reserved above
    for (size_t offset = 0; offset < 0x400000; offset += 0x100000)
    {
        uintptr_t baseAddr = vaddr + offset;
        pdir_offset = baseAddr >> 20;

        pdir[pdir_offset].descriptor.entry = paddr + offset;
        pdir[pdir_offset].descriptor.section.type = 2;
        // ...
        pdir[pdir_offset].descriptor.section.sectiontype = 0;
        // Each 0x400-byte second-level table covers 1 MB, so this 1 MB
        // chunk of tables covers 1 GB of virtual space starting at:
        uintptr_t blockVBase = offset << 10;
        // Wire every first-level entry in that 1 GB to its table.
        for (int i = 0; i < 1024; i++)
        {
            uintptr_t firstVaddr = blockVBase + (i * 0x100000);
            uintptr_t ptbl_paddr = paddr + offset + (i * 0x400);
            pdir_offset = firstVaddr >> 20;
            if (pdir[pdir_offset].descriptor.entry)
                continue;  // already mapped (e.g. the kernel sections)

            pdir[pdir_offset].descriptor.entry = ptbl_paddr;
            pdir[pdir_offset].descriptor.pageTable.type = 1;
            // ...
            pdir[pdir_offset].descriptor.pageTable.sbz2 = 0;
            pdir[pdir_offset].descriptor.pageTable.ns = 0;
            pdir[pdir_offset].descriptor.pageTable.domain = /* ... */;
            pdir[pdir_offset].descriptor.pageTable.imp = 0;
        }
    }
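    // Sizing check for the wiring above: one 0x400-byte second-level table
    // covers 1 MB, so a 0x100000-byte chunk of tables holds 1024 tables
    // covering 1 GB. That is why blockVBase is offset << 10 (each byte of
    // table maps 1 KB of virtual space) and why the inner loop touches
    // 1024 directory entries per chunk.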
    // Program the translation-table registers, then enable the MMU.
    Processor::writeTTBR0(0);
    // ... (TTBR1 gets the physical page directory)
    Processor::writeTTBCR(/* ... */);
    asm volatile("MCR p15,0,%0,c3,c0,0" : : "r"(/* domain access value */));
    // ...
    uint32_t sctlr = 0;
    asm volatile("MRC p15,0,%0,c1,c0,0" : "=r"(sctlr));
    // ... (set the MMU-enable bit)
    asm volatile("MCR p15,0,%0,c1,c0,0" : : "r"(sctlr));

    return true;
}
ArmV7VirtualAddressSpace::ArmV7VirtualAddressSpace(
    void *Heap, physical_uintptr_t PhysicalPageDirectory,
    void *VirtualPageDirectory, void *VirtualPageTables, void *VirtualStack)
    // ... (member initialisers)
{
}

ArmV7KernelVirtualAddressSpace::ArmV7KernelVirtualAddressSpace()
    : ArmV7VirtualAddressSpace(
          KERNEL_VIRTUAL_HEAP, 0x8FAFC000, KERNEL_PAGEDIR, KERNEL_PAGETABLES,
          KERNEL_VIRTUAL_STACK)
{
    // Start with no escrow pages reserved.
    for (int i = 0; i < 256; i++)
        g_EscrowPages[i] = 0;
}
bool ArmV7KernelVirtualAddressSpace::isMapped(void *virtualAddress)
{
    return doIsMapped(virtualAddress);
}

bool ArmV7KernelVirtualAddressSpace::map(
    physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
{
    return doMap(physicalAddress, virtualAddress, flags);
}

void ArmV7KernelVirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)
{
    doGetMapping(virtualAddress, physicalAddress, flags);
}

void ArmV7KernelVirtualAddressSpace::setFlags(
    void *virtualAddress, size_t newFlags)
{
    doSetFlags(virtualAddress, newFlags);
}

void ArmV7KernelVirtualAddressSpace::unmap(void *virtualAddress)
{
    doUnmap(virtualAddress);
}
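// A minimal usage sketch of the public interface (addresses and flags
// here are illustrative, not from this file):
//
//   VirtualAddressSpace &vas = VirtualAddressSpace::getKernelAddressSpace();
//   void *vaddr = reinterpret_cast<void *>(0xE0000000);
//   if (vas.map(0x8F000000, vaddr,
//               VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode))
//   {
//       // ... use the mapping ...
//       vas.unmap(vaddr);
//   }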