20 #include "VirtualAddressSpace.h" 21 #include "pedigree/kernel/LockGuard.h" 22 #include "pedigree/kernel/panic.h" 23 #include "pedigree/kernel/process/Process.h" 24 #include "pedigree/kernel/process/Scheduler.h" 25 #include "pedigree/kernel/processor/PhysicalMemoryManager.h" 26 #include "pedigree/kernel/processor/Processor.h" 27 #include "pedigree/kernel/utilities/utility.h" 29 #include "PhysicalMemoryManager.h" 38 KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_STACK);
typedef void *(*malloc_t)(size_t);
typedef void *(*realloc_t)(void *, size_t);
typedef void (*free_t)(void *);
// Resolve the host C library's allocator entry points via RTLD_NEXT.
void *__libc_malloc(size_t n)
{
    static malloc_t local = (malloc_t) dlsym(RTLD_NEXT, "malloc");
    return local(n);
}

void *__libc_realloc(void *p, size_t n)
{
    static realloc_t local = (realloc_t) dlsym(RTLD_NEXT, "realloc");
    return local(p, n);
}

void __libc_free(void *p)
{
    static free_t local = (free_t) dlsym(RTLD_NEXT, "free");
    local(p);
}
VirtualAddressSpace &VirtualAddressSpace::getKernelAddressSpace()
{
    return HostedVirtualAddressSpace::m_KernelSpace;
}
bool HostedVirtualAddressSpace::memIsInHeap(void *pMem)
{
    if (pMem < KERNEL_VIRTUAL_HEAP)
        return false;
    // ...
}

void *HostedVirtualAddressSpace::getEndOfHeap()
{
    return adjust_pointer(KERNEL_VIRTUAL_HEAP, KERNEL_VIRTUAL_HEAP_SIZE);
}
bool HostedVirtualAddressSpace::isAddressValid(void *virtualAddress)
{
    if (reinterpret_cast<uint64_t>(virtualAddress) < 0x0008000000000000ULL ||
        reinterpret_cast<uint64_t>(virtualAddress) >= 0xFFF8000000000000ULL)
        return true;
    return false;
}
// In HostedVirtualAddressSpace::isMapped(): scan the known-maps table.
for (size_t i = 0; i < m_KnownMapsSize; ++i)
{
    if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
        return true;
}
bool HostedVirtualAddressSpace::map(
    physical_uintptr_t physAddress, void *virtualAddress, size_t flags)
{
    // ...

    // Kernel-mode mappings belong to the shared kernel address space.
    if ((virtualAddress >= KERNEL_SPACE_START) || (flags & KernelMode))
    {
        if (this != &m_KernelSpace)
            return m_KernelSpace.map(physAddress, virtualAddress, flags);
    }

    // Install the page as a fixed, shared host mapping. The backing file
    // descriptor and offset arguments are elided in this listing.
    int prot = toFlags(flags, true);
    void *r = mmap(
        virtualAddress, PhysicalMemoryManager::getPageSize(), prot,
        MAP_FIXED | MAP_SHARED, ...);
    assert(r == virtualAddress);

    // Grow the known-mappings table if it is full.
    if (m_numKnownMaps == m_KnownMapsSize)
    {
        size_t oldSize = m_KnownMapsSize;
        if (m_KnownMapsSize == 0)
        {
            // ... (initial table size elided in the listing)
        }
        else
            m_KnownMapsSize *= 2;

        size_t newSizeBytes = sizeof(mapping_t) * m_KnownMapsSize;
        if (!m_pKnownMaps)
            m_pKnownMaps = (mapping_t *) __libc_malloc(newSizeBytes);
        else
            m_pKnownMaps =
                (mapping_t *) __libc_realloc(m_pKnownMaps, newSizeBytes);

        // Newly added slots start out inactive.
        for (size_t i = oldSize; i < m_KnownMapsSize; ++i)
            m_pKnownMaps[i].active = false;
    }

    // Find a free slot, starting from the slot of the most recent unmap.
    bool bRegistered = false;
    size_t idx = m_nLastUnmap;
    for (; idx < m_KnownMapsSize; ++idx)
    {
        if (m_pKnownMaps[idx].active)
            continue;
        bRegistered = true;
        break;
    }
    if (!bRegistered)
    {
        // Wrap around and search the slots before the last unmap point.
        for (idx = 0; idx < m_nLastUnmap; ++idx)
        {
            if (m_pKnownMaps[idx].active)
                continue;
            bRegistered = true;
            break;
        }
    }
    if (!bRegistered)
        panic("Fatal algorithmic error in HostedVirtualAddressSpace::map");

    // Record the new mapping.
    m_pKnownMaps[idx].active = true;
    m_pKnownMaps[idx].vaddr = virtualAddress;
    m_pKnownMaps[idx].paddr = physAddress;
    m_pKnownMaps[idx].flags = flags;

    // ...
}
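// The bookkeeping above assumes a per-page record roughly shaped like the
// sketch below. Field names are taken from their uses in this file; the
// actual mapping_t definition lives in the class header and may differ.
struct mapping_t_sketch
{
    bool active;               // slot in use?
    void *vaddr;               // mapped virtual address
    physical_uintptr_t paddr;  // backing physical address
    size_t flags;              // VirtualAddressSpace flag bits
};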
void HostedVirtualAddressSpace::getMapping(
    void *virtualAddress, physical_uintptr_t &physAddress, size_t &flags)
{
    // ...
    // Kernel-space addresses are resolved against the kernel address space:
    //     m_KernelSpace.getMapping(virtualAddress, physAddress, flags);
    // ...

    // Align the lookup to the containing page.
    size_t pageSize = PhysicalMemoryManager::getPageSize();
    uintptr_t alignedVirtualAddress =
        reinterpret_cast<uintptr_t>(virtualAddress) & ~(pageSize - 1);
    virtualAddress = reinterpret_cast<void *>(alignedVirtualAddress);

    for (size_t i = 0; i < m_KnownMapsSize; ++i)
    {
        if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
        {
            physAddress = m_pKnownMaps[i].paddr;
            flags = fromFlags(m_pKnownMaps[i].flags, true);
            return;
        }
    }

    panic("HostedVirtualAddressSpace::getMapping - function misused");
}
void HostedVirtualAddressSpace::setFlags(void *virtualAddress, size_t newFlags)
{
    // ...
    WARNING(
        "setFlags called with KernelMode as a flag, page is not "
        "mapped in kernel.");
    // ...

    // Update the cached flags in the known-maps table.
    for (size_t i = 0; i < m_KnownMapsSize; ++i)
    {
        if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
        {
            m_pKnownMaps[i].flags = newFlags;
            break;
        }
    }

    size_t flags = toFlags(newFlags, true);
    // ...
}
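// Illustrative sketch only: toFlags(x, true) evidently yields host mmap()
// protection bits from VirtualAddressSpace flag bits (map() above passes the
// result as mmap's prot argument). The real toFlags() is defined elsewhere;
// a plausible shape is:
static int to_prot_sketch(size_t flags)
{
    int prot = PROT_READ;
    if (flags & VirtualAddressSpace::Write)
        prot |= PROT_WRITE;
    if (flags & VirtualAddressSpace::Execute)
        prot |= PROT_EXEC;
    return prot;
}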
void HostedVirtualAddressSpace::unmap(void *virtualAddress)
{
    // ...
    for (size_t i = 0; i < m_KnownMapsSize; ++i)
    {
        if (m_pKnownMaps[i].active && m_pKnownMaps[i].vaddr == virtualAddress)
        {
            m_pKnownMaps[i].active = false;
            // ...
            break;
        }
    }
    // ...
}
// In HostedVirtualAddressSpace::clone(): for each entry in the known-maps
// table ("mapping" points at the current entry), set up the child's copy.
if (!mapping->active)
    continue;

// ...

// Shared mappings get special handling (elided in this listing).
if (mapping->flags & Shared)
{
    // ...
}

// ...

// Writable pages are downgraded so a later write faults and the page can
// then be copied (copy-on-write).
if (mapping->flags & Write)
{
    // ...
    mapping->flags &= ~Write;
}
// In HostedVirtualAddressSpace::revertToKernelAddressSpace(): tear down all
// userspace state, keeping only the kernel mappings.
if (m_pStackTop < KERNEL_SPACE_START)
{
    // ... (reset the stack allocation cursor)
}
for (Vector<Stack *>::Iterator it = m_freeStacks.begin();
     it != m_freeStacks.end(); ++it)
{
    // ... (drop cached stacks)
}
if (m_Heap < KERNEL_SPACE_START)
{
    // ... (reset the heap)
}
for (size_t i = 0; i < m_KnownMapsSize; ++i)
{
    if (m_pKnownMaps[i].active)
    {
        // ... (unmap userspace entries)
        m_pKnownMaps[i].active = false;
    }
    else if (m_pKnownMaps[i].vaddr > KERNEL_SPACE_START)
    {
        // ... (release the backing page at m_pKnownMaps[i].paddr)
        m_pKnownMaps[i].active = false;
    }
}
VirtualAddressSpace::Stack *HostedVirtualAddressSpace::allocateStack()
{
    size_t sz = USERSPACE_VIRTUAL_STACK_SIZE;
    if (m_bKernelSpace)
        sz = KERNEL_STACK_SIZE;
    return doAllocateStack(sz);
}

VirtualAddressSpace::Stack *
HostedVirtualAddressSpace::allocateStack(size_t stackSz)
{
    // ...
    return doAllocateStack(stackSz);
}
VirtualAddressSpace::Stack *
HostedVirtualAddressSpace::doAllocateStack(size_t sSize)
{
    size_t pageSz = PhysicalMemoryManager::getPageSize();
    // ...
    if (this == &m_KernelSpace)
    {
        // ... (kernel stacks are handled specially)
    }

    void *pStack = 0;

    // Reuse a cached stack if one is large enough.
    if (m_freeStacks.count() != 0)
    {
        Stack *poppedStack = m_freeStacks.popBack();
        if (poppedStack->getSize() >= sSize)
            pStack = poppedStack->getTop();
        // ...
    }

    if (!pStack)
    {
        // Carve a new stack (plus a guard gap) out of the downward-growing
        // stack region.
        pStack = m_pStackTop;
        m_pStackTop =
            adjust_pointer(m_pStackTop, -static_cast<ssize_t>(sSize + pageSz));
    }

    // Map the topmost page of the stack up front; the physical page
    // allocation and base flags are elided in this listing.
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(pStack) - pageSz;
    // ...
    if (!map(phys, reinterpret_cast<void *>(firstPage), flags))
        WARNING("map() failed in doAllocateStack");

    // The remaining pages are mapped copy-on-write, so they are only backed
    // for real once they are written to.
    uintptr_t stackBottom = reinterpret_cast<uintptr_t>(pStack) - sSize;
    for (uintptr_t addr = stackBottom; addr < firstPage; addr += pageSz)
    {
        size_t map_flags = 0;
        // ...
        if (!map(phys, reinterpret_cast<void *>(addr), flags | map_flags))
            WARNING("CoW map() failed in doAllocateStack");
    }

    // ...
}
void HostedVirtualAddressSpace::freeStack(Stack *pStack)
{
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Walk the stack page by page and release whatever is mapped.
    uintptr_t stackTop = reinterpret_cast<uintptr_t>(pStack->getTop());
    for (size_t i = 0; i < pStack->getSize(); i += pageSz)
    {
        stackTop -= pageSz;
        void *v = reinterpret_cast<void *>(stackTop);
        // ...
        physical_uintptr_t phys = 0;
        // ... (look up the mapping, unmap it, free the physical page)
    }

    // Cache the stack for reuse by a later allocateStack().
    m_freeStacks.pushBack(pStack);
}
HostedVirtualAddressSpace::HostedVirtualAddressSpace()
    : // ... (base-class initialiser elided)
      m_pStackTop(USERSPACE_VIRTUAL_STACK), m_freeStacks(),
      m_bKernelSpace(false), m_Lock(false, true), m_pKnownMaps(0),
      m_numKnownMaps(0), m_nLastUnmap(0)
{
}
// Protected constructor used to build the statically-constructed kernel
// address space above.
HostedVirtualAddressSpace::HostedVirtualAddressSpace(
    void *Heap, void *VirtualStack)
// ... (initialiser list elided in this listing)
{
}

// ... (a later mmap() call in this file, whose surrounding context is lost
//      in the listing, again uses MAP_FIXED | MAP_SHARED)
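// Hypothetical usage sketch (not from this file): how a caller might drive
// the VirtualAddressSpace interface implemented above. Names follow the
// declarations referenced in this listing.
void example_usage()
{
    VirtualAddressSpace &va = VirtualAddressSpace::getKernelAddressSpace();

    physical_uintptr_t page = PhysicalMemoryManager::instance().allocatePage();
    void *vaddr = 0;  // a page-aligned kernel virtual address would go here

    if (va.map(
            page, vaddr,
            VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode))
    {
        // ... use the mapping ...
        va.unmap(vaddr);
    }
    PhysicalMemoryManager::instance().freePage(page);

    // Stacks are allocated and recycled through the same interface.
    VirtualAddressSpace::Stack *pStack = va.allocateStack();
    va.freeStack(pStack);
}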