20 #include "PhysicalMemoryManager.h" 21 #include "pedigree/kernel/BootstrapInfo.h" 22 #include "pedigree/kernel/LockGuard.h" 23 #include "pedigree/kernel/Log.h" 24 #include "pedigree/kernel/panic.h" 25 #include "pedigree/kernel/process/MemoryPressureManager.h" 26 #include "pedigree/kernel/process/Process.h" 27 #include "pedigree/kernel/process/Thread.h" 28 #include "pedigree/kernel/processor/MemoryRegion.h" 29 #include "pedigree/kernel/processor/Processor.h" 30 #include "pedigree/kernel/processor/ProcessorInformation.h" 31 #include "pedigree/kernel/processor/VirtualAddressSpace.h" 32 #include "pedigree/kernel/utilities/Vector.h" 33 #include "pedigree/kernel/utilities/utility.h" 36 #include "../x86/VirtualAddressSpace.h" 38 #include "../x64/VirtualAddressSpace.h" 41 #if defined(TRACK_PAGE_ALLOCATIONS) 42 #include "pedigree/kernel/debugger/commands/AllocationCommand.h" 45 #if defined(X86) && defined(DEBUGGER) 50 uint32_t g_PageBitmap[16384] = {0};
static void trackPages(ssize_t v, ssize_t p, ssize_t s)
{
    // Track the change against the current process, if there is one.
    Thread *pThread = Processor::information().getCurrentThread();
    if (!pThread)
        return;
    Process *pProcess = pThread->getParent();
    if (pProcess)
    {
        pProcess->trackPages(v, p, s);
    }
}
size_t X86CommonPhysicalMemoryManager::freePageCount() const
{
    return m_PageStack.freePages();
}
physical_uintptr_t
X86CommonPhysicalMemoryManager::allocatePage(size_t pageConstraints)
{
    static bool bDidHitWatermark = false;
    static bool bHandlingPressure = false;

    m_Lock.acquire(true);

    physical_uintptr_t ptr;

    // Handle memory pressure before dipping into the page stack.
    if (!bHandlingPressure)
    {
        if (m_PageStack.freePages() < MemoryPressureManager::getHighWatermark())
        {
            bHandlingPressure = true;

            // Drop the lock while the compact runs.
            m_Lock.release();

            WARNING_NOLOCK(
                "Memory pressure encountered, performing a compact...");
            if (!MemoryPressureManager::instance().compact())
                ERROR_NOLOCK("Compact did not alleviate any memory pressure.");
            else
                NOTICE_NOLOCK("Compact was successful.");

            m_Lock.acquire(true);

            bDidHitWatermark = true;
            bHandlingPressure = false;
        }
        else if (bDidHitWatermark)
        {
            ERROR_NOLOCK("<pressure was hit, but is no longer being hit>");
            bDidHitWatermark = false;
        }
    }

    ptr = m_PageStack.allocate(pageConstraints);
    if (!ptr)
    {
        panic("Out of memory.");
    }
#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(ptr), MemoryTracing::PageAlloc, 4096);
#endif

#if defined(X86) && defined(DEBUGGER)
    // Record the allocation in the debug bitmap (one bit per 4KiB frame).
    physical_uintptr_t ptr_bitmap = ptr / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    g_PageBitmap[idx] |= (1 << bit);
#endif
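    // Worked example for the bitmap update above (illustrative address, not
    // from the original source): a frame at physical 0x00234000 is frame
    // number 0x234 = 564, giving idx = 564 / 32 = 17 and bit = 564 % 32 = 20,
    // so bit 20 of g_PageBitmap[17] marks the frame as allocated.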
    m_Lock.release();

#if defined(TRACK_PAGE_ALLOCATIONS)
    if (!g_AllocationCommand.isMallocing())
    {
        g_AllocationCommand.allocatePage(ptr);
    }
#endif

    trackPages(0, 1, 0);

    return ptr;
}
void X86CommonPhysicalMemoryManager::freePageUnlocked(physical_uintptr_t page)
{
    if (!m_Lock.acquired())
        FATAL("X86CommonPhysicalMemoryManager::freePageUnlocked called without "
              "an acquired lock");

    // Check the page metadata before actually freeing (pinned pages stay
    // allocated).
    // ...
    if (result.hasValue())
    {
        struct page p = result.value();
        // ...
        m_PageMetadata.update(index, p);
        // ...
        m_PageMetadata.update(index, p);
        // ...
    }
#if defined(X86) && defined(DEBUGGER)
    // A clear bit here means the caller is freeing this page twice.
    physical_uintptr_t ptr_bitmap = page / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (!(g_PageBitmap[idx] & (1 << bit)))
    {
        // ...
        FATAL_NOLOCK("PhysicalMemoryManager DOUBLE FREE");
    }

    g_PageBitmap[idx] &= ~(1 << bit);
#endif
    // Return the page to the stack.
    m_PageStack.free(page, getPageSize());

#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(page), MemoryTracing::PageFree, 4096);
#endif

    trackPages(0, -1, 0);
}
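// trackPages() receives (virtual, physical, shared) deltas - presumably - so
// the call above decrements the physical-page count by one frame, mirroring
// the trackPages(0, 1, 0) call on the allocation path.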
void X86CommonPhysicalMemoryManager::pin(physical_uintptr_t page)
{
    // ...
    if (result.hasValue())
    {
        struct page p = result.value();
        // ...
        m_PageMetadata.update(index, p);
    }
    else
    {
        struct page p;
        // ...
        m_PageMetadata.insert(index, p);
    }
}
bool X86CommonPhysicalMemoryManager::allocateRegion(
    MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags,
    physical_uintptr_t start)
{
    // ...

    // Allocate a specific physical memory region?
    if (start != static_cast<physical_uintptr_t>(-1))
    {
        // ...
        if (((pageConstraints & continuous) != continuous) || /* ... */)
            panic("PhysicalMemoryManager::allocateRegion(): function misused");

        // Remove the requested region from the relevant range list.
        if ((pageConstraints & nonRamMemory) == nonRamMemory)
        {
            Region.setNonRamMemory(true);
            if (m_PhysicalRanges.allocateSpecific(
                    start, cPages * getPageSize()) == false)
            {
                if ((pageConstraints & force) != force)
                {
                    ERROR("PhysicalMemoryManager::allocateRegion() [specific] "
                          "- failed to get space from general range list and "
                          "force is not set");
                    return false;
                }
                else
                    Region.setForced(true);
            }
        }
        else
        {
            if (start < 0x100000 &&
                (start + cPages * getPageSize()) < 0x100000)
            {
                if (m_RangeBelow1MB.allocateSpecific(
                        start, cPages * getPageSize()) == false)
                {
                    ERROR("PhysicalMemoryManager::allocateRegion() [specific] "
                          "- failed to get space from <1MB range list");
                    return false;
                }
            }
            else if (
                start < 0x1000000 &&
                (start + cPages * getPageSize()) < 0x1000000)
            {
                if (m_RangeBelow16MB.allocateSpecific(
                        start, cPages * getPageSize()) == false)
                {
                    ERROR("PhysicalMemoryManager::allocateRegion() [specific] - "
                          "failed to get "
                          << cPages << " pages of memory from <16MB range list at "
                          << Hex << start);
                    return false;
                }
            }
            else if (start < 0x1000000)
            {
                ERROR("PhysicalMemoryManager: Memory region neither completely "
                      "below nor above 1MB");
                return false;
            }
            else
            {
                // Above 16MB there is no range list to reserve from.
                Region.setNonRamMemory(true);
                Region.setForced(true);
            }
        }
        // Allocate the virtual address space for the region.
        uintptr_t vAddress = 0;

        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        // Map the physical memory into the allocated virtual space.
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        for (size_t i = 0; i < cPages; i++)
            if (virtualAddressSpace.map(
                    start + i * PhysicalMemoryManager::getPageSize(),
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }

        // Fill in the region's members and register it.
        // ...

        return true;
    }
    else
    {
        // Constrain continuous allocations to the <16MB range list if no
        // address constraint was already given.
        if ((pageConstraints & continuous) == continuous)
            if ((pageConstraints & addressConstraints) != below1MB &&
                (pageConstraints & addressConstraints) != below16MB)
                pageConstraints =
                    (pageConstraints & ~addressConstraints) | below16MB;
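        // The pageConstraints bits used in this function: the
        // addressConstraints mask selects below1MB/below16MB/below4GB/below64GB
        // placement, `continuous` requests a physically contiguous block
        // (which the page stack cannot guarantee, hence the redirect above),
        // `virtualOnly` presumably skips physical backing altogether, and
        // `nonRamMemory`/`force` cover device memory outside the RAM ranges.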
        // Allocate the virtual address space for the region.
        uintptr_t vAddress = 0;
        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }
        uint32_t allocatedStart = 0;
        if (!(pageConstraints & virtualOnly))
        {
            VirtualAddressSpace &virtualAddressSpace =
                Processor::information().getVirtualAddressSpace();

            if ((pageConstraints & addressConstraints) == below1MB ||
                (pageConstraints & addressConstraints) == below16MB)
            {
                // Take a contiguous block from the relevant range list.
                if ((pageConstraints & addressConstraints) == below1MB)
                {
                    if (m_RangeBelow1MB.allocate(
                            cPages * getPageSize(), allocatedStart) == false)
                    {
                        ERROR("PhysicalMemoryManager::allocateRegion() - "
                              "failed to get space from <1MB range list");
                        return false;
                    }
                }
                else if ((pageConstraints & addressConstraints) == below16MB)
                {
                    if (m_RangeBelow16MB.allocate(
                            cPages * getPageSize(), allocatedStart) == false)
                    {
                        ERROR("PhysicalMemoryManager::allocateRegion() - "
                              "failed to get space from <16MB range list");
                        return false;
                    }
                }

                // ...
                for (size_t i = 0; i < cPages; i++)
                    if (virtualAddressSpace.map(
                            allocatedStart + i * getPageSize(),
                            reinterpret_cast<void *>(
                                vAddress + i * getPageSize()),
                            Flags) == false)
                    {
                        WARNING(
                            "AllocateRegion: VirtualAddressSpace::map failed.");
                        return false;
                    }
            }
            else
            {
                // Allocate pages from the page stack and map them one by one.
                for (size_t i = 0; i < cPages; i++)
                {
                    physical_uintptr_t page = m_PageStack.allocate(
                        pageConstraints & addressConstraints);
                    if (virtualAddressSpace.map(
                            page,
                            reinterpret_cast<void *>(
                                vAddress + i * getPageSize()),
                            Flags) == false)
                    {
                        WARNING(
                            "AllocateRegion: VirtualAddressSpace::map failed.");
                        return false;
                    }
                }
            }
        }

        // Fill in the region's members and register it.
        // ...

        return true;
    }
}
X86CommonPhysicalMemoryManager::~X86CommonPhysicalMemoryManager()
{
    NOTICE("Shutting down X86CommonPhysicalMemoryManager");
    // ...
    m_PageMetadata.clear();
}
void X86CommonPhysicalMemoryManager::initialise(const BootstrapStruct_t &Info)
{
    // ...

    physical_uintptr_t top = 0;
    size_t pageSize = getPageSize();

    // Fill the page stack with the usable memory above 16MB.
    void *MemoryMap = Info.getMemoryMap();
    if (!MemoryMap)
        panic("no memory map provided by the bootloader");

    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        NOTICE(
            " " << Hex << addr << " - " << (addr + length)
                << ", type: " << type);

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);

        // ... (only usable RAM entries are considered from here on)

        if ((addr + length) > top)
            top = addr + length;

        uint64_t rangeTop = addr + length;
        if (rangeTop < 0x1000000)
        {
            // Entirely below 16MB: managed by the range lists further down.
            // ...
        }
        else if (rangeTop >= 0x100000000ULL)
        {
            // Crosses the 4GB boundary: the portion above 4GB is dealt with
            // in initialise64().
            // ...
        }

        if (addr < 0x1000000)
        {
            // Clip to 16MB; everything below that mark is range-list managed.
            length = rangeTop - 0x1000000;
            addr = 0x1000000;
        }

        // ...
        m_PageStack.increaseCapacity((length / pageSize) + 1);
        m_PageStack.free(addr, length);
    }

    m_PageStack.markBelow4GReady();

    m_PageMetadata.reserve(top >> 12);
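    // top >> 12 is the number of 4KiB frames up to the highest physical
    // address seen in the map above, i.e. one metadata slot is reserved per
    // physical frame.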
    // Fill the range lists with the usable memory below 1MB / 16MB, and note
    // any ACPI regions.
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        if (type == 1)
        {
            if (addr < 0x100000)
            {
                // Entries starting below 1MB are assumed not to cross the
                // 1MB boundary.
                if ((addr + length) >= 0x100000)
                    panic("PhysicalMemoryManager: strange memory-map");

                m_RangeBelow1MB.free(addr, length);
            }
            else if (addr < 0x1000000)
            {
                uint64_t upperBound = addr + length;
                if (upperBound >= 0x1000000)
                    upperBound = 0x1000000;

                m_RangeBelow16MB.free(addr, upperBound - addr);
            }
        }
        else if (type == 3 || type == 4)
        {
            m_AcpiRanges.free(addr, length);
        }

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }
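    // The entry types tested in the loop above follow the BIOS E820/Multiboot
    // convention: type 1 is usable RAM, type 3 is ACPI-reclaimable memory and
    // type 4 is ACPI NVS; the latter two are recorded in m_AcpiRanges rather
    // than handed to the page allocator.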
    // Remove the kernel image from the <16MB range list.
    extern void *kernel_start;
    extern void *kernel_end;
    if (m_RangeBelow16MB.allocateSpecific(
            reinterpret_cast<uintptr_t>(&kernel_start) -
                reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
            reinterpret_cast<uintptr_t>(&kernel_end) -
                reinterpret_cast<uintptr_t>(&kernel_start)) == false)
    {
        panic("PhysicalMemoryManager: could not remove the kernel image from "
              "the range list");
    }

#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("free memory ranges (below 1MB):");
    for (size_t i = 0; i < m_RangeBelow1MB.size(); i++)
        NOTICE(
            " " << Hex << m_RangeBelow1MB.getRange(i).address << " - "
                << (m_RangeBelow1MB.getRange(i).address +
                    m_RangeBelow1MB.getRange(i).length));
    NOTICE("free memory ranges (below 16MB):");
    for (size_t i = 0; i < m_RangeBelow16MB.size(); i++)
        NOTICE(
            " " << Hex << m_RangeBelow16MB.getRange(i).address << " - "
                << (m_RangeBelow16MB.getRange(i).address +
                    m_RangeBelow16MB.getRange(i).length));
    NOTICE("ACPI ranges:");
    for (size_t i = 0; i < m_AcpiRanges.size(); i++)
        NOTICE(
            " " << Hex << m_AcpiRanges.getRange(i).address << " - "
                << (m_AcpiRanges.getRange(i).address +
                    m_AcpiRanges.getRange(i).length));
#endif
    // Initialise the list of physical ranges: start with the whole 32-bit
    // space free, then remove every region described by the memory map,
    // leaving only the holes.
    m_PhysicalRanges.free(0, 0x100000000ULL);
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);

        if (/* ... */)
        {
            WARNING("Memory region " << addr << " not used.");
        }
        else if (addr >= 0x100000000ULL)
        {
            // Above 4GB: handled in initialise64().
        }
        else if (m_PhysicalRanges.allocateSpecific(addr, length) == false)
            panic("PhysicalMemoryManager: Failed to create the list of ranges "
                  "of free physical space");

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }
#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("physical memory ranges:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
#endif

    // Initialise the range of virtual space available for MemoryRegions.
    m_MemoryRegions.free(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MEMORYREGION_ADDRESS),
        KERNEL_VIRTUAL_MEMORYREGION_SIZE);
}
void X86CommonPhysicalMemoryManager::initialise64(const BootstrapStruct_t &Info)
{
    NOTICE("64-bit memory-map:");

    // Fill the page stack with the usable memory above 4GB.
    uint64_t base = 0;
    size_t numPagesOver4G = 0;

    void *MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        if (addr >= 0x100000000ULL)
        {
            if (base == 0 || addr < base)
                base = addr;

            NOTICE(
                " " << Hex << addr << " - " << (addr + length)
                    << ", type: " << type);

            // ... (only usable RAM is pushed onto the stack)
            size_t numPages = length / getPageSize();
            m_PageStack.increaseCapacity(numPages);
            m_PageStack.free(addr, length);

            m_PhysicalRanges.free(addr, length);

            numPagesOver4G += numPages;
        }

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

    // Map the physical memory above 4GB into the kernel's address space.
    if (VirtualAddressSpace::getKernelAddressSpace().mapHuge(
            base, reinterpret_cast<void *>(0xFFFF800000000000 + base),
            /* ... */) == false)
    {
        FATAL("failed to map physical memory");
    }

    NOTICE(" --> " << numPagesOver4G << " pages exist above 4G!");

    m_PageStack.markAbove4GReady();
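    // The page stack is split by physical address: stack 0 holds frames below
    // 4GB, with stacks 1 and 2 (x86_64 builds only) holding frames above 4GB
    // and above 64GB respectively. markBelow4GReady()/markAbove4GReady() gate
    // allocate() so it does not pop from a stack before the corresponding
    // memory-map pass has populated it.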
    // Add any ACPI ranges that live above 4GB.
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        if ((Info.getMemoryMapEntryType(MemoryMap) == 3 ||
             Info.getMemoryMapEntryType(MemoryMap) == 4) &&
            Info.getMemoryMapEntryAddress(MemoryMap) >= 0x100000000ULL)
        {
            m_AcpiRanges.free(
                Info.getMemoryMapEntryAddress(MemoryMap),
                Info.getMemoryMapEntryLength(MemoryMap));
        }

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("ACPI ranges (x64 added):");
    for (size_t i = 0; i < m_AcpiRanges.size(); i++)
        NOTICE(
            " " << Hex << m_AcpiRanges.getRange(i).address << " - "
                << (m_AcpiRanges.getRange(i).address +
                    m_AcpiRanges.getRange(i).length));
#endif
    // Remove the regions described by the memory map (above 4GB) from the
    // physical range list.
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        if ((Info.getMemoryMapEntryAddress(MemoryMap)) > ~0ULL)
        {
            WARNING(
                "Memory region " << Info.getMemoryMapEntryAddress(MemoryMap)
                                 << " not used.");
        }
        else if (
            (Info.getMemoryMapEntryAddress(MemoryMap) >= 0x100000000ULL) &&
            (m_PhysicalRanges.allocateSpecific(
                 Info.getMemoryMapEntryAddress(MemoryMap),
                 Info.getMemoryMapEntryLength(MemoryMap)) == false))
            panic("PhysicalMemoryManager: Failed to create the list of ranges "
                  "of free physical space");

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("physical memory ranges, 64-bit added:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
#endif
}
void X86CommonPhysicalMemoryManager::initialisationDone()
{
    extern void *kernel_init;
    extern void *kernel_init_end;

    NOTICE("PhysicalMemoryManager: kernel initialisation complete, cleaning "
           "up...");

    // Unmap the .init section, page by page.
    VirtualAddressSpace &kernelSpace =
        VirtualAddressSpace::getKernelAddressSpace();
    size_t count = (reinterpret_cast<uintptr_t>(&kernel_init_end) -
                    reinterpret_cast<uintptr_t>(&kernel_init)) /
                   getPageSize();
    for (size_t i = 0; i < count; i++)
    {
        void *vAddress = adjust_pointer(
            reinterpret_cast<void *>(&kernel_init), i * getPageSize());

        // Get the physical address backing this page.
        size_t flags;
        physical_uintptr_t pAddress;
        kernelSpace.getMapping(vAddress, pAddress, flags);

        // Unmap it from the kernel address space.
        kernelSpace.unmap(vAddress);
    }

    // Return the physical memory that backed .init to the <16MB range list.
    m_RangeBelow16MB.free(
        reinterpret_cast<uintptr_t>(&kernel_init) -
            reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
        count * getPageSize());

    NOTICE(
        "PhysicalMemoryManager: cleaned up " << Dec << (count * 4) << Hex
                                             << "KB of init-only code.");
}
X86CommonPhysicalMemoryManager::X86CommonPhysicalMemoryManager()
    : m_PageStack(), m_RangeBelow1MB(), m_RangeBelow16MB(), m_PhysicalRanges(),
      /* ... */
{
    // ...
}
void X86CommonPhysicalMemoryManager::unmapRegion(MemoryRegion *pRegion)
{
    // ...
    physical_uintptr_t phys = pRegion->physicalAddress();
    if (pRegion->getNonRamMemory())
    {
        // Non-RAM (device) memory goes back to the general physical range
        // list, unless it was forcibly claimed outside of it.
        if (!pRegion->getForced())
            m_PhysicalRanges.free(phys, pRegion->size());
    }
    else
    {
        if (phys < 0x100000 &&
            (phys + pRegion->size()) < 0x100000)
        {
            m_RangeBelow1MB.free(phys, pRegion->size());
        }
        else if (phys < 0x1000000 &&
                 (phys + pRegion->size()) < 0x1000000)
        {
            m_RangeBelow16MB.free(phys, pRegion->size());
        }
        else if (phys < 0x1000000)
        {
            ERROR("PhysicalMemoryManager: Memory region neither "
                  "completely below nor above 1MB");
            return;
        }
    }

    // Unmap the region page by page, returning RAM pages to the stack.
    VirtualAddressSpace &virtualAddressSpace =
        Processor::information().getVirtualAddressSpace();
    size_t cPages = pRegion->size() / PhysicalMemoryManager::getPageSize();
    for (size_t i = 0; i < cPages; i++)
    {
        void *vAddr = reinterpret_cast<void *>(
            reinterpret_cast<uintptr_t>(pRegion->virtualAddress()) +
            i * PhysicalMemoryManager::getPageSize());
        if (!virtualAddressSpace.isMapped(vAddr))
        {
            // ...
        }

        physical_uintptr_t pAddr;
        size_t flags;
        virtualAddressSpace.getMapping(vAddr, pAddr, flags);

        if (!pRegion->getNonRamMemory() && pAddr > 0x1000000)
            m_PageStack.free(pAddr, getPageSize());

        virtualAddressSpace.unmap(vAddr);
    }

    // ... (release the virtual range and drop the region from the list)
}
physical_uintptr_t
X86CommonPhysicalMemoryManager::PageStack::allocate(size_t constraints)
{
    // Pick the preferred stack for the given address constraint; unconstrained
    // allocations prefer the highest stack so that memory below 4GB is
    // preserved for callers that need it.
    size_t index = 0;
    // ...

    if (!m_StackReady[index])
    {
        // ...
        if (!m_StackReady[index])
        {
            // ...
            while (!m_StackReady[index])
            {
                // ...
            }
        }
    }

    // Fall back to a lower stack if the preferred one is exhausted or not yet
    // ready.
    if (index == 2 && (m_StackMax[2] == m_StackSize[2] || !m_StackReady[2]))
        index = 1;
    if (index == 1 && (m_StackMax[1] == m_StackSize[1] || !m_StackReady[1]))
        index = 0;

    physical_uintptr_t result = 0;
    if ((m_StackMax[index] != m_StackSize[index]) && m_StackSize[index])
    {
        if (index == 0)
        {
            m_StackSize[0] -= 4;
            result = *(
                reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4);
        }
        else
        {
            m_StackSize[index] -= 8;
            result = *(
                reinterpret_cast<uint64_t *>(m_Stack[index]) +
                m_StackSize[index] / 8);
        }
        // ...
    }

    // ...
    return result;
}
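// Note the two entry widths used when popping above: stack 0 stores below-4GB
// frame addresses as 4-byte uint32_t entries (hence the "/ 4"), while stacks 1
// and 2 store 8-byte uint64_t entries (hence the "-= 8" and "/ 8").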
template <class T>
static void performPush(
    T *stack, size_t &stackSize, uint64_t physicalAddress, size_t count)
{
    size_t nextEntry = stackSize / sizeof(T);

    // Append one entry per page in the freed range.
    for (size_t i = 0; i < count; ++i)
    {
        stack[nextEntry + i] = static_cast<T>(
            physicalAddress + (i * PhysicalMemoryManager::getPageSize()));
    }

    stackSize += sizeof(T) * count;
}
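// Illustrative example (values not from the original source): with T =
// uint64_t, count = 2 and physicalAddress = 0x200000, performPush() appends
// the entries 0x200000 and 0x201000 and grows stackSize by 16 bytes.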
void X86CommonPhysicalMemoryManager::PageStack::free(
    uint64_t physicalAddress, size_t length)
{
    // Select the stack based on where the freed range sits physically.
    size_t index = 0;
#if defined(X64)
    if (physicalAddress >= 0x100000000ULL)
        index = 1;
    if (physicalAddress >= 0x1000000000ULL)
        index = 2;
#endif

    // ... (numPages = number of pages that will be pushed below)

    uint64_t topPhysical = physicalAddress + length;

    for (; physicalAddress < topPhysical; physicalAddress += getPageSize())
    {
        // Freed pages may first be consumed by maybeMap() as backing for the
        // stack itself.
        if (!maybeMap(index, physicalAddress))
        {
            // ...
        }
        // ...
    }

    // ...
    if (index == 0)
    {
        performPush(
            reinterpret_cast<uint32_t *>(m_Stack[index]), m_StackSize[index],
            physicalAddress, numPages);
    }
    else
    {
        performPush(
            reinterpret_cast<uint64_t *>(m_Stack[index]), m_StackSize[index],
            physicalAddress, numPages);
    }

    if (g_AllocedPages > 0)
    {
        if (g_AllocedPages >= numPages)
        {
            g_AllocedPages -= numPages;
            // ...
        }
        // ...
    }

    m_FreePages += numPages;
}
X86CommonPhysicalMemoryManager::PageStack::PageStack()
{
    m_DesiredCapacity = 0;

    for (size_t i = 0; i < StackCount; i++)
    {
        // ...
        m_StackReady[i] = false;
    }

    m_Stack[0] = KERNEL_VIRTUAL_PAGESTACK_4GB;
#if defined(X64)
    m_Stack[1] = KERNEL_VIRTUAL_PAGESTACK_ABV4GB1;
    m_Stack[2] = KERNEL_VIRTUAL_PAGESTACK_ABV4GB2;
#endif

    // ...
    for (size_t i = 1; i < StackCount; ++i)
    {
        // ...
        m_StackReady[i] = true;
    }

    // ...
    m_StackReady[0] = true;
}
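// The page stacks live at fixed kernel virtual addresses
// (KERNEL_VIRTUAL_PAGESTACK_*) and start with no physical backing; maybeMap()
// below consumes freed frames on demand to back further stack pages whenever
// m_Capacity falls short of m_DesiredCapacity.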
bool X86CommonPhysicalMemoryManager::PageStack::maybeMap(
    size_t index, uint64_t physicalAddress)
{
    bool mapped = false;

    // The next page of stack backing that is not yet mapped.
    void *virtualAddress = adjust_pointer(m_Stack[index], m_StackMax[index]);

    // Nothing to do if the stack already has enough capacity.
    if (m_Capacity >= m_DesiredCapacity)
    {
        return false;
    }

    // ...
    if (index == 0)
    {
        // ...
        if (AddressSpace.mapPageStructures(
                physicalAddress, virtualAddress,
                VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write))
        {
            mapped = true;
        }
    }
    else
    {
#if defined(X64)
        // ...
        if (AddressSpace.mapPageStructuresAbove4GB(
                physicalAddress, virtualAddress,
                VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write))
        {
            mapped = true;
        }
#else
        FATAL("PageStack::free - index > 0 when not built as x86_64");
#endif
    }

    // Did another page of stack backing become available?
    if (AddressSpace.isMapped(virtualAddress))
    {
        // Stack 0 holds 32-bit entries, the others 64-bit entries.
        size_t entrySize = sizeof(uint32_t);
        if (index > 0)
            entrySize = sizeof(uint64_t);

        // ... (extend m_StackMax[index] and m_Capacity accordingly)
    }

    // ...
    if (m_Capacity >= m_DesiredCapacity)
    {
        // ...
    }

    return mapped;
}