20 #include "PhysicalMemoryManager.h" 21 #include "pedigree/kernel/LockGuard.h" 22 #include "pedigree/kernel/Log.h" 23 #include "pedigree/kernel/panic.h" 24 #include "pedigree/kernel/processor/MemoryRegion.h" 25 #include "pedigree/kernel/processor/Processor.h" 26 #include "pedigree/kernel/utilities/Cache.h" 27 #include "pedigree/kernel/utilities/assert.h" 28 #include "pedigree/kernel/utilities/utility.h" 30 #if defined(TRACK_PAGE_ALLOCATIONS) 31 #include "pedigree/kernel/debugger/commands/AllocationCommand.h" 34 #include "VirtualAddressSpace.h" 36 #include "pedigree/kernel/core/SlamAllocator.h" 37 #include "pedigree/kernel/process/MemoryPressureManager.h" 51 uint32_t g_PageBitmap[16384] = {0};
physical_uintptr_t HostedPhysicalMemoryManager::allocatePage()
{
    static bool bDidHitWatermark = false;
    static bool bHandlingPressure = false;

    m_Lock.acquire(true);

    physical_uintptr_t ptr;

    // Some pressure-relief paths allocate pages themselves, so avoid
    // recursing into the handler while it is already running.
    if (!bHandlingPressure)
    {
        if (m_PageStack.freePages() < MemoryPressureManager::getHighWatermark())
        {
            bHandlingPressure = true;

            // Drop the lock so the compact can free pages.
            m_Lock.release();

            WARNING_NOLOCK(
                "Memory pressure encountered, performing a compact...");
            if (!MemoryPressureManager::instance().compact())
                ERROR_NOLOCK("Compact did not alleviate any memory pressure.");
            else
                NOTICE_NOLOCK("Compact was successful.");

            m_Lock.acquire(true);

            bDidHitWatermark = true;
            bHandlingPressure = false;
        }
        else if (bDidHitWatermark)
        {
            ERROR_NOLOCK("<pressure was hit, but is no longer being hit>");
            bDidHitWatermark = false;
        }
    }

    ptr = m_PageStack.allocate(0);
    if (!ptr)
    {
        panic("Out of memory.");
    }
    // Verify against the allocation bitmap: handing out the same page twice
    // is a fatal error.
    physical_uintptr_t ptr_bitmap = ptr / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (g_PageBitmap[idx] & (1 << bit))
    {
        m_Lock.release();
        FATAL_NOLOCK("PhysicalMemoryManager allocate()d a page twice");
    }
    g_PageBitmap[idx] |= (1 << bit);
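    // Worked example of the bitmap indexing above (illustration only): page
    // address 0x5000 gives ptr_bitmap = 5, idx = 0, bit = 5, i.e. bit 5 of
    // g_PageBitmap[0]. At one bit per 4 KiB page, the 16384-word bitmap
    // covers 16384 * 32 * 4 KiB = 2 GiB of physical memory.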
    m_Lock.release();

#if defined(TRACK_PAGE_ALLOCATIONS)
    if (Processor::m_Initialised == 2)
    {
        if (!g_AllocationCommand.isMallocing())
        {
            g_AllocationCommand.allocatePage(ptr);
        }
    }
#endif

    return ptr;
}
void HostedPhysicalMemoryManager::freePage(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    freePageUnlocked(page);
}
void HostedPhysicalMemoryManager::freePageUnlocked(physical_uintptr_t page)
{
    if (!m_Lock.acquired())
        FATAL("HostedPhysicalMemoryManager::freePageUnlocked called without an "
              "acquired lock");

    // Check for a pinned page: only the last unpin really frees it.
    size_t index = page >> 12;
    if (m_PageMetadata && m_PageMetadata[index].active)
    {
        if (--m_PageMetadata[index].refcount)
        {
            // References remain; don't free yet.
            return;
        }

        // No references remain.
        m_PageMetadata[index].active = false;
    }

    // Verify against the allocation bitmap before returning the page.
    physical_uintptr_t ptr_bitmap = page / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (!(g_PageBitmap[idx] & (1 << bit)))
    {
        m_Lock.release();
        FATAL_NOLOCK("PhysicalMemoryManager DOUBLE FREE");
    }
    g_PageBitmap[idx] &= ~(1 << bit);

    m_PageStack.free(page);
}
void HostedPhysicalMemoryManager::pin(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    size_t index = page >> 12;
    if (m_PageMetadata[index].active)
    {
        ++m_PageMetadata[index].refcount;
    }
    else
    {
        m_PageMetadata[index].refcount = 1;
        m_PageMetadata[index].active = true;
    }
}
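// Note on pin semantics (inferred from the metadata handling above): each
// pin() adds a reference, and freePage() only returns the page to the stack
// once the refcount falls back to zero, so a page pinned twice must also be
// freed twice.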
bool HostedPhysicalMemoryManager::allocateRegion(
    MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags,
    physical_uintptr_t start)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    // Allocate a specific physical memory region (always physically
    // continuous).
    if (start != static_cast<physical_uintptr_t>(-1))
    {
        // Page-align the start address.
        start &= ~(getPageSize() - 1);

        if ((pageConstraints & continuous) != continuous)
            panic("PhysicalMemoryManager::allocateRegion(): function misused");

        // Remove the memory from the range-lists (if desired/possible).
        if ((pageConstraints & nonRamMemory) == nonRamMemory)
        {
            Region.setNonRamMemory(true);
            if (m_PhysicalRanges.allocateSpecific(
                    start, cPages * getPageSize()) == false)
            {
                if ((pageConstraints & force) != force)
                    return false;
                Region.setForced(true);
            }
        }
        else
        {
            Region.setNonRamMemory(true);
            Region.setForced(true);
        }

        // Allocate the virtual address space for the region.
        uintptr_t vAddress;
        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        // Map the physical memory into the allocated space.
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        for (size_t i = 0; i < cPages; i++)
            if (virtualAddressSpace.map(
                    start + i * PhysicalMemoryManager::getPageSize(),
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                m_MemoryRegions.free(
                    vAddress, cPages * PhysicalMemoryManager::getPageSize());
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }

        // Set the memory-region's members and track it.
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_PhysicalAddress = start;
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
        PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        return true;
    }
    else
    {
        // Allocate the virtual address space for the region.
        uintptr_t vAddress;
        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        // Back the region with freshly-allocated physical pages.
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        for (size_t i = 0; i < cPages; i++)
        {
            physical_uintptr_t page = m_PageStack.allocate(pageConstraints);
            if (virtualAddressSpace.map(
                    page,
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }
        }

        // Set the memory-region's members and track it.
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
        PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        return true;
    }
}
void HostedPhysicalMemoryManager::initialise(const BootstrapStruct_t &Info)
{
    // Seed the page stack with every page in the hosted "physical" memory
    // window.
    for (physical_uintptr_t p = 0; p < HOSTED_PHYSICAL_MEMORY_SIZE;
         p += getPageSize())
    {
        m_PageStack.free(p);
    }

    // One metadata entry per 4 KiB page.
    m_PageMetadata = new struct page[HOSTED_PHYSICAL_MEMORY_SIZE >> 12];

    // Initialise the free physical ranges, then claim the hosted window.
    m_PhysicalRanges.free(0, 0x100000000ULL);
    m_PhysicalRanges.allocateSpecific(0, HOSTED_PHYSICAL_MEMORY_SIZE);
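    // Sizing note (illustration only): the >> 12 above divides by the 4 KiB
    // page size, so a 1 GiB hosted memory window would need 262144 metadata
    // entries.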
#if defined(VERBOSE_MEMORY_MANAGER)
    // Dump the physical memory ranges.
    NOTICE("physical memory ranges:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
    {
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
    }
#endif
    // Initialise the range of virtual addresses available for MemoryRegions.
    m_MemoryRegions.free(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MEMORYREGION_ADDRESS),
        KERNEL_VIRTUAL_MEMORYREGION_SIZE);
}
void HostedPhysicalMemoryManager::initialisationDone()
{
    NOTICE("PhysicalMemoryManager: kernel initialisation complete");
}
HostedPhysicalMemoryManager::HostedPhysicalMemoryManager()
    : m_PhysicalRanges(), m_MemoryRegions(), m_Lock(false, true),
      m_RegionLock(false, true), m_PageMetadata(0), m_BackingFile(-1)
{
    // Open (creating if needed) the file that backs this hosted kernel's
    // physical memory.
    m_BackingFile = open("physical.bin", O_RDWR | O_CREAT, 0644);
}
void HostedPhysicalMemoryManager::unmapRegion(MemoryRegion *pRegion)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    for (Vector<MemoryRegion *>::Iterator it =
             PhysicalMemoryManager::m_MemoryRegions.begin();
         it != PhysicalMemoryManager::m_MemoryRegions.end(); it++)
    {
        if (*it != pRegion)
            continue;

        size_t cPages =
            pRegion->size() / PhysicalMemoryManager::getPageSize();
        VirtualAddressSpace &virtualAddressSpace =
            VirtualAddressSpace::getKernelAddressSpace();

        // Non-RAM regions took their range from the physical range-list,
        // so give it back (unless the allocation was forced).
        if (pRegion->getNonRamMemory())
        {
            if (!pRegion->getForced())
                m_PhysicalRanges.free(
                    pRegion->physicalAddress(), pRegion->size());
        }

        for (size_t i = 0; i < cPages; i++)
        {
            void *vAddr = reinterpret_cast<void *>(
                reinterpret_cast<uintptr_t>(pRegion->virtualAddress()) +
                i * PhysicalMemoryManager::getPageSize());
            if (!virtualAddressSpace.isMapped(vAddr))
            {
                FATAL("Algorithmic error in "
                      "PhysicalMemoryManager::unmapRegion");
            }

            physical_uintptr_t pAddr;
            size_t flags;
            virtualAddressSpace.getMapping(vAddr, pAddr, flags);

            // RAM pages go back on the stack; the low 16 MiB is left alone.
            if (!pRegion->getNonRamMemory() && pAddr > 0x1000000)
                m_PageStack.free(pAddr);

            virtualAddressSpace.unmap(vAddr);
        }

        m_MemoryRegions.free(
            reinterpret_cast<uintptr_t>(pRegion->virtualAddress()),
            pRegion->size());
        PhysicalMemoryManager::m_MemoryRegions.erase(it);
        break;
    }
}
size_t g_AllocedPages = 0;
physical_uintptr_t HostedPhysicalMemoryManager::PageStack::allocate(
    size_t constraints)
{
    // Hosted physical memory always fits below 4 GiB, so only the first
    // stack is ever used.
    size_t index = 0;

    physical_uintptr_t result = 0;
    if ((m_StackMax[index] != m_StackSize[index]) && m_StackSize[index])
    {
        if (index == 0)
        {
            // Stack 0 holds 32-bit entries.
            m_StackSize[0] -= 4;
            result = *(
                reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4);
        }
        else
        {
            // Higher stacks hold 64-bit entries.
            m_StackSize[index] -= 8;
            result = *(
                reinterpret_cast<uint64_t *>(m_Stack[index]) +
                m_StackSize[index] / 8);
        }
    }

    if (result)
    {
        --m_FreePages;
        ++g_AllocedPages;
    }
    return result;
}
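// Worked example of the pop above (illustration only): with
// m_StackSize[0] == 12 there are three 32-bit entries on stack 0; the pop
// first drops m_StackSize[0] to 8, then reads the uint32_t at index
// 8 / 4 == 2, i.e. the topmost entry.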
void HostedPhysicalMemoryManager::PageStack::free(uint64_t physicalAddress)
{
    // If the stack is full, consume this page as extra stack storage
    // instead of pushing it.
    if (m_StackMax[0] == m_StackSize[0])
    {
        if (!VirtualAddressSpace::getKernelAddressSpace().map(
                physicalAddress, adjust_pointer(m_Stack[0], m_StackMax[0]),
                VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write))
            return;

        m_StackMax[0] += getPageSize();
        return;
    }

    // Push the freed page onto the stack.
    *(reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4) =
        static_cast<uint32_t>(physicalAddress);
    m_StackSize[0] += 4;

    if (g_AllocedPages > 0)
        --g_AllocedPages;
    ++m_FreePages;
}
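// Design note (inferred from the expansion path above): a page freed while
// the stack is full is never pushed as a free-list entry; it is mapped at
// the top of the stack region and becomes stack storage itself, which is
// why that path returns without touching m_StackSize or the counters.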
HostedPhysicalMemoryManager::PageStack::PageStack()
{
    for (size_t i = 0; i < StackCount; i++)
    {
        m_StackMax[i] = 0;
        m_StackSize[i] = 0;
    }

    // The first stack lives at a fixed virtual address.
    m_Stack[0] = KERNEL_VIRTUAL_PAGESTACK_4GB;
    m_FreePages = 0;
}