hosted/PhysicalMemoryManager.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "PhysicalMemoryManager.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/processor/MemoryRegion.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/utilities/Cache.h"
#include "pedigree/kernel/utilities/assert.h"
#include "pedigree/kernel/utilities/utility.h"

#if defined(TRACK_PAGE_ALLOCATIONS)
#include "pedigree/kernel/debugger/commands/AllocationCommand.h"
#endif

#include "VirtualAddressSpace.h"

#include "pedigree/kernel/core/SlamAllocator.h"
#include "pedigree/kernel/process/MemoryPressureManager.h"

namespace __pedigree_hosted
{
};
using namespace __pedigree_hosted;

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define USE_BITMAP

#ifdef USE_BITMAP
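// Each 32-bit word tracks 32 pages, so 16384 words cover 524288 pages; at
// the 4 KiB page size assumed throughout this file, that is 2 GiB of
// physical address space.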
uint32_t g_PageBitmap[16384] = {0};
#endif

HostedPhysicalMemoryManager HostedPhysicalMemoryManager::m_Instance;

PhysicalMemoryManager &PhysicalMemoryManager::instance()
{
    return HostedPhysicalMemoryManager::instance();
}

physical_uintptr_t HostedPhysicalMemoryManager::allocatePage()
{
    static bool bDidHitWatermark = false;
    static bool bHandlingPressure = false;

    m_Lock.acquire(true);

    physical_uintptr_t ptr;

    // Some methods of handling memory pressure require allocating pages, so
    // we need to not end up recursively trying to release the pressure.
    if (!bHandlingPressure)
    {
        if (m_PageStack.freePages() < MemoryPressureManager::getHighWatermark())
        {
            bHandlingPressure = true;

            // Make sure the compact can trigger frees.
            m_Lock.release();

            WARNING_NOLOCK(
                "Memory pressure encountered, performing a compact...");
            if (!MemoryPressureManager::instance().compact())
                ERROR_NOLOCK("Compact did not alleviate any memory pressure.");
            else
                NOTICE_NOLOCK("Compact was successful.");

            m_Lock.acquire(true);

            bDidHitWatermark = true;
            bHandlingPressure = false;
        }
        else if (bDidHitWatermark)
        {
            ERROR_NOLOCK("<pressure was hit, but is no longer being hit>");
            bDidHitWatermark = false;
        }
    }

    ptr = m_PageStack.allocate(0);
    if (!ptr)
    {
        panic("Out of memory.");
    }

#ifdef USE_BITMAP
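    // Convert the physical address into a page number, then into a word
    // index and bit offset within g_PageBitmap (4 KiB pages, 32 pages
    // tracked per word). A set bit means the page is currently allocated.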
    physical_uintptr_t ptr_bitmap = ptr / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (g_PageBitmap[idx] & (1 << bit))
    {
        m_Lock.release();
        FATAL_NOLOCK("PhysicalMemoryManager allocate()d a page twice");
    }
    g_PageBitmap[idx] |= (1 << bit);
#endif

    m_Lock.release();

#if defined(TRACK_PAGE_ALLOCATIONS)
    if (Processor::m_Initialised == 2)
    {
        if (!g_AllocationCommand.isMallocing())
        {
            g_AllocationCommand.allocatePage(ptr);
        }
    }
#endif

    return ptr;
}

void HostedPhysicalMemoryManager::freePage(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    freePageUnlocked(page);
}

void HostedPhysicalMemoryManager::freePageUnlocked(physical_uintptr_t page)
{
    if (!m_Lock.acquired())
        FATAL("HostedPhysicalMemoryManager::freePageUnlocked called without an "
              "acquired lock");

    // Check for pinned page.
    size_t index = page >> 12;
    if (m_PageMetadata && m_PageMetadata[index].active)
    {
        if (--m_PageMetadata[index].refcount)
        {
            // Still references.
            return;
        }
        else
        {
            // No more references.
            m_PageMetadata[index].active = false;
        }
    }

#ifdef USE_BITMAP
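    // Mirror of the check in allocatePage(): the bit must still be set
    // (page currently allocated), otherwise this is a double free.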
    physical_uintptr_t ptr_bitmap = page / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (!(g_PageBitmap[idx] & (1 << bit)))
    {
        m_Lock.release();
        FATAL_NOLOCK("PhysicalMemoryManager DOUBLE FREE");
    }

    g_PageBitmap[idx] &= ~(1 << bit);
#endif

    m_PageStack.free(page);
}

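// pin() takes an extra reference on an already-allocated physical page;
// freePageUnlocked() only returns a pinned page to the stack once the
// refcount drops back to zero.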
void HostedPhysicalMemoryManager::pin(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    if (!m_PageMetadata)
    {
        // No page metadata to speak of.
        return;
    }

    size_t index = page >> 12;
    if (m_PageMetadata[index].active)
    {
        ++m_PageMetadata[index].refcount;
    }
    else
    {
        m_PageMetadata[index].refcount = 1;
        m_PageMetadata[index].active = true;
    }
}

bool HostedPhysicalMemoryManager::allocateRegion(
    MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags,
    physical_uintptr_t start)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    // Allocate a specific physical memory region (always physically continuous)
    if (start != static_cast<physical_uintptr_t>(-1))
    {
        // Page-align the start address.
        start &= ~(getPageSize() - 1);

        if ((pageConstraints & continuous) != continuous)
            panic("PhysicalMemoryManager::allocateRegion(): function misused");

        // Remove the memory from the range-lists (if desired/possible)
        if ((pageConstraints & nonRamMemory) == nonRamMemory)
        {
            Region.setNonRamMemory(true);
            if (m_PhysicalRanges.allocateSpecific(
                    start, cPages * getPageSize()) == false)
            {
                if ((pageConstraints & force) != force)
                    return false;
                else
                    Region.setForced(true);
            }
        }
        else
        {
            // Ensure that free() does not attempt to free the given memory...
            Region.setNonRamMemory(true);
            Region.setForced(true);
        }

        // Allocate the virtual address space
        uintptr_t vAddress;

        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        // Map the physical memory into the allocated space
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        for (size_t i = 0; i < cPages; i++)
            if (virtualAddressSpace.map(
                    start + i * PhysicalMemoryManager::getPageSize(),
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                m_MemoryRegions.free(
                    vAddress, cPages * PhysicalMemoryManager::getPageSize());
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }

        // Set the memory-region's members
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_PhysicalAddress = start;
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
        // NOTICE("MR: Allocated " << Hex << vAddress << " (phys " <<
        // static_cast<uintptr_t>(start) << "), size " << (cPages*4096));

        // Add to the list of memory-regions
        PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        return true;
    }
    else
    {
        // Allocate the virtual address space
        uintptr_t vAddress;
        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

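        // This branch hands back whatever pages the page stack produces, so
        // the region is not guaranteed physically contiguous. The local
        // 'start' below shadows the (unused, -1) parameter and is what gets
        // recorded as the region's physical address.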
        uint32_t start = 0;
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();

        // Map the physical memory into the allocated space
        for (size_t i = 0; i < cPages; i++)
        {
            physical_uintptr_t page = m_PageStack.allocate(pageConstraints);
            if (virtualAddressSpace.map(
                    page,
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }
        }

        // Set the memory-region's members
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_PhysicalAddress = start;
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();

        // Add to the list of memory-regions
        PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        return true;
    }
    return false;
}

void HostedPhysicalMemoryManager::initialise(const BootstrapStruct_t &Info)
{
    NOTICE("memory-map:");

    // Free pages into the page stack first.
    for (physical_uintptr_t p = 0; p < HOSTED_PHYSICAL_MEMORY_SIZE;
         p += getPageSize())
    {
        m_PageStack.free(p);
    }
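    // Allocate one metadata entry (refcount + active flag) per 4 KiB page
    // of the emulated physical memory.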
    m_PageMetadata = new struct page[HOSTED_PHYSICAL_MEMORY_SIZE >> 12];

    // Initialise the free physical ranges
    m_PhysicalRanges.free(0, 0x100000000ULL);
    m_PhysicalRanges.allocateSpecific(0, HOSTED_PHYSICAL_MEMORY_SIZE);

// Print the ranges
#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("physical memory ranges:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
    {
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
    }
#endif

    // Initialise the range of virtual space for MemoryRegions
    m_MemoryRegions.free(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MEMORYREGION_ADDRESS),
        KERNEL_VIRTUAL_MEMORYREGION_SIZE);
}

void HostedPhysicalMemoryManager::initialisationDone()
{
    NOTICE("PhysicalMemoryManager: kernel initialisation complete");
}

HostedPhysicalMemoryManager::HostedPhysicalMemoryManager()
    : m_PhysicalRanges(), m_MemoryRegions(), m_Lock(false, true),
      m_RegionLock(false, true), m_PageMetadata(0), m_BackingFile(-1)
{
    // Create our backing memory file.
    m_BackingFile = open("physical.bin", O_RDWR | O_CREAT, 0644);
    ftruncate(m_BackingFile, HOSTED_PHYSICAL_MEMORY_SIZE);
    lseek(m_BackingFile, 0, SEEK_SET);
}

HostedPhysicalMemoryManager::~HostedPhysicalMemoryManager()
{
    PhysicalMemoryManager::m_MemoryRegions.clear();
    delete[] m_PageMetadata;
    m_PageMetadata = 0;

    close(m_BackingFile);
    m_BackingFile = -1;
}

void HostedPhysicalMemoryManager::unmapRegion(MemoryRegion *pRegion)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    for (Vector<MemoryRegion *>::Iterator it =
             PhysicalMemoryManager::m_MemoryRegions.begin();
         it != PhysicalMemoryManager::m_MemoryRegions.end(); it++)
    {
        if (*it == pRegion)
        {
            size_t cPages =
                pRegion->size() / PhysicalMemoryManager::getPageSize();
            uintptr_t start =
                reinterpret_cast<uintptr_t>(pRegion->virtualAddress());
            physical_uintptr_t phys = pRegion->physicalAddress();
            VirtualAddressSpace &virtualAddressSpace =
                VirtualAddressSpace::getKernelAddressSpace();

            if (pRegion->getNonRamMemory())
            {
                if (!pRegion->getForced())
                    m_PhysicalRanges.free(phys, pRegion->size());
            }

            for (size_t i = 0; i < cPages; i++)
            {
                void *vAddr = reinterpret_cast<void *>(
                    start + i * PhysicalMemoryManager::getPageSize());
                if (!virtualAddressSpace.isMapped(vAddr))
                {
                    FATAL("Algorithmic error in "
                          "PhysicalMemoryManager::unmapRegion");
                }
                physical_uintptr_t pAddr;
                size_t flags;
                virtualAddressSpace.getMapping(vAddr, pAddr, flags);

                if (!pRegion->getNonRamMemory() && pAddr > 0x1000000)
                    m_PageStack.free(pAddr);

                virtualAddressSpace.unmap(vAddr);
            }
            m_MemoryRegions.free(start, pRegion->size());
            PhysicalMemoryManager::m_MemoryRegions.erase(it);
            break;
        }
    }
}
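
// Debugging counters: pages currently free on, and allocated from, the
// page stack.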
size_t g_FreePages = 0;
size_t g_AllocedPages = 0;
physical_uintptr_t
HostedPhysicalMemoryManager::PageStack::allocate(size_t constraints)
{
    size_t index = 0;
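    // The hosted port only ever uses stack 0, which stores 32-bit entries
    // (4 bytes each); higher-indexed stacks would hold 64-bit entries, as
    // the else branch below shows.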

    physical_uintptr_t result = 0;
    if ((m_StackMax[index] != m_StackSize[index]) && m_StackSize[index])
    {
        if (index == 0)
        {
            m_StackSize[0] -= 4;
            result = *(
                reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4);
        }
        else
        {
            m_StackSize[index] -= 8;
            result =
                *(reinterpret_cast<uint64_t *>(m_Stack[index]) +
                  m_StackSize[index] / 8);
        }
    }

    if (result)
    {
        if (g_FreePages)
            g_FreePages--;
        g_AllocedPages++;

        if (m_FreePages)
            --m_FreePages;
    }
    return result;
}

void HostedPhysicalMemoryManager::PageStack::free(uint64_t physicalAddress)
{
    // Don't attempt to map address zero.
    if (!m_Stack[0])
        return;

    // Expand the stack if necessary
    if (m_StackMax[0] == m_StackSize[0])
    {
        // Map the next increment of the stack to the page we are freeing.
        // The next free will actually move StackMax, and write to the newly
        // allocated page.
        if (!VirtualAddressSpace::getKernelAddressSpace().map(
                physicalAddress, adjust_pointer(m_Stack[0], m_StackMax[0]),
                VirtualAddressSpace::Write))
        {
            return;
        }
        m_StackMax[0] += getPageSize();
    }

    *(reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4) =
        static_cast<uint32_t>(physicalAddress);
    m_StackSize[0] += 4;

    g_FreePages++;
    if (g_AllocedPages > 0)
        g_AllocedPages--;

    ++m_FreePages;
}

HostedPhysicalMemoryManager::PageStack::PageStack()
{
    for (size_t i = 0; i < StackCount; i++)
    {
        m_StackMax[i] = 0;
        m_StackSize[i] = 0;
    }

    // Set the locations for the page stacks in the virtual address space
    m_Stack[0] = KERNEL_VIRTUAL_PAGESTACK_4GB;
    m_FreePages = 0;
}
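
// A minimal usage sketch (hypothetical caller, not part of this file): the
// manager is a singleton, so kernel code allocates and frees raw frames
// through PhysicalMemoryManager::instance().
//
//     physical_uintptr_t frame =
//         PhysicalMemoryManager::instance().allocatePage();
//     ...
//     PhysicalMemoryManager::instance().freePage(frame);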