x86_common/PhysicalMemoryManager.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "PhysicalMemoryManager.h"
#include "pedigree/kernel/BootstrapInfo.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/process/MemoryPressureManager.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Thread.h"
#include "pedigree/kernel/processor/MemoryRegion.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/utilities/Vector.h"
#include "pedigree/kernel/utilities/utility.h"

#if defined(X86)
#include "../x86/VirtualAddressSpace.h"
#elif defined(X64)
#include "../x64/VirtualAddressSpace.h"
#endif

#if defined(TRACK_PAGE_ALLOCATIONS)
#include "pedigree/kernel/debugger/commands/AllocationCommand.h"
#endif

#if defined(X86) && defined(DEBUGGER)
#define USE_BITMAP
#endif

#ifdef USE_BITMAP
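// Debug-only allocation bitmap: 16384 x 32 bits = 524,288 page bits, which at
// 4KB per page covers the first 2GB of physical memory.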
uint32_t g_PageBitmap[16384] = {0};
#endif

EXPORTED_PUBLIC size_t g_FreePages = 0;
EXPORTED_PUBLIC size_t g_AllocedPages = 0;

X86CommonPhysicalMemoryManager X86CommonPhysicalMemoryManager::m_Instance;

static void trackPages(ssize_t v, ssize_t p, ssize_t s)
{
    // Track, if we can.
    Thread *pThread = Processor::information().getCurrentThread();
    if (pThread)
    {
        Process *pProcess = pThread->getParent();
        if (pProcess)
        {
            pProcess->trackPages(v, p, s);
        }
    }
}
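
// The three deltas feed per-process accounting; they are presumably the
// (virtual, physical, shared) page counts. The allocator below only touches
// the physical count, e.g.:
//
//   trackPages(0, 1, 0);   // one physical page gained, as in allocatePage()
//   trackPages(0, -1, 0);  // one physical page returned, as in freePage()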

PhysicalMemoryManager &PhysicalMemoryManager::instance()
{
    return X86CommonPhysicalMemoryManager::instance();
}

size_t X86CommonPhysicalMemoryManager::freePageCount() const
{
    return m_PageStack.freePages();
}

physical_uintptr_t
X86CommonPhysicalMemoryManager::allocatePage(size_t pageConstraints)
{
    static bool bDidHitWatermark = false;
    static bool bHandlingPressure = false;

    // Recursion allowed, to permit e.g. calls from the manager to the heap to
    // succeed without needing to release/re-acquire the lock.
    m_Lock.acquire(true);

    physical_uintptr_t ptr;

    // Some methods of handling memory pressure require allocating pages, so
    // we need to not end up recursively trying to release the pressure.
    if (!bHandlingPressure)
    {
        if (m_PageStack.freePages() < MemoryPressureManager::getHighWatermark())
        {
            bHandlingPressure = true;

            // Make sure the compact can trigger frees.
            m_Lock.release();

            WARNING_NOLOCK(
                "Memory pressure encountered, performing a compact...");
            if (!MemoryPressureManager::instance().compact())
                ERROR_NOLOCK("Compact did not alleviate any memory pressure.");
            else
                NOTICE_NOLOCK("Compact was successful.");

            m_Lock.acquire(true);

            bDidHitWatermark = true;
            bHandlingPressure = false;
        }
        else if (bDidHitWatermark)
        {
            ERROR_NOLOCK("<pressure was hit, but is no longer being hit>");
            bDidHitWatermark = false;
        }
    }

    ptr = m_PageStack.allocate(pageConstraints);
    if (!ptr)
    {
        panic("Out of memory.");
    }

#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(ptr), MemoryTracing::PageAlloc, 4096);
#endif

    trackPages(0, 1, 0);

#ifdef USE_BITMAP
    physical_uintptr_t ptr_bitmap = ptr / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    g_PageBitmap[idx] |= (1 << bit);
#endif

    m_Lock.release();

#if defined(TRACK_PAGE_ALLOCATIONS)
    if (Processor::m_Initialised == 2)
    {
        if (!g_AllocationCommand.isMallocing())
        {
            g_AllocationCommand.allocatePage(ptr);
        }
    }
#endif

    return ptr;
}
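
// Typical call sequence for a single kernel page, a minimal sketch mirroring
// what allocateRegion() further below does (pVirt and flags are chosen by the
// caller and purely illustrative here; error handling omitted):
//
//   physical_uintptr_t phys =
//       PhysicalMemoryManager::instance().allocatePage();
//   VirtualAddressSpace &vas =
//       Processor::information().getVirtualAddressSpace();
//   vas.map(phys, pVirt, flags);
//   ...
//   vas.unmap(pVirt);
//   PhysicalMemoryManager::instance().freePage(phys);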

void X86CommonPhysicalMemoryManager::freePage(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    freePageUnlocked(page);
}

void X86CommonPhysicalMemoryManager::freePageUnlocked(physical_uintptr_t page)
{
    if (!m_Lock.acquired())
        FATAL("X86CommonPhysicalMemoryManager::freePageUnlocked called without "
              "an acquired lock");

    // Check for pinned page.
    PageHashable index(page);
    MetadataTable::LookupResult result = m_PageMetadata.lookup(index);
    if (result.hasValue())
    {
        struct page p = result.value();
        if (p.active)
        {
            if (--p.refcount)
            {
                // Still references.
                m_PageMetadata.update(index, p);
                return;
            }
            else
            {
                // No more references, stop tracking page.
                p.active = false;
                m_PageMetadata.update(index, p);
            }
        }
    }

#ifdef USE_BITMAP
    physical_uintptr_t ptr_bitmap = page / 0x1000;
    size_t idx = ptr_bitmap / 32;
    size_t bit = ptr_bitmap % 32;
    if (!(g_PageBitmap[idx] & (1 << bit)))
    {
        m_Lock.release();
        FATAL_NOLOCK("PhysicalMemoryManager DOUBLE FREE");
    }

    g_PageBitmap[idx] &= ~(1 << bit);
#endif

    m_PageStack.free(page, getPageSize());

#ifdef MEMORY_TRACING
    traceAllocation(
        reinterpret_cast<void *>(page), MemoryTracing::PageFree, 4096);
#endif

    trackPages(0, -1, 0);
}

void X86CommonPhysicalMemoryManager::pin(physical_uintptr_t page)
{
    RecursingLockGuard<Spinlock> guard(m_Lock);

    PageHashable index(page);
    MetadataTable::LookupResult result = m_PageMetadata.lookup(index);
    if (result.hasValue())
    {
        struct page p = result.value();
        ++p.refcount;
        p.active = true;
        m_PageMetadata.update(index, p);
    }
    else
    {
        struct page p;
        p.refcount = 1;
        p.active = true;
        m_PageMetadata.insert(index, p);
    }
}
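
// Pin/free reference counting in brief (behaviour as implemented above, the
// frame address is purely illustrative):
//
//   PhysicalMemoryManager &pmm = PhysicalMemoryManager::instance();
//   pmm.pin(frame);       // frame now tracked with refcount 1
//   pmm.pin(frame);       // refcount 2
//   pmm.freePage(frame);  // refcount 1, frame NOT returned to the page stack
//   pmm.freePage(frame);  // refcount 0, frame released for reuse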

bool X86CommonPhysicalMemoryManager::allocateRegion(
    MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags,
    physical_uintptr_t start)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    // Allocate a specific physical memory region (always physically continuous)
    if (start != static_cast<physical_uintptr_t>(-1))
    {
        // Page-align the start address.
        start &= ~(getPageSize() - 1);

        if (((pageConstraints & continuous) != continuous) ||
            (pageConstraints & virtualOnly))
            panic("PhysicalMemoryManager::allocateRegion(): function misused");

        // Remove the memory from the range-lists (if desired/possible)
        if ((pageConstraints & nonRamMemory) == nonRamMemory)
        {
            Region.setNonRamMemory(true);
            if (m_PhysicalRanges.allocateSpecific(
                    start, cPages * getPageSize()) == false)
            {
                if ((pageConstraints & force) != force)
                {
                    ERROR("PhysicalMemoryManager::allocateRegion() [specific] "
                          "- failed to get space from general range list and "
                          "force is not set");
                    return false;
                }
                else
                    Region.setForced(true);
            }
        }
        else
        {
            if (start < 0x100000 && (start + cPages * getPageSize()) < 0x100000)
            {
                if (m_RangeBelow1MB.allocateSpecific(
                        start, cPages * getPageSize()) == false)
                {
                    ERROR("PhysicalMemoryManager::allocateRegion() [specific] "
                          "- failed to get space from <1MB range list");
                    return false;
                }
            }
            else if (
                start < 0x1000000 &&
                (start + cPages * getPageSize()) < 0x1000000)
            {
                if (m_RangeBelow16MB.allocateSpecific(
                        start, cPages * getPageSize()) == false)
                {
                    ERROR(
                        "PhysicalMemoryManager::allocateRegion() [specific] - "
                        "failed to get "
                        << cPages
                        << " pages of memory from <16MB range list at " << Hex
                        << start);
                    return false;
                }
            }
            else if (start < 0x1000000)
            {
                ERROR("PhysicalMemoryManager: Memory region neither completely "
                      "below nor above 1MB");
                return false;
            }
            else
            {
                // Ensure that free() does not attempt to free the given
                // memory...
                Region.setNonRamMemory(true);
                Region.setForced(true);
            }
        }

        // Allocate the virtual address space
        uintptr_t vAddress = 0;

        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        // Map the physical memory into the allocated space
        VirtualAddressSpace &virtualAddressSpace =
            Processor::information().getVirtualAddressSpace();
        for (size_t i = 0; i < cPages; i++)
            if (virtualAddressSpace.map(
                    start + i * PhysicalMemoryManager::getPageSize(),
                    reinterpret_cast<void *>(
                        vAddress + i * PhysicalMemoryManager::getPageSize()),
                    Flags) == false)
            {
                m_MemoryRegions.free(
                    vAddress, cPages * PhysicalMemoryManager::getPageSize());
                WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
                return false;
            }

        // Set the memory-region's members
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_PhysicalAddress = start;
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
        // NOTICE("MR: Allocated " << Hex << vAddress << " (phys " <<
        // static_cast<uintptr_t>(start) << "), size " << (cPages*4096));

        // Add to the list of memory-regions
        if (!(pageConstraints & PhysicalMemoryManager::anonymous))
        {
            PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        }
        return true;
    }
    else
    {
        // If we need continuous memory, switch to below16 if not already
        if ((pageConstraints & continuous) == continuous)
            if ((pageConstraints & addressConstraints) != below1MB &&
                (pageConstraints & addressConstraints) != below16MB)
                pageConstraints =
                    (pageConstraints & ~addressConstraints) | below16MB;

        // Allocate the virtual address space
        uintptr_t vAddress;
        if (m_MemoryRegions.allocate(
                cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
            false)
        {
            WARNING("AllocateRegion: MemoryRegion allocation failed.");
            return false;
        }

        uint32_t allocatedStart = 0;
        if (!(pageConstraints & virtualOnly))
        {
            VirtualAddressSpace &virtualAddressSpace =
                Processor::information().getVirtualAddressSpace();

            if ((pageConstraints & addressConstraints) == below1MB ||
                (pageConstraints & addressConstraints) == below16MB)
            {
                // Allocate a range
                if ((pageConstraints & addressConstraints) == below1MB)
                {
                    if (m_RangeBelow1MB.allocate(
                            cPages * getPageSize(), allocatedStart) == false)
                    {
                        ERROR("PhysicalMemoryManager::allocateRegion() - "
                              "failed to get space from <1MB range list");
                        return false;
                    }
                }
                else if ((pageConstraints & addressConstraints) == below16MB)
                {
                    if (m_RangeBelow16MB.allocate(
                            cPages * getPageSize(), allocatedStart) == false)
                    {
                        ERROR("PhysicalMemoryManager::allocateRegion() - "
                              "failed to get space from <16MB range list");
                        return false;
                    }
                }

                // Map the physical memory into the allocated space
                for (size_t i = 0; i < cPages; i++)
                    if (virtualAddressSpace.map(
                            allocatedStart +
                                i * PhysicalMemoryManager::getPageSize(),
                            reinterpret_cast<void *>(
                                vAddress +
                                i * PhysicalMemoryManager::getPageSize()),
                            Flags) == false)
                    {
                        WARNING(
                            "AllocateRegion: VirtualAddressSpace::map failed.");
                        return false;
                    }
            }
            else
            {
                // Map the physical memory into the allocated space
                for (size_t i = 0; i < cPages; i++)
                {
                    physical_uintptr_t page = m_PageStack.allocate(
                        pageConstraints & addressConstraints);
                    if (virtualAddressSpace.map(
                            page,
                            reinterpret_cast<void *>(
                                vAddress +
                                i * PhysicalMemoryManager::getPageSize()),
                            Flags) == false)
                    {
                        WARNING(
                            "AllocateRegion: VirtualAddressSpace::map failed.");
                        return false;
                    }
                }
            }
        }

        // Set the memory-region's members
        Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
        Region.m_PhysicalAddress = allocatedStart;
        Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();

        // Add to the list of memory-regions
        if (!(pageConstraints & PhysicalMemoryManager::anonymous))
        {
            PhysicalMemoryManager::m_MemoryRegions.pushBack(&Region);
        }
        return true;
    }
}
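
// A minimal usage sketch for the two modes above. The MemoryRegion
// constructor arguments, the MMIO address and the flag value are hypothetical
// and only illustrate the call shape; error handling is omitted:
//
//   MemoryRegion mmio("example-device");
//   // Specific-start mode: map 4 pages of non-RAM MMIO at a fixed address.
//   PhysicalMemoryManager::instance().allocateRegion(
//       mmio, 4,
//       PhysicalMemoryManager::continuous | PhysicalMemoryManager::nonRamMemory,
//       flags, 0xFEC00000);
//
//   MemoryRegion dma("example-dma");
//   // Anonymous mode: 16 physically continuous pages, forced below 16MB.
//   PhysicalMemoryManager::instance().allocateRegion(
//       dma, 16, PhysicalMemoryManager::continuous, flags);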

void X86CommonPhysicalMemoryManager::shutdown()
{
    NOTICE("Shutting down X86CommonPhysicalMemoryManager");
    m_PageMetadata.clear();
}

void X86CommonPhysicalMemoryManager::initialise(const BootstrapStruct_t &Info)
{
    NOTICE("memory-map:");

    physical_uintptr_t top = 0;
    size_t pageSize = getPageSize();

    // Fill the page-stack (usable memory above 16MB)
    // NOTE: We must do the page-stack first, because the range-lists already
    //       need the memory-management.
    void *MemoryMap = Info.getMemoryMap();
    if (!MemoryMap)
        panic("no memory map provided by the bootloader");

    // Fill our stack with pages below the 4GB threshold.
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        NOTICE(
            " " << Hex << addr << " - " << (addr + length)
                << ", type: " << type);

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);

        if (type != 1)
        {
            continue;
        }

        // We don't want pages below 16MB, and don't want any over 4GB.
        uint64_t rangeTop = addr + length;
        if (rangeTop < 0x1000000)
        {
            // Entire region is below 16MB.
            continue;
        }
        else if (rangeTop >= 0x100000000ULL)
        {
            // Region is too high.
            continue;
        }

        if (addr < 0x1000000)
        {
            // Region crosses the 16MB mark. Fix to base at 16MB instead.
            length = rangeTop - 0x1000000;
            addr = 0x1000000;
        }

        if (rangeTop >= top)
        {
            // Update the "top of memory" value.
            top = rangeTop;
        }

        // Prepare the page stack for the additional pages we're giving it.
        m_PageStack.increaseCapacity((length / pageSize) + 1);

        m_PageStack.free(addr, length);
    }

    // Stack with <4GB is done.
    m_PageStack.markBelow4GReady();

    m_PageMetadata.reserve(top >> 12);  // number of 4k pages in this zone

    // Fill the range-lists (usable memory below 1/16MB & ACPI)
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        if (type == 1)
        {
            if (addr < 0x100000)
            {
                // NOTE: Assumes that the entry/entries starting below 1MB don't
                //       cross the 1MB barrier.
                if ((addr + length) >= 0x100000)
                    panic("PhysicalMemoryManager: strange memory-map");

                m_RangeBelow1MB.free(addr, length);
            }
            else if (addr < 0x1000000)
            {
                uint64_t upperBound = addr + length;
                if (upperBound >= 0x1000000)
                    upperBound = 0x1000000;

                m_RangeBelow16MB.free(addr, upperBound - addr);
            }
        }
#if defined(ACPI)
        else if (type == 3 || type == 4)
        {
            m_AcpiRanges.free(addr, length);
        }
#endif

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

    // Remove the pages used by the kernel from the range-list (below 16MB)
    extern void *kernel_start;
    extern void *kernel_end;
    if (m_RangeBelow16MB.allocateSpecific(
            reinterpret_cast<uintptr_t>(&kernel_start) -
                reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
            reinterpret_cast<uintptr_t>(&kernel_end) -
                reinterpret_cast<uintptr_t>(&kernel_start)) == false)
    {
        panic("PhysicalMemoryManager: could not remove the kernel image from "
              "the range-list");
    }

// Print the ranges
#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("free memory ranges (below 1MB):");
    for (size_t i = 0; i < m_RangeBelow1MB.size(); i++)
        NOTICE(
            " " << Hex << m_RangeBelow1MB.getRange(i).address << " - "
                << (m_RangeBelow1MB.getRange(i).address +
                    m_RangeBelow1MB.getRange(i).length));
    NOTICE("free memory ranges (below 16MB):");
    for (size_t i = 0; i < m_RangeBelow16MB.size(); i++)
        NOTICE(
            " " << Hex << m_RangeBelow16MB.getRange(i).address << " - "
                << (m_RangeBelow16MB.getRange(i).address +
                    m_RangeBelow16MB.getRange(i).length));
#if defined(ACPI)
    NOTICE("ACPI ranges:");
    for (size_t i = 0; i < m_AcpiRanges.size(); i++)
        NOTICE(
            " " << Hex << m_AcpiRanges.getRange(i).address << " - "
                << (m_AcpiRanges.getRange(i).address +
                    m_AcpiRanges.getRange(i).length));
#endif
#endif

    // Initialise the free physical ranges
    m_PhysicalRanges.free(0, 0x100000000ULL);
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);

        // Only map if the variable fits into a uintptr_t - no overflow!
        if (addr > ~0ULL)
        {
            WARNING("Memory region " << addr << " not used.");
        }
        else if (addr >= 0x100000000ULL)
        {
            // Skip >= 4 GB for now, done in initialise64
            break;
        }
        else if (m_PhysicalRanges.allocateSpecific(addr, length) == false)
            panic("PhysicalMemoryManager: Failed to create the list of ranges "
                  "of free physical space");

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

// Print the ranges
#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("physical memory ranges:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
    {
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
    }
#endif

    // Initialise the range of virtual space for MemoryRegions
    m_MemoryRegions.free(
        reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MEMORYREGION_ADDRESS),
        KERNEL_VIRTUAL_MEMORYREGION_SIZE);
}
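
// Resulting carve-up after initialise(), summarised from the code above:
//
//   [0, 1MB)      -> m_RangeBelow1MB   (type-1 map entries only)
//   [1MB, 16MB)   -> m_RangeBelow16MB  (minus the kernel image)
//   [16MB, 4GB)   -> m_PageStack       (below-4GB stack, index 0)
//   ACPI entries  -> m_AcpiRanges      (types 3 and 4, when ACPI is enabled)
//   >= 4GB        -> handled later by initialise64() on X64 builds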

#ifdef X64
void X86CommonPhysicalMemoryManager::initialise64(const BootstrapStruct_t &Info)
{
    NOTICE("64-bit memory-map:");

    // Fill the page-stack (usable memory above 16MB)
    // NOTE: We must do the page-stack first, because the range-lists already
    //       need the memory-management.
    size_t numPagesOver4G = 0;
    uint64_t base = 0;
    void *MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        uint64_t addr = Info.getMemoryMapEntryAddress(MemoryMap);
        uint64_t length = Info.getMemoryMapEntryLength(MemoryMap);
        uint32_t type = Info.getMemoryMapEntryType(MemoryMap);

        if (addr >= 0x100000000ULL)
        {
            if (base == 0 || addr < base)
            {
                base = addr;
            }

            NOTICE(
                " " << Hex << addr << " - " << (addr + length)
                    << ", type: " << type);

            if (type == 1)
            {
                size_t numPages = length / getPageSize();
                m_PageStack.increaseCapacity(numPages);
                m_PageStack.free(addr, length);

                m_PhysicalRanges.free(addr, length);

                numPagesOver4G += numPages;
            }
        }

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

    // Map physical memory above 4G into the kernel address space.
    // Everything below 4G is already mapped using 2MB pages.
    VirtualAddressSpace &kernelSpace =
        VirtualAddressSpace::getKernelAddressSpace();
    bool ok = kernelSpace.mapHuge(
        base, reinterpret_cast<void *>(0xFFFF800000000000 + base),
        numPagesOver4G,
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
    if (!ok)
    {
        FATAL("failed to map physical memory");
    }

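    // Note on the mapping above (arithmetic only, no new behaviour): every
    // physical byte P above 4GB becomes visible in the kernel's virtual
    // address space at 0xFFFF800000000000 + P, e.g. physical 0x100000000
    // (4GB) appears at virtual 0xFFFF800100000000. mapHuge() presumably
    // covers the range with large pages rather than one 4KB PTE per frame.
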
    NOTICE(" --> " << numPagesOver4G << " pages exist above 4G!");

    // Stacks >= 4GB are done.
    m_PageStack.markAbove4GReady();

// Fill the range-lists (usable memory below 1/16MB & ACPI)
#if defined(ACPI)
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        if ((Info.getMemoryMapEntryType(MemoryMap) == 3 ||
             Info.getMemoryMapEntryType(MemoryMap) == 4) &&
            Info.getMemoryMapEntryAddress(MemoryMap) >= 0x100000000ULL)
        {
            m_AcpiRanges.free(
                Info.getMemoryMapEntryAddress(MemoryMap),
                Info.getMemoryMapEntryLength(MemoryMap));
        }

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

#if defined(VERBOSE_MEMORY_MANAGER)
    // Print the ranges
    NOTICE("ACPI ranges (x64 added):");
    for (size_t i = 0; i < m_AcpiRanges.size(); i++)
        NOTICE(
            " " << Hex << m_AcpiRanges.getRange(i).address << " - "
                << (m_AcpiRanges.getRange(i).address +
                    m_AcpiRanges.getRange(i).length));
#endif
#endif

    // Initialise the free physical ranges
    MemoryMap = Info.getMemoryMap();
    while (MemoryMap)
    {
        // Only map if the variable fits into a uintptr_t - no overflow!
        if ((Info.getMemoryMapEntryAddress(MemoryMap)) > ~0ULL)
        {
            WARNING(
                "Memory region " << Info.getMemoryMapEntryAddress(MemoryMap)
                                 << " not used.");
        }
        else if (
            (Info.getMemoryMapEntryAddress(MemoryMap) >= 0x100000000ULL) &&
            (m_PhysicalRanges.allocateSpecific(
                 Info.getMemoryMapEntryAddress(MemoryMap),
                 Info.getMemoryMapEntryLength(MemoryMap)) == false))
            panic("PhysicalMemoryManager: Failed to create the list of ranges "
                  "of free physical space");

        MemoryMap = Info.nextMemoryMapEntry(MemoryMap);
    }

// Print the ranges
#if defined(VERBOSE_MEMORY_MANAGER)
    NOTICE("physical memory ranges, 64-bit added:");
    for (size_t i = 0; i < m_PhysicalRanges.size(); i++)
    {
        NOTICE(
            " " << Hex << m_PhysicalRanges.getRange(i).address << " - "
                << (m_PhysicalRanges.getRange(i).address +
                    m_PhysicalRanges.getRange(i).length));
    }
#endif
}
#endif

void X86CommonPhysicalMemoryManager::initialisationDone()
{
    extern void *kernel_init;
    extern void *kernel_init_end;

    NOTICE("PhysicalMemoryManager: kernel initialisation complete, cleaning "
           "up...");

    // Unmap & free the .init section
    VirtualAddressSpace &kernelSpace =
        VirtualAddressSpace::getKernelAddressSpace();
    size_t count = (reinterpret_cast<uintptr_t>(&kernel_init_end) -
                    reinterpret_cast<uintptr_t>(&kernel_init)) /
                   getPageSize();
    for (size_t i = 0; i < count; i++)
    {
        void *vAddress = adjust_pointer(
            reinterpret_cast<void *>(&kernel_init), i * getPageSize());

        // Get the physical address
        size_t flags;
        physical_uintptr_t pAddress;
        kernelSpace.getMapping(vAddress, pAddress, flags);

        // Unmap the page
        kernelSpace.unmap(vAddress);
    }

    // Free the physical pages
    m_RangeBelow16MB.free(
        reinterpret_cast<uintptr_t>(&kernel_init) -
            reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_ADDRESS),
        count * getPageSize());

    NOTICE(
        "PhysicalMemoryManager: cleaned up " << Dec << (count * 4) << Hex
                                             << "KB of init-only code.");
}

X86CommonPhysicalMemoryManager::X86CommonPhysicalMemoryManager()
    : m_PageStack(), m_RangeBelow1MB(), m_RangeBelow16MB(), m_PhysicalRanges(),
#if defined(ACPI)
      m_AcpiRanges(),
#endif
      m_MemoryRegions(), m_Lock(false, true), m_RegionLock(false, true),
      m_PageMetadata()
{
}

X86CommonPhysicalMemoryManager::~X86CommonPhysicalMemoryManager()
{
}

void X86CommonPhysicalMemoryManager::unmapRegion(MemoryRegion *pRegion)
{
    LockGuard<Spinlock> guard(m_RegionLock);

    for (Vector<MemoryRegion *>::Iterator it =
             PhysicalMemoryManager::m_MemoryRegions.begin();
         it != PhysicalMemoryManager::m_MemoryRegions.end(); it++)
    {
        if (*it == pRegion)
        {
            size_t cPages =
                pRegion->size() / PhysicalMemoryManager::getPageSize();
            uintptr_t start =
                reinterpret_cast<uintptr_t>(pRegion->virtualAddress());
            physical_uintptr_t phys = pRegion->physicalAddress();
            VirtualAddressSpace &virtualAddressSpace =
                VirtualAddressSpace::getKernelAddressSpace();

            if (pRegion->getNonRamMemory())
            {
                if (!pRegion->getForced())
                    m_PhysicalRanges.free(phys, pRegion->size());
            }
            else
            {
                if (phys < 0x100000 &&
                    (phys + cPages * getPageSize()) < 0x100000)
                {
                    m_RangeBelow1MB.free(phys, cPages * getPageSize());
                }
                else if (
                    phys < 0x1000000 &&
                    (phys + cPages * getPageSize()) < 0x1000000)
                {
                    m_RangeBelow16MB.free(phys, cPages * getPageSize());
                }
                else if (phys < 0x1000000)
                {
                    ERROR("PhysicalMemoryManager: Memory region neither "
                          "completely below nor above 1MB");
                    return;
                }
            }

            for (size_t i = 0; i < cPages; i++)
            {
                void *vAddr = reinterpret_cast<void *>(
                    start + i * PhysicalMemoryManager::getPageSize());
                if (!virtualAddressSpace.isMapped(vAddr))
                {
                    // Can happen with virtualOnly mappings.
                    continue;
                }
                physical_uintptr_t pAddr;
                size_t flags;
                virtualAddressSpace.getMapping(vAddr, pAddr, flags);

                if (!pRegion->getNonRamMemory() && pAddr > 0x1000000)
                    m_PageStack.free(pAddr, getPageSize());

                virtualAddressSpace.unmap(vAddr);
            }
            // NOTICE("MR: Freed " << Hex << start << ", size " <<
            // (cPages*4096));
            m_MemoryRegions.free(start, pRegion->size());
            PhysicalMemoryManager::m_MemoryRegions.erase(it);
            break;
        }
    }
}

physical_uintptr_t
X86CommonPhysicalMemoryManager::PageStack::allocate(size_t constraints)
{
    size_t index = 0;
#if defined(X64)
    if (constraints == X86CommonPhysicalMemoryManager::below4GB)
        index = 0;
    else if (constraints == X86CommonPhysicalMemoryManager::below64GB)
        index = 1;
    else
    {
        index = 2;

        // Degrade quietly if this stack is not ready.
        if (!m_StackReady[index])
        {
            index = 1;

            if (!m_StackReady[index])
            {
                index = 0;
            }
        }
    }

    // Wait for the stack to be ready. With constraints, this will block until
    // a specific page stack is ready. With no constraints, this will just
    // block until the first page stack is ready (which should almost always
    // be the case).
    while (!m_StackReady[index])
    {
        Processor::pause();
    }

    if (index == 2 && (m_StackMax[2] == m_StackSize[2] || !m_StackReady[2]))
        index = 1;
    if (index == 1 && (m_StackMax[1] == m_StackSize[1] || !m_StackReady[1]))
        index = 0;
#endif

    physical_uintptr_t result = 0;
    if ((m_StackMax[index] != m_StackSize[index]) && m_StackSize[index])
    {
        if (index == 0)
        {
            m_StackSize[0] -= 4;
            result = *(
                reinterpret_cast<uint32_t *>(m_Stack[0]) + m_StackSize[0] / 4);
        }
        else
        {
            m_StackSize[index] -= 8;
            result =
                *(reinterpret_cast<uint64_t *>(m_Stack[index]) +
                  m_StackSize[index] / 8);
        }
    }

    if (result)
    {
        if (g_FreePages)
            g_FreePages--;
        g_AllocedPages++;

        if (m_FreePages)
            --m_FreePages;
    }

    return result;
}
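
// How the pop above works, in brief (derived from the code; m_StackSize[] is a
// byte count): the below-4GB stack (index 0, uint32_t entries) shrinks by 4
// per pop, the 64-bit stacks shrink by 8. Example with two free frames on
// stack 0:
//
//   m_StackSize[0] == 8          // entries at byte offsets 0 and 4
//   allocate(0) -> reads entry (8 - 4) / 4 == 1, m_StackSize[0] becomes 4
//   allocate(0) -> reads entry (4 - 4) / 4 == 0, m_StackSize[0] becomes 0
//   allocate(0) -> returns 0 (stack empty); allocatePage() panics on that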

template <class T>
static void
performPush(T *stack, size_t &stackSize, uint64_t physicalAddress, size_t count)
{
    size_t nextEntry = stackSize / sizeof(T);
    T addend = 0;
    for (size_t i = 0; i < count; ++i)
    {
        stack[nextEntry + i] = static_cast<T>(physicalAddress + addend);
        addend += PhysicalMemoryManager::getPageSize();
    }

    stackSize += sizeof(T) * count;
}
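
// Worked example (assuming the per-page increment of addend reconstructed
// above): performPush(stack, stackSize, 0x200000, 3) with 4KB pages appends
// the entries 0x200000, 0x201000 and 0x202000, then grows stackSize by
// 3 * sizeof(T) bytes.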

void X86CommonPhysicalMemoryManager::PageStack::free(
    uint64_t physicalAddress, size_t length)
{
    // Select the right stack
    size_t index = 0;
    if (physicalAddress >= 0x100000000ULL)
    {
#if defined(X86)
        return;
#elif defined(X64)
        if (physicalAddress >= 0x1000000000ULL)
        {
            index = 2;
        }
        else
        {
            index = 1;
        }
#endif
    }

    // Don't attempt to map address zero.
    if (UNLIKELY(!m_Stack[index]))
    {
        return;
    }

    uint64_t topPhysical = physicalAddress + length;

    for (; physicalAddress < topPhysical; physicalAddress += getPageSize())
    {
        // Expand the stack if necessary.
        if (!maybeMap(index, physicalAddress))
        {
            break;
        }
    }

    size_t numPages = (topPhysical - physicalAddress) / getPageSize();

    if (index == 0)
    {
        performPush(
            reinterpret_cast<uint32_t *>(m_Stack[index]), m_StackSize[index],
            physicalAddress, numPages);
    }
    else
    {
        performPush(
            reinterpret_cast<uint64_t *>(m_Stack[index]), m_StackSize[index],
            physicalAddress, numPages);
    }

    g_FreePages += numPages;
    if (g_AllocedPages > 0)
    {
        if (g_AllocedPages >= numPages)
        {
            g_AllocedPages -= numPages;
        }
        else
        {
            g_AllocedPages = 0;
        }
    }

    m_FreePages += numPages;
}
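
// Stack selection summary, from the thresholds above and the constraint
// handling in PageStack::allocate():
//
//   index 0 -> frames below 4GB   (< 0x100000000, 32-bit entries)
//   index 1 -> frames 4GB..64GB   (< 0x1000000000, 64-bit entries)
//   index 2 -> frames above 64GB  (64-bit entries)
//
// On X86 builds only index 0 exists; frames above 4GB are simply dropped.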

X86CommonPhysicalMemoryManager::PageStack::PageStack()
{
    m_Capacity = 0;
    m_DesiredCapacity = 0;

    for (size_t i = 0; i < StackCount; i++)
    {
        m_StackMax[i] = 0;
        m_StackSize[i] = 0;
        m_StackReady[i] = false;
    }

    // Set the locations for the page stacks in the virtual address space
    m_Stack[0] = KERNEL_VIRTUAL_PAGESTACK_4GB;
#if defined(X64)
    m_Stack[1] = KERNEL_VIRTUAL_PAGESTACK_ABV4GB1;
    m_Stack[2] = KERNEL_VIRTUAL_PAGESTACK_ABV4GB2;
#endif

    m_FreePages = 0;
}

void X86CommonPhysicalMemoryManager::PageStack::markAbove4GReady()
{
    for (size_t i = 1; i < StackCount; ++i)
    {
        m_StackReady[i] = true;
    }
}

void X86CommonPhysicalMemoryManager::PageStack::markBelow4GReady()
{
    m_StackReady[0] = true;
}

bool X86CommonPhysicalMemoryManager::PageStack::maybeMap(
    size_t index, uint64_t physicalAddress)
{
    bool mapped = false;

    void *virtualAddress = adjust_pointer(m_Stack[index], m_StackMax[index]);

    // Do we even need to do this mapping?
    if (m_Capacity >= m_DesiredCapacity)
    {
        return false;
    }

#if defined(X86)
    // Get the kernel virtual address-space
    X86VirtualAddressSpace &AddressSpace =
        static_cast<X86VirtualAddressSpace &>(
            VirtualAddressSpace::getKernelAddressSpace());
#elif defined(X64)
    X64VirtualAddressSpace &AddressSpace =
        static_cast<X64VirtualAddressSpace &>(
            VirtualAddressSpace::getKernelAddressSpace());
#endif

    if (!index)
    {
        if (AddressSpace.mapPageStructures(
                physicalAddress, virtualAddress,
                VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write) ==
            true)
        {
            mapped = true;
        }
    }
    else
    {
#if defined(X64)
        if (AddressSpace.mapPageStructuresAbove4GB(
                physicalAddress, virtualAddress,
                VirtualAddressSpace::KernelMode | VirtualAddressSpace::Write) ==
            true)
        {
            mapped = true;
        }
#else
        FATAL("PageStack::free - index > 0 when not built as x86_64");
#endif
    }

    // Another page worth of entries is mapped - update capacity accordingly.
    if (AddressSpace.isMapped(virtualAddress))
    {
        // This address is now valid for stack usage, so it adds capacity for
        // significantly more pages to the stack.
        size_t entrySize = sizeof(uint32_t);
        if (index != 0)
        {
            entrySize = sizeof(uint64_t);
        }
        m_Capacity += getPageSize() / entrySize;

        // This page is mapped, so we need to go ahead and start allocating the
        // next page in the stack. This way we always have the entire stack
        // mapped before we start pushing pages into it.
        m_StackMax[index] += getPageSize();

        // Top of stack mapped, do we need to expand further?
        if (m_Capacity >= m_DesiredCapacity)
        {
            // No need to map here.
            return false;
        }
    }

    return mapped;
}
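
// Capacity arithmetic for the expansion above: each 4KB page mapped for the
// stack holds 4096 / 4 = 1024 uint32_t entries on the below-4GB stack (one
// mapped page therefore tracks 1024 frames, i.e. 4MB of physical memory), and
// 4096 / 8 = 512 uint64_t entries on the 64-bit stacks (2MB of physical
// memory per mapped page).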