// The Pedigree Project - 0.1
// armv7/PhysicalMemoryManager.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "PhysicalMemoryManager.h"
21 #include "VirtualAddressSpace.h"
22 #include "pedigree/kernel/LockGuard.h"
23 #include "pedigree/kernel/Log.h"
24 #include "pedigree/kernel/processor/MemoryRegion.h"
25 #include "pedigree/kernel/processor/Processor.h"
26 
28 
30 {
32 }
33 
35 {
36  LockGuard<Spinlock> guard(m_Lock);
37 
39  physical_uintptr_t ptr = m_PageStack.allocate(0);
40  return ptr;
41 }
42 void ArmV7PhysicalMemoryManager::freePage(physical_uintptr_t page)
43 {
44  LockGuard<Spinlock> guard(m_Lock);
45 
46  m_PageStack.free(page);
47 }
48 void ArmV7PhysicalMemoryManager::freePageUnlocked(physical_uintptr_t page)
49 {
50  if (!m_Lock.acquired())
51  FATAL("ArmV7PhysicalMemoryManager::freePageUnlocked called without an "
52  "acquired lock");
53 
54  m_PageStack.free(page);
55 }
57  MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags,
58  physical_uintptr_t start)
59 {
60  LockGuard<Spinlock> guard(m_RegionLock);
61 
62  // Allocate a specific physical memory region (always physically continuous)
63  if (start != static_cast<physical_uintptr_t>(-1))
64  {
65  if ((pageConstraints & continuous) != continuous)
66  panic("PhysicalMemoryManager::allocateRegion(): function misused");
67 
68 // Remove the memory from the range-lists (if desired/possible)
69 #ifdef ARM_BEAGLE // Beagleboard RAM locations
70  if ((start < 0x80000000) || (start >= 0x90000000))
71  {
72  if (!m_NonRAMRanges.allocateSpecific(start, cPages * getPageSize()))
73  {
74  if (!(pageConstraints & PhysicalMemoryManager::force))
75  return false;
76  }
77  }
78  else
79 #endif
80  {
81  if (!m_PhysicalRanges.allocateSpecific(
82  start, cPages * getPageSize()))
83  return false;
84  }
85 
86  // Allocate the virtual address space
87  uintptr_t vAddress;
88 
89  if (m_VirtualMemoryRegions.allocate(cPages * getPageSize(), vAddress) ==
90  false)
91  {
92  WARNING("AllocateRegion: MemoryRegion allocation failed.");
93  return false;
94  }
95 
96  // Map the physical memory into the allocated space
97  VirtualAddressSpace *virtualAddressSpace;
98  if (vAddress > 0x40000000) // > 1 GB = kernel address space
99  virtualAddressSpace = &VirtualAddressSpace::getKernelAddressSpace();
100  else
101  virtualAddressSpace =
102  &Processor::information().getVirtualAddressSpace();
103  for (size_t i = 0; i < cPages; i++)
104  if (virtualAddressSpace->map(
106  reinterpret_cast<void *>(
107  vAddress + i * PhysicalMemoryManager::getPageSize()),
108  Flags) == false)
109  {
110  m_VirtualMemoryRegions.free(
111  vAddress, cPages * PhysicalMemoryManager::getPageSize());
112  WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
113  return false;
114  }
115 
116  // Set the memory-region's members
117  Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
118  Region.m_PhysicalAddress = start;
119  Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
120 
121  // Add to the list of memory-regions
123  return true;
124  }
125  else
126  {
127  // Allocate continuous memory if we need to
128  bool bContinuous = false;
129  physical_uintptr_t physAddr = 0;
130  if (pageConstraints & PhysicalMemoryManager::continuous)
131  {
132  bContinuous = true;
133  if (!m_PhysicalRanges.allocate(cPages * getPageSize(), physAddr))
134  return false;
135  }
136 
137  // Allocate the virtual address space
138  uintptr_t vAddress;
139  if (m_VirtualMemoryRegions.allocate(
140  cPages * PhysicalMemoryManager::getPageSize(), vAddress) ==
141  false)
142  {
143  WARNING("AllocateRegion: MemoryRegion allocation failed.");
144  return false;
145  }
146 
147  uint32_t start = 0;
148  VirtualAddressSpace *virtualAddressSpace;
149  if (vAddress > 0x40000000) // > 1 GB = kernel address space
150  virtualAddressSpace = &VirtualAddressSpace::getKernelAddressSpace();
151  else
152  virtualAddressSpace =
153  &Processor::information().getVirtualAddressSpace();
154 
155  {
156  // Map the physical memory into the allocated space
157  for (size_t i = 0; i < cPages; i++)
158  {
159  physical_uintptr_t page = 0;
160  if (bContinuous)
161  page =
162  physAddr + (i * PhysicalMemoryManager::getPageSize());
163  else
164  page = m_PageStack.allocate(pageConstraints);
165  if (virtualAddressSpace->map(
166  page,
167  reinterpret_cast<void *>(
168  vAddress +
170  Flags) == false)
171  {
172  WARNING("AllocateRegion: VirtualAddressSpace::map failed.");
173  return false;
174  }
175  }
176  }
177 
178  // Set the memory-region's members
179  Region.m_VirtualAddress = reinterpret_cast<void *>(vAddress);
180  Region.m_PhysicalAddress =
181  bContinuous ?
182  physAddr :
183  0; // If any mapping is done non-continuously, use getMapping
184  Region.m_Size = cPages * PhysicalMemoryManager::getPageSize();
185 
186  // Add to the list of memory-regions
188  return true;
189  }
190  return false;
191 }
193 {
194 }
195 
196 extern char __start, __end;
197 void ArmV7PhysicalMemoryManager::initialise(const BootstrapStruct_t &info)
198 {
199 // Define beginning and end ranges of usable RAM
200 #ifdef ARM_BEAGLE
201  physical_uintptr_t addr = 0;
202  for (addr = reinterpret_cast<physical_uintptr_t>(&__end); addr < 0x87000000;
203  addr += 0x1000)
204  {
205  m_PageStack.free(addr);
206  }
207  for (addr = 0x88000000; addr < 0x8F000000; addr += 0x1000)
208  {
209  m_PageStack.free(addr);
210  }
211 
212  size_t kernelSize = reinterpret_cast<physical_uintptr_t>(&__end) -
213  reinterpret_cast<physical_uintptr_t>(&__start);
214  if (kernelSize % 4096)
215  {
216  kernelSize += 0x1000;
217  kernelSize &= ~0xFFF;
218  }
219 
220  m_PhysicalRanges.free(0x80000000 + kernelSize, 0xF000000);
221  m_PhysicalRanges.allocateSpecific(
222  0x80000000, reinterpret_cast<physical_uintptr_t>(&__end) - 0x80000000);
223  m_PhysicalRanges.allocateSpecific(0x87000000, 0x1000000);
224 
225  m_NonRAMRanges.free(0, 0x80000000);
226  m_NonRAMRanges.free(0x90000000, 0x60000000);
227 #endif
228 
229  // Initialise the range of virtual space for MemoryRegions
230  m_VirtualMemoryRegions.free(
231  reinterpret_cast<uintptr_t>(KERNEL_VIRTUAL_MEMORYREGION_ADDRESS),
232  KERNEL_VIRTUAL_MEMORYREGION_SIZE);
233 }
234 
236  : m_PageStack(), m_PhysicalRanges(), m_VirtualMemoryRegions(),
237  m_Lock(false, true), m_RegionLock(false, true)
238 {
239 }
241 {
242 }
243 
244 physical_uintptr_t
246 {
247  // Ignore all constraints, none relevant
248  physical_uintptr_t ret = 0;
249  if ((m_StackMax != m_StackSize) && m_StackSize)
250  {
251  m_StackSize -= sizeof(physical_uintptr_t);
252  ret =
253  *(reinterpret_cast<uint32_t *>(m_Stack) +
254  m_StackSize / sizeof(physical_uintptr_t));
255  }
256  return ret;
257 }
258 
260  physical_uintptr_t physicalAddress)
261 {
262 // Input verification (machine-specific)
263 #ifdef ARM_BEAGLE
264  if (physicalAddress < 0x80000000)
265  return;
266  else if (physicalAddress >= 0x90000000)
267  return;
268 #endif
269 
270  // No stack, no free.
271  if (!m_Stack)
272  return;
273 
274  // Expand the stack if we need to
275  if (m_StackMax == m_StackSize)
276  {
277  ArmV7VirtualAddressSpace &AddressSpace =
278  static_cast<ArmV7VirtualAddressSpace &>(
280  if (!AddressSpace.map(
281  physicalAddress, adjust_pointer(m_Stack, m_StackMax),
283  return;
284 
285  m_StackMax += getPageSize();
286  }
287 
288  *(reinterpret_cast<physical_uintptr_t *>(m_Stack) +
289  (m_StackSize / sizeof(physical_uintptr_t))) = physicalAddress;
290  m_StackSize += sizeof(physical_uintptr_t);
291 }
292 
294 {
295  m_StackMax = 0;
296  m_StackSize = 0;
297  m_Stack = KERNEL_VIRTUAL_PAGESTACK_4GB;
298 }
/* Doxygen cross-reference index (generated; retained for reference):
Bootstrap structure passed to the kernel entry point.
void pushBack(const T &value)
Definition: Vector.h:270
static PhysicalMemoryManager & instance()
void free(physical_uintptr_t physicalAddress)
virtual bool map(physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)=0
void * m_VirtualAddress
Definition: MemoryRegion.h:91
physical_uintptr_t m_PhysicalAddress
Definition: MemoryRegion.h:94
static EXPORTED_PUBLIC VirtualAddressSpace & getKernelAddressSpace()
static ProcessorInformation & information()
Definition: Processor.cc:45
size_t m_Size
Definition: MemoryRegion.h:96
virtual physical_uintptr_t allocatePage()
#define WARNING(text)
Definition: Log.h:78
virtual bool allocateRegion(MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags, physical_uintptr_t start=-1)
uintptr_t physicalAddress(physical_uintptr_t address) PURE
Definition: utils.h:38
Special memory entity in the kernel's virtual address space.
Definition: MemoryRegion.h:35
virtual bool map(physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)
virtual void freePage(physical_uintptr_t page)
Implementation of the PhysicalMemoryManager for common arm.
virtual void unmapRegion(MemoryRegion *pRegion)
virtual void freePageUnlocked(physical_uintptr_t page)
physical_uintptr_t allocate(size_t constraints)
void EXPORTED_PUBLIC panic(const char *msg) NORETURN
Definition: panic.cc:121
#define FATAL(text)
Definition: Log.h:89
*/