// The Pedigree Project 0.1 — MemoryPool.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "pedigree/kernel/utilities/MemoryPool.h"
21 #include "pedigree/kernel/LockGuard.h"
22 #include "pedigree/kernel/Log.h"
23 #include "pedigree/kernel/processor/PhysicalMemoryManager.h"
24 #include "pedigree/kernel/processor/VirtualAddressSpace.h"
25 #include "pedigree/kernel/utilities/assert.h"
26 #include "pedigree/kernel/utilities/utility.h"
27 
28 static void map(uintptr_t location)
29 {
31 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
32  VirtualAddressSpace &currva =
33  Processor::information().getVirtualAddressSpace();
35 #endif
36 
37  void *page = page_align(reinterpret_cast<void *>(location));
38  if (!va.isMapped(page))
39  {
40  physical_uintptr_t phys =
42  va.map(
43  phys, page,
45  }
46 
47 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
49 #endif
50 }
51 
52 static bool unmap(uintptr_t location)
53 {
55 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
56  VirtualAddressSpace &currva =
57  Processor::information().getVirtualAddressSpace();
59 #endif
60 
61  void *page = page_align(reinterpret_cast<void *>(location));
62  bool result = false;
63  if ((result = va.isMapped(page)))
64  {
65  size_t flags = 0;
66  physical_uintptr_t phys = 0;
67  va.getMapping(page, phys, flags);
68 
69  va.unmap(page);
71  }
72 
73 #ifdef KERNEL_NEEDS_ADDRESS_SPACE_SWITCH
75 #endif
76 
77  return result;
78 }
79 
80 MemoryPoolPressureHandler::MemoryPoolPressureHandler(MemoryPool *pool)
81  : m_Pool(pool)
82 {
83 }
84 
85 MemoryPoolPressureHandler::~MemoryPoolPressureHandler()
86 {
87 }
88 
89 const String MemoryPoolPressureHandler::getMemoryPressureDescription()
90 {
91  return String("MemoryPool: freeing unused pages");
92 }
93 
95 {
96  return m_Pool->trim();
97 }
98 
// Default constructor: pool named "memory-pool", 1024-byte buffers.
// No memory is reserved here — m_bInitialised stays false, so
// allocate()/allocateNow() return 0 until initialise() succeeds.
MemoryPool::MemoryPool()
    :
#ifdef THREADS
    // Condition/lock pair used to block allocators when the pool drains.
    m_Condition(), m_Lock(),
#endif
    m_BufferSize(1024), m_BufferCount(0), m_Pool("memory-pool"),
    m_bInitialised(false), m_AllocBitmap(), m_PressureHandler(this)
{
}
108 
// Named constructor: identical to the default constructor except the
// backing MemoryRegion is labelled with poolName (visible in diagnostics).
// initialise() must still be called before the pool is usable.
MemoryPool::MemoryPool(const char *poolName)
    :
#ifdef THREADS
    // Condition/lock pair used to block allocators when the pool drains.
    m_Condition(), m_Lock(),
#endif
    m_BufferSize(1024), m_BufferCount(0), m_Pool(poolName),
    m_bInitialised(false), m_AllocBitmap(), m_PressureHandler(this)
{
}
118 
// Destructor: marks the pool uninitialised first so racing allocate() calls
// bail out, then wakes every thread blocked in allocateDoer() so none is
// left sleeping on a dying condition variable.
// NOTE(review): despite the comment below, no pages are explicitly freed
// here — presumably the m_Pool MemoryRegion member releases the region in
// its own destructor; confirm.
MemoryPool::~MemoryPool()
{
    // Free all the buffers
    m_bInitialised = false;
#ifdef THREADS
    m_Condition.broadcast();
#endif
}
127 
128 bool MemoryPool::initialise(size_t poolSize, size_t bufferSize)
129 {
130 #ifdef THREADS
131  LockGuard<Mutex> guard(m_Lock);
132 #endif
133 
134  if (m_bInitialised)
135  return true;
136 
137  if (!poolSize || !bufferSize ||
138  (bufferSize > (poolSize * PhysicalMemoryManager::getPageSize())))
139  return false;
140 
141  // Find the next power of two for bufferSize, if it isn't already one
142  if ((bufferSize & (bufferSize - 1)))
143  {
144  size_t powerOf2 = 1;
145  size_t lg2 = 0;
146  while (powerOf2 < bufferSize)
147  {
148  powerOf2 <<= 1;
149  lg2++;
150  }
151  bufferSize = powerOf2;
152  }
153 
154  m_BufferSize = bufferSize;
155 
156  NOTICE(
157  "MemoryPool: allocating memory pool '"
158  << m_Pool.name() << "', " << Dec << ((poolSize * 4096) / 1024) << Hex
159  << "K. Buffer size is " << m_BufferSize << ".");
161  m_Pool, poolSize, PhysicalMemoryManager::virtualOnly,
163  if (!m_bInitialised)
164  return false;
165 
166  m_BufferCount = (poolSize * 0x1000) / bufferSize;
167 
168  // Register us as a memory pressure handler, with top priority. We should
169  // very easily be able to free pages in most cases.
170  MemoryPressureManager::instance().registerHandler(
171  MemoryPressureManager::HighestPriority, &m_PressureHandler);
172 
173  return true;
174 }
175 
177 {
178  if (!m_bInitialised)
179  return 0;
180 
181  return allocateDoer(true);
182 }
183 
185 {
186  if (!m_bInitialised)
187  return 0;
188 
189  return allocateDoer(false);
190 }
191 
192 uintptr_t MemoryPool::allocateDoer(bool canBlock)
193 {
194 #ifdef THREADS
195  m_Lock.acquire();
196 #endif
197 
198  // Find a free buffer
199  size_t poolSize = m_Pool.size();
200  size_t nBuffers = poolSize / m_BufferSize;
201  uintptr_t poolBase = reinterpret_cast<uintptr_t>(m_Pool.virtualAddress());
202 
203  size_t n = 0;
204 #ifdef THREADS
205  while (true)
206  {
207  if (!m_BufferCount)
208  {
209  if (!canBlock)
210  {
211  m_Lock.release();
212  return 0;
213  }
214 
215  ConditionVariable::WaitResult result = m_Condition.wait(m_Lock);
216  if (result.hasError())
217  {
219  return 0;
220  }
221  continue;
222  }
223 #else
224  if (!m_BufferCount)
225  {
226  return 0;
227  }
228 #endif
229 
230 #ifdef THREADS
231  // Have a buffer available.
232  n = m_AllocBitmap.getFirstClear();
233  assert(n < nBuffers);
234  m_AllocBitmap.set(n);
235  break;
236  }
237 #endif
238 
239  uintptr_t result = poolBase + (n * 0x1000);
240  map(result);
241 
242  --m_BufferCount;
243 
244 #ifdef THREADS
245  m_Lock.release();
246 #endif
247 
248  return result;
249 }
250 
251 void MemoryPool::free(uintptr_t buffer)
252 {
253 #ifdef THREADS
254  LockGuard<Mutex> guard(m_Lock);
255 #endif
256 
257  if (!m_bInitialised)
258  return;
259 
260  size_t n = (buffer - reinterpret_cast<uintptr_t>(m_Pool.virtualAddress())) /
261  m_BufferSize;
262  m_AllocBitmap.clear(n);
263 
264  ++m_BufferCount;
265 }
266 
268 {
269  size_t poolSize = m_Pool.size();
270  size_t nBuffers = poolSize / m_BufferSize;
271  uintptr_t poolBase = reinterpret_cast<uintptr_t>(m_Pool.virtualAddress());
272 
273  // Easy trim if buffers are pages or larger (remember that buffer sizes are
274  // rounded up to the next power of two).
275  size_t nFreed = 0;
276  if (m_BufferSize >= PhysicalMemoryManager::getPageSize())
277  {
278  for (size_t n = 0; n < nBuffers; ++n)
279  {
280  if (!m_AllocBitmap.test(n))
281  {
282  uintptr_t page = poolBase + (n * 0x1000);
283  for (size_t off = 0; off < m_BufferSize;
285  {
286  if (unmap(page + off))
287  ++nFreed;
288  }
289  }
290  }
291  }
292  else
293  {
294  // Need to find N contiguous sets of bits.
295  // We also need to navigate in blocks of pages.
296  size_t N = PhysicalMemoryManager::getPageSize() / m_BufferSize;
297  for (size_t n = 0, m = 0; n < nBuffers; n += N, ++m)
298  {
299  if (m_AllocBitmap.test(n))
300  continue;
301 
302  bool ok = true;
303  for (size_t y = 1; y < N; ++y)
304  {
305  if (m_AllocBitmap.test(n + y))
306  {
307  ok = false;
308  break;
309  }
310  }
311 
312  if (!ok)
313  continue;
314 
315  uintptr_t page = poolBase + (m * 0x1000);
316  if (unmap(page))
317  ++nFreed;
318  }
319  }
320 
321  return nFreed > 0;
322 }
/* Doxygen cross-reference residue (generated index entries, not source code):
virtual void unmap(void *virtualAddress)=0
static PhysicalMemoryManager & instance()
virtual void getMapping(void *virtualAddress, physical_uintptr_t &physicalAddress, size_t &flags)=0
virtual physical_uintptr_t allocatePage(size_t pageConstraints=0)=0
virtual bool isMapped(void *virtualAddress)=0
virtual bool map(physical_uintptr_t physicalAddress, void *virtualAddress, size_t flags)=0
static EXPORTED_PUBLIC VirtualAddressSpace & getKernelAddressSpace()
Definition: String.h:49
Definition: Result.h:36
static ProcessorInformation & information()
Definition: Processor.cc:45
static void switchAddressSpace(VirtualAddressSpace &AddressSpace)
bool trim()
Trims the pool, freeing pages that are not otherwise in use.
Definition: MemoryPool.cc:267
virtual bool compact()
Definition: MemoryPool.cc:94
#define NOTICE(text)
Definition: Log.h:74
uintptr_t allocate()
Definition: MemoryPool.cc:176
Definition: Log.h:136
#define assert(x)
Definition: assert.h:37
bool initialise(size_t poolSize, size_t bufferSize=1024)
Definition: MemoryPool.cc:128
uintptr_t allocateNow()
Definition: MemoryPool.cc:184
uintptr_t allocateDoer(bool canBlock)
Allocation doer.
Definition: MemoryPool.cc:192
void free(uintptr_t buffer)
Frees an allocated buffer, allowing it to be used elsewhere.
Definition: MemoryPool.cc:251
virtual bool allocateRegion(MemoryRegion &Region, size_t cPages, size_t pageConstraints, size_t Flags, physical_uintptr_t start=-1)=0
EXPORTED_PUBLIC void * page_align(void *p) PURE
Definition: utility.cc:28
virtual void freePage(physical_uintptr_t page)=0
Definition: Log.h:138
void registerHandler(size_t prio, MemoryPressureHandler *pHandler)
*/