mem.c
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_LIBC_MALLOC
#include <stdlib.h> /* for malloc()/free() */
#endif

#if MEM_LIBC_MALLOC || MEM_USE_POOLS

/** mem_init is not used when using C library malloc() or memory pools. */
void
mem_init(void)
{
}

/** mem_trim is not used when using C library malloc() or memory pools:
 * the allocation cannot be shrunk in place, so the block is returned unchanged.
 */
void*
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */

#if MEM_LIBC_MALLOC
/* lwIP heap implemented using C library malloc() */

/* in case C library malloc() needs extra protection,
 * allow these defines to be overridden.
 */
#ifndef mem_clib_free
#define mem_clib_free free
#endif
#ifndef mem_clib_malloc
#define mem_clib_malloc malloc
#endif
#ifndef mem_clib_calloc
#define mem_clib_calloc calloc
#endif

#if LWIP_STATS && MEM_STATS
#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
#else
#define MEM_LIBC_STATSHELPER_SIZE 0
#endif

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  void* ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
  if (ret == NULL) {
    MEM_STATS_INC(err);
  } else {
    LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
#if LWIP_STATS && MEM_STATS
    *(mem_size_t*)ret = size;
    ret = (u8_t*)ret + MEM_LIBC_STATSHELPER_SIZE;
    MEM_STATS_INC_USED(used, size);
#endif
  }
  return ret;
}

/** Put memory back on the heap.
 *
 * @param rmem is the pointer as returned by a previous call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
#if LWIP_STATS && MEM_STATS
  rmem = (u8_t*)rmem - MEM_LIBC_STATSHELPER_SIZE;
  MEM_STATS_DEC_USED(used, *(mem_size_t*)rmem);
#endif
  mem_clib_free(rmem);
}
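
/* Illustrative sketch (not part of the original file): with LWIP_STATS &&
 * MEM_STATS enabled, mem_malloc() above stores the requested size in a small
 * aligned header in front of the user data, and mem_free() steps back over it
 * before handing the block to mem_clib_free(). Hypothetical round-trip,
 * assuming that configuration: */
#if 0 /* example only */
static void stats_prefix_roundtrip(void)
{
  void *p = mem_malloc(64);   /* allocates MEM_LIBC_STATSHELPER_SIZE + 64 bytes */
  if (p != NULL) {
    /* the stored size sits immediately before the returned pointer */
    mem_size_t stored = *(mem_size_t *)(void *)((u8_t *)p - MEM_LIBC_STATSHELPER_SIZE);
    LWIP_ASSERT("header holds the requested size", stored == 64);
    mem_free(p);              /* steps back over the header and frees */
  }
}
#endif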

#elif MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper*)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /* try a bigger pool if this one is empty */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC(err);
        return NULL;
      }
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC(err);
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size) */
  memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}
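
/* Illustrative layout (not part of the original file): each allocation is a
 * single pool element prefixed by the struct memp_malloc_helper that records
 * which pool it came from (the struct is declared elsewhere in lwIP):
 *
 *   memp_malloc(poolnr) result
 *   |
 *   v
 *   +---------------------------+-------------------------------------+
 *   | struct memp_malloc_helper | user data ('size' bytes, plus slack |
 *   | (poolnr, size)            | up to the pool's element size)      |
 *   +---------------------------+-------------------------------------+
 *                               ^ pointer returned to the caller
 */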

/**
 * Free memory previously allocated by mem_malloc: load the pool number the
 * element was allocated from and hand it back to memp_free() for that pool.
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  MEM_STATS_DEC_USED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
      hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t*)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned, since for getting its size
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
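
/* Illustrative heap view (not part of the original file): the heap is an
 * array of bytes carved into blocks, each starting with a struct mem header;
 * 'next' and 'prev' are byte offsets into that array rather than pointers,
 * which keeps them small and independent of the heap's base address:
 *
 *   ram                                                      ram_end
 *   +--------+---------+--------+---------+---------+--------+
 *   | struct | user    | struct | user    |   ...   | struct |
 *   | mem    | data    | mem    | data    |         | mem    |
 *   +--------+---------+--------+---------+---------+--------+
 *             ^ mem_malloc() return values point just past a header
 */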

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs.
 */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
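
/* Worked example (not part of the original file), assuming MEM_ALIGNMENT == 4
 * and a 4-byte mem_size_t: LWIP_MEM_ALIGN_SIZE() rounds up to a multiple of
 * the alignment, so a 13-byte request costs one aligned header plus 16 data
 * bytes:
 *
 *   LWIP_MEM_ALIGN_SIZE(13)          == 16
 *   SIZEOF_STRUCT_MEM                == LWIP_MEM_ALIGN_SIZE(4 + 4 + 1) == 12
 *   total heap bytes for the request == 12 + 16 == 28
 */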

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (one extra
 * struct mem at the end plus room for alignment, as calculated below). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap: we need one struct mem at the end and some room for alignment */
LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, used for faster search */
static struct mem *lfree;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using the mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()   sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using the mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */


/**
 * "Plug holes": combine a freed block with unused neighbours so that
 * adjacent free regions always form a single block.
 *
 * @param mem points to a struct mem which has just been freed
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
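
/* Illustrative scenario (not part of the original file): freeing the middle
 * block below lets plug_holes() first merge forward into nmem, then merge the
 * result backward into pmem, leaving one free block spanning all three:
 *
 *   before:  [pmem: free][mem: just freed][nmem: free][used ...]
 *   after :  [pmem: free, spanning all three          ][used ...]
 *
 * lfree ends up at pmem if it pointed at mem or nmem before the merge.
 */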

/**
 * Initialize the heap: set up the start block, the end sentinel and the
 * lowest-free pointer.
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}
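
/* Illustrative state after mem_init() (not part of the original file):
 *
 *   ram                                        &ram[MEM_SIZE_ALIGNED]
 *   +-------------------------------------------+---------------------+
 *   | struct mem { next = MEM_SIZE_ALIGNED,     | ram_end sentinel:   |
 *   |   prev = 0, used = 0 }, then free space   | used = 1, next and  |
 *   |                                           | prev point to itself|
 *   +-------------------------------------------+---------------------+
 *   ^ lfree points here: one free block covering the whole heap
 */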

/**
 * Put a struct mem back on the heap.
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the original size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with the mutex OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little closer */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     * region that couldn't hold data, but when mem->next gets freed,
     * the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
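
/* Usage sketch (not part of the original file): mem_trim() only shrinks and
 * never moves the data, so on success it returns the same pointer it was
 * given. Hypothetical example with made-up sizes: */
#if 0 /* example only */
static void trim_example(void)
{
  void *buf = mem_malloc(512);  /* allocate a worst-case buffer */
  if (buf != NULL) {
    /* ... later we learn only 100 bytes are really needed ... */
    void *trimmed = mem_trim(buf, 100); /* returns buf; the tail goes back to the heap */
    if (trimmed != NULL) {
      buf = trimmed;
    }
    mem_free(buf);
  }
}
#endif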

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           * region that couldn't hold data, but when mem->next gets freed,
           * the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will
           * always be used at this point: if not, we would have 2 unused structs in a row and
           * plug_holes should have taken care of that).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = (struct mem *)(void *)&ram[cur->next];
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}
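
/* Usage sketch (not part of the original file): mem_malloc() returns NULL
 * instead of blocking when no block is large enough, so every caller has to
 * check the result. Hypothetical helper using the err.h codes: */
#if 0 /* example only */
static err_t alloc_scratch(void **out, mem_size_t len)
{
  void *p = mem_malloc(len);  /* first-fit scan starting at lfree */
  if (p == NULL) {
    return ERR_MEM;           /* heap exhausted or len > MEM_SIZE_ALIGNED */
  }
  *out = p;                   /* aligned to MEM_ALIGNMENT */
  return ERR_OK;
}
#endif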

#endif /* MEM_USE_POOLS */

#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return mem_clib_calloc(count, size);
}

#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */

/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, (size_t)count * (size_t)size);
  }
  return p;
}
#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
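
/* Defensive sketch (not part of the original file): the 'count * size'
 * multiplication above is done in mem_size_t and can wrap for large
 * arguments. A cautious caller may want to reject such requests up front;
 * hypothetical wrapper: */
#if 0 /* example only */
static void *calloc_checked(mem_size_t count, mem_size_t size)
{
  if ((size != 0) && (count > (mem_size_t)(-1) / size)) {
    return NULL; /* count * size would overflow mem_size_t */
  }
  return mem_calloc(count, size);
}
#endif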