 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include <gpxe/list.h>

/** @file
 *
 * Dynamic memory allocation
 *
 */
34 /** A free block of memory */
36 /** List of free blocks */
37 struct list_head list;
38 /** Size of this block */
42 #define MIN_MEMBLOCK_SIZE \
43 ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Remaining data; this is what malloc() hands to the caller */
	char data[0];
};
/** List of free memory blocks, kept sorted by ascending address */
static LIST_HEAD ( free_blocks );
57 * Allocate a memory block
59 * @v size Requested size
60 * @v align Physical alignment
61 * @ret ptr Memory block, or NULL
63 * Allocates a memory block @b physically aligned as requested. No
64 * guarantees are provided for the alignment of the virtual address.
66 * @c align must be a power of two. @c size may not be zero.
68 void * alloc_memblock ( size_t size, size_t align ) {
69 struct memory_block *block;
73 struct memory_block *pre;
74 struct memory_block *post;
76 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
77 * calculate alignment mask.
79 size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
80 align_mask = ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 );
82 DBG ( "Allocating %#zx (aligned %#zx)\n", size, align );
84 /* Search through blocks for the first one with enough space */
85 list_for_each_entry ( block, &free_blocks, list ) {
86 pre_size = ( - virt_to_phys ( block ) ) & align_mask;
87 post_size = block->size - pre_size - size;
88 if ( post_size >= 0 ) {
89 /* Split block into pre-block, block, and
90 * post-block. After this split, the "pre"
91 * block is the one currently linked into the
95 block = ( ( ( void * ) pre ) + pre_size );
96 post = ( ( ( void * ) block ) + size );
97 DBG ( "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
98 ( ( ( void * ) pre ) + pre->size ), pre, block,
99 post, ( ( ( void * ) pre ) + pre->size ) );
100 /* If there is a "post" block, add it in to
101 * the free list. Leak it if it is too small
102 * (which can happen only at the very end of
105 if ( ( size_t ) post_size >= MIN_MEMBLOCK_SIZE ) {
106 post->size = post_size;
107 list_add ( &post->list, &pre->list );
109 /* Shrink "pre" block, leaving the main block
110 * isolated and no longer part of the free
113 pre->size = pre_size;
114 /* If there is no "pre" block, remove it from
115 * the list. Also remove it (i.e. leak it) if
116 * it is too small, which can happen only at
117 * the very start of the heap.
119 if ( pre_size < MIN_MEMBLOCK_SIZE )
120 list_del ( &pre->list );
121 /* Zero allocated memory, for calloc() */
122 memset ( block, 0, size );
123 DBG ( "Allocated [%p,%p)\n", block,
124 ( ( ( void * ) block ) + size ) );
129 DBG ( "Failed to allocate %#zx (aligned %#zx)\n", size, align );
134 * Free a memory block
136 * @v ptr Memory allocated by alloc_memblock(), or NULL
137 * @v size Size of the memory
139 * If @c ptr is NULL, no action is taken.
141 void free_memblock ( void *ptr, size_t size ) {
142 struct memory_block *freeing;
143 struct memory_block *block;
145 ssize_t gap_after = -1;
147 /* Allow for ptr==NULL */
151 /* Round up size to match actual size that alloc_memblock()
154 size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
156 freeing->size = size;
157 DBG ( "Freeing [%p,%p)\n", freeing, ( ( ( void * ) freeing ) + size ));
159 /* Insert/merge into free list */
160 list_for_each_entry ( block, &free_blocks, list ) {
161 /* Calculate gaps before and after the "freeing" block */
162 gap_before = ( ( ( void * ) freeing ) -
163 ( ( ( void * ) block ) + block->size ) );
164 gap_after = ( ( ( void * ) block ) -
165 ( ( ( void * ) freeing ) + freeing->size ) );
166 /* Merge with immediately preceding block, if possible */
167 if ( gap_before == 0 ) {
168 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
169 ( ( ( void * ) block ) + block->size ), freeing,
170 ( ( ( void * ) freeing ) + freeing->size ),block,
171 ( ( ( void * ) freeing ) + freeing->size ) );
173 list_del ( &block->list );
176 /* Stop processing as soon as we reach a following block */
177 if ( gap_after >= 0 )
181 /* Insert before the immediately following block. If
182 * possible, merge the following block into the "freeing"
185 DBG ( "[%p,%p)\n", freeing, ( ( ( void * ) freeing ) + freeing->size));
186 list_add_tail ( &freeing->list, &block->list );
187 if ( gap_after == 0 ) {
188 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
189 ( ( ( void * ) freeing ) + freeing->size ), block,
190 ( ( ( void * ) block ) + block->size ), freeing,
191 ( ( ( void * ) block ) + block->size ) );
192 freeing->size += block->size;
193 list_del ( &block->list );
200 * @v size Requested size
201 * @ret ptr Memory, or NULL
203 * Allocates memory with no particular alignment requirement. @c ptr
204 * will be aligned to at least a multiple of sizeof(void*).
206 void * malloc ( size_t size ) {
208 struct autosized_block *block;
210 total_size = size + offsetof ( struct autosized_block, data );
211 block = alloc_memblock ( total_size, 1 );
214 block->size = total_size;
221 * @v size Memory allocated by malloc(), or NULL
223 * Memory allocated with malloc_dma() cannot be freed with free(); it
224 * must be freed with free_dma() instead.
226 * If @c ptr is NULL, no action is taken.
228 void free ( void *ptr ) {
229 struct autosized_block *block;
232 block = container_of ( ptr, struct autosized_block, data );
233 free_memblock ( block, block->size );
238 * Add memory to allocation pool
240 * @v start Start address
243 * Adds a block of memory [start,end) to the allocation pool. This is
244 * a one-way operation; there is no way to reclaim this memory.
246 * @c start must be aligned to at least a multiple of sizeof(void*).
248 void mpopulate ( void *start, size_t len ) {
249 /* Prevent free_memblock() from rounding up len beyond the end
250 * of what we were actually given...
252 free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
#include <vsprintf.h>
258 * Dump free block list
261 void mdumpfree ( void ) {
262 struct memory_block *block;
264 printf ( "Free block list:\n" );
265 list_for_each_entry ( block, &free_blocks, list ) {
266 printf ( "[%p,%p] (size %#zx)\n", block,
267 ( ( ( void * ) block ) + block->size ), block->size );