ceph/src/seastar/dpdk/lib/librte_eal/common/malloc_elem.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include "malloc_elem.h"
#include "malloc_heap.h"

#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)

/*
 * initialise a general malloc_elem header structure
 */
void
malloc_elem_init(struct malloc_elem *elem,
        struct malloc_heap *heap, const struct rte_memseg *ms, size_t size)
{
        elem->heap = heap;
        elem->ms = ms;
        elem->prev = NULL;
        memset(&elem->free_list, 0, sizeof(elem->free_list));
        elem->state = ELEM_FREE;
        elem->size = size;
        elem->pad = 0;
        set_header(elem);
        set_trailer(elem);
}

/*
 * initialise a dummy malloc_elem header for the end-of-memseg marker
 */
void
malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
{
        malloc_elem_init(elem, prev->heap, prev->ms, 0);
        elem->prev = prev;
        elem->state = ELEM_BUSY; /* mark busy so it's never merged */
}

/*
 * calculate the starting point of where data of the requested size
 * and alignment would fit in the current element. If the data doesn't
 * fit, return NULL.
 */
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
        size_t bound)
{
        const size_t bmask = ~(bound - 1);
        uintptr_t end_pt = (uintptr_t)elem +
                elem->size - MALLOC_ELEM_TRAILER_LEN;
        uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
        uintptr_t new_elem_start;

        /* check boundary */
        if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
                end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
                new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
                if (((end_pt - 1) & bmask) != (new_data_start & bmask))
                        return NULL;
        }

        new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;

        /* if the new start point is before the existing start, it won't fit */
        return (new_elem_start < (uintptr_t)elem) ?
                NULL : (void *)new_elem_start;
}

/*
 * use elem_start_pt to determine if we can meet the size and
 * alignment request from the current element
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
        size_t bound)
{
        return elem_start_pt(elem, size, align, bound) != NULL;
}

/*
 * split an existing element into two smaller elements at the given
 * split_pt parameter.
 */
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
        struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
        const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
        const size_t new_elem_size = elem->size - old_elem_size;

        malloc_elem_init(split_pt, elem->heap, elem->ms, new_elem_size);
        split_pt->prev = elem;
        next_elem->prev = split_pt;
        elem->size = old_elem_size;
        set_trailer(elem);
}
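
/*
 * Illustrative layout, not part of the original source. Splitting an
 * element of size S at split_pt leaves two back-to-back elements:
 *
 *   before: | elem (size S)                                | next_elem |
 *   after:  | elem (old_elem_size) | split_pt (S - old)    | next_elem |
 *
 * next_elem->prev is rewired to split_pt so that backward coalescing in
 * malloc_elem_free() still finds the correct neighbour.
 */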

/*
 * Given an element size, compute its freelist index.
 * We free an element into the freelist containing similarly-sized elements.
 * We try to allocate elements starting with the freelist containing
 * similarly-sized elements, and if necessary, we search freelists
 * containing larger elements.
 *
 * Example element size ranges for a heap with five free lists:
 *   heap->free_head[0] - (0   , 2^8]
 *   heap->free_head[1] - (2^8 , 2^10]
 *   heap->free_head[2] - (2^10, 2^12]
 *   heap->free_head[3] - (2^12, 2^14]
 *   heap->free_head[4] - (2^14, MAX_SIZE]
 */
size_t
malloc_elem_free_list_index(size_t size)
{
#define MALLOC_MINSIZE_LOG2   8
#define MALLOC_LOG2_INCREMENT 2

        size_t log2;
        size_t index;

        if (size <= (1UL << MALLOC_MINSIZE_LOG2))
                return 0;

        /* Find next power of 2 >= size. */
        log2 = sizeof(size) * 8 - __builtin_clzl(size - 1);

        /* Compute freelist index, based on log2(size). */
        index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
                MALLOC_LOG2_INCREMENT;

        return index <= RTE_HEAP_NUM_FREELISTS - 1 ?
                index : RTE_HEAP_NUM_FREELISTS - 1;
}
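
/*
 * Worked example, not part of the original source: for size = 5000 on a
 * 64-bit build, log2 = 64 - __builtin_clzl(4999) = 13, so
 * index = (13 - 8 + 2 - 1) / 2 = 3, i.e. the (2^12, 2^14] list in the
 * table above. Any size <= 2^8 short-circuits to index 0.
 */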

/*
 * Add the specified element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
        size_t idx;

        idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
        elem->state = ELEM_FREE;
        LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}

/*
 * Remove the specified element from its heap's free list.
 */
static void
elem_free_list_remove(struct malloc_elem *elem)
{
        LIST_REMOVE(elem, free_list);
}

/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 * This function is only called from malloc_heap_alloc so parameter checking
 * is not done here, as it's done there previously.
 */
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
        size_t bound)
{
        struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound);
        const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
        const size_t trailer_size = elem->size - old_elem_size - size -
                MALLOC_ELEM_OVERHEAD;

        elem_free_list_remove(elem);

        if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
                /* split it, too much free space after elem */
                struct malloc_elem *new_free_elem =
                        RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);

                split_elem(elem, new_free_elem);
                malloc_elem_free_list_insert(new_free_elem);
        }

        if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
                /* don't split it, pad the element instead */
                elem->state = ELEM_BUSY;
                elem->pad = old_elem_size;

                /* put a dummy header in padding, to point to real element header */
                if (elem->pad > 0) { /* pad will be at least 64 bytes, as everything
                                      * is cache-line aligned */
                        new_elem->pad = elem->pad;
                        new_elem->state = ELEM_PAD;
                        new_elem->size = elem->size - elem->pad;
                        set_header(new_elem);
                }

                return new_elem;
        }

        /* we are going to split the element in two. The original element
         * remains free, and the new element is the one allocated.
         * Re-insert original element, in case its new size makes it
         * belong on a different list.
         */
        split_elem(elem, new_elem);
        new_elem->state = ELEM_BUSY;
        malloc_elem_free_list_insert(elem);

        return new_elem;
}
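
/*
 * Illustrative layout, not part of the original source. In the "pad"
 * case above, the real header stays at the front of the element and a
 * dummy ELEM_PAD header sits directly below the returned data pointer:
 *
 *   | elem: ELEM_BUSY, pad = P | padding | new_elem: ELEM_PAD, pad = P | data |
 *
 * Code that is later handed the data pointer can read the dummy header,
 * see ELEM_PAD, and step back P bytes to reach the real element header.
 */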

/*
 * join two struct malloc_elem together. elem1 and elem2 must
 * be contiguous in memory.
 */
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
        struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);
        elem1->size += elem2->size;
        next->prev = elem1;
}

/*
 * free a malloc_elem block by adding it to the free list. If the blocks
 * immediately before or immediately after the newly freed block are also
 * free, the blocks are merged together.
 */
int
malloc_elem_free(struct malloc_elem *elem)
{
        if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
                return -1;

        rte_spinlock_lock(&(elem->heap->lock));
        size_t sz = elem->size - sizeof(*elem);
        uint8_t *ptr = (uint8_t *)&elem[1];
        struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
        if (next->state == ELEM_FREE) {
                /* remove from free list, join to this one */
                elem_free_list_remove(next);
                join_elem(elem, next);
                sz += sizeof(*elem);
        }

        /* check if previous element is free, if so join with it and return,
         * need to re-insert in free list, as that element's size is changing
         */
        if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
                elem_free_list_remove(elem->prev);
                join_elem(elem->prev, elem);
                sz += sizeof(*elem);
                ptr -= sizeof(*elem);
                elem = elem->prev;
        }
        malloc_elem_free_list_insert(elem);

        /* decrease heap's count of allocated elements */
        elem->heap->alloc_count--;

        /* zero the freed data region before publishing it back */
        memset(ptr, 0, sz);

        rte_spinlock_unlock(&(elem->heap->lock));

        return 0;
}
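
/*
 * Illustrative note, not part of the original source: freeing B in a
 * layout | A: free | B: busy | C: free | first absorbs C forwards, then
 * B is absorbed backwards into A, leaving one free element whose new
 * size may land it on a different free list. ptr/sz grow with each join
 * so that the memset() above zeroes the whole merged data span,
 * including the headers swallowed by the joins.
 */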

/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
        const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
        /* if we request a smaller size, then always return ok */
        const size_t current_size = elem->size - elem->pad;
        if (current_size >= new_size)
                return 0;

        struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
        rte_spinlock_lock(&elem->heap->lock);
        if (next->state != ELEM_FREE)
                goto err_return;
        if (current_size + next->size < new_size)
                goto err_return;

        /* we now know the element fits, so remove from free list,
         * join the two
         */
        elem_free_list_remove(next);
        join_elem(elem, next);

        if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
                /* now we have a big block together. Let's cut it down a bit,
                 * by splitting */
                struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
                split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
                split_elem(elem, split_pt);
                malloc_elem_free_list_insert(split_pt);
        }
        rte_spinlock_unlock(&elem->heap->lock);
        return 0;

err_return:
        rte_spinlock_unlock(&elem->heap->lock);
        return -1;
}
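
/*
 * Usage note, not part of the original source: malloc_elem_resize() only
 * grows in place by swallowing a free neighbour to the right; callers
 * such as rte_realloc() are expected to fall back to an
 * allocate-copy-free path when it returns -1.
 */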