4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk/stdinc.h"
38 #include <rte_config.h>
39 #include <rte_cycles.h>
40 #include <rte_malloc.h>
41 #include <rte_mempool.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
46 virt_to_phys(void *vaddr
)
50 #if RTE_VERSION >= RTE_VERSION_NUM(17, 11, 0, 3)
51 ret
= rte_malloc_virt2iova(vaddr
);
52 if (ret
!= RTE_BAD_IOVA
) {
56 ret
= rte_malloc_virt2phy(vaddr
);
57 if (ret
!= RTE_BAD_PHYS_ADDR
) {
62 return spdk_vtophys(vaddr
);
/*
 * Allocate an unzeroed buffer from the DPDK heap.  When phys_addr is
 * non-NULL the buffer's physical address is returned through it.
 * NOTE(review): the flags validation was restored from upstream SPDK
 * (not visible in this chunk) — confirm against the tree.
 */
void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf;

	if (flags == 0) {
		/* Caller must request at least one allocation attribute. */
		return NULL;
	}

	buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf != NULL && phys_addr != NULL) {
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}
/*
 * Allocate a zeroed buffer from the DPDK heap; same contract as
 * spdk_malloc() plus the returned memory is memset to zero.
 */
void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf = spdk_malloc(size, align, phys_addr, socket_id, flags);

	if (buf != NULL) {
		memset(buf, 0, size);
	}
	return buf;
}
96 spdk_dma_malloc_socket(size_t size
, size_t align
, uint64_t *phys_addr
, int socket_id
)
98 return spdk_malloc(size
, align
, phys_addr
, socket_id
, (SPDK_MALLOC_DMA
| SPDK_MALLOC_SHARE
));
102 spdk_dma_zmalloc_socket(size_t size
, size_t align
, uint64_t *phys_addr
, int socket_id
)
104 return spdk_zmalloc(size
, align
, phys_addr
, socket_id
, (SPDK_MALLOC_DMA
| SPDK_MALLOC_SHARE
));
108 spdk_dma_malloc(size_t size
, size_t align
, uint64_t *phys_addr
)
110 return spdk_dma_malloc_socket(size
, align
, phys_addr
, SPDK_ENV_SOCKET_ID_ANY
);
114 spdk_dma_zmalloc(size_t size
, size_t align
, uint64_t *phys_addr
)
116 return spdk_dma_zmalloc_socket(size
, align
, phys_addr
, SPDK_ENV_SOCKET_ID_ANY
);
/*
 * Resize a DPDK-heap buffer.  On success the (possibly moved) buffer is
 * returned and, when phys_addr is non-NULL, its new physical address is
 * stored through it.  rte_realloc() semantics apply on failure.
 */
void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *resized = rte_realloc(buf, size, align);

	if (resized != NULL && phys_addr != NULL) {
		*phys_addr = virt_to_phys(resized);
	}
	return resized;
}
/*
 * Release a buffer obtained from spdk_dma_malloc()/spdk_dma_zmalloc().
 * NOTE(review): body not visible in this chunk; restored per upstream
 * SPDK (a plain rte_free) — confirm against the tree.
 */
void
spdk_dma_free(void *buf)
{
	rte_free(buf);
}
136 spdk_memzone_reserve_aligned(const char *name
, size_t len
, int socket_id
,
137 unsigned flags
, unsigned align
)
139 const struct rte_memzone
*mz
;
140 unsigned dpdk_flags
= 0;
142 #if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
143 /* Older DPDKs do not offer such flag since their
144 * memzones are iova-contiguous by default.
146 if ((flags
& SPDK_MEMZONE_NO_IOVA_CONTIG
) == 0) {
147 dpdk_flags
|= RTE_MEMZONE_IOVA_CONTIG
;
151 if (socket_id
== SPDK_ENV_SOCKET_ID_ANY
) {
152 socket_id
= SOCKET_ID_ANY
;
155 mz
= rte_memzone_reserve_aligned(name
, len
, socket_id
, dpdk_flags
, align
);
158 memset(mz
->addr
, 0, len
);
166 spdk_memzone_reserve(const char *name
, size_t len
, int socket_id
, unsigned flags
)
168 return spdk_memzone_reserve_aligned(name
, len
, socket_id
, flags
,
169 RTE_CACHE_LINE_SIZE
);
173 spdk_memzone_lookup(const char *name
)
175 const struct rte_memzone
*mz
= rte_memzone_lookup(name
);
/*
 * Free a memzone by name.  Returns the DPDK result on success, or a
 * negative value when the zone was not found.
 */
int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz == NULL) {
		return -1;
	}

	return rte_memzone_free(mz);
}
/*
 * Dump all memzones to the given stream.
 * NOTE(review): body not visible in this chunk; restored per upstream
 * SPDK (delegates to rte_memzone_dump) — confirm against the tree.
 */
void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}
202 struct spdk_mempool
*
203 spdk_mempool_create_ctor(const char *name
, size_t count
,
204 size_t ele_size
, size_t cache_size
, int socket_id
,
205 spdk_mempool_obj_cb_t
*obj_init
, void *obj_init_arg
)
207 struct rte_mempool
*mp
;
210 if (socket_id
== SPDK_ENV_SOCKET_ID_ANY
) {
211 socket_id
= SOCKET_ID_ANY
;
214 /* No more than half of all elements can be in cache */
215 tmp
= (count
/ 2) / rte_lcore_count();
216 if (cache_size
> tmp
) {
220 if (cache_size
> RTE_MEMPOOL_CACHE_MAX_SIZE
) {
221 cache_size
= RTE_MEMPOOL_CACHE_MAX_SIZE
;
224 mp
= rte_mempool_create(name
, count
, ele_size
, cache_size
,
225 0, NULL
, NULL
, (rte_mempool_obj_cb_t
*)obj_init
, obj_init_arg
,
226 socket_id
, MEMPOOL_F_NO_PHYS_CONTIG
);
228 return (struct spdk_mempool
*)mp
;
/* Create a mempool with no per-object constructor. */
struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}
241 spdk_mempool_get_name(struct spdk_mempool
*mp
)
243 return ((struct rte_mempool
*)mp
)->name
;
247 spdk_mempool_free(struct spdk_mempool
*mp
)
249 #if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 1)
250 rte_mempool_free((struct rte_mempool
*)mp
);
/* Pop one element from the pool; NULL when the pool is exhausted. */
void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}

	return ele;
}
/* Pop `count` elements at once; all-or-nothing per rte semantics. */
int
spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count);
}
/* Return one element to the pool. */
void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}
/* Return `count` elements to the pool in one call. */
void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}
286 spdk_mempool_count(const struct spdk_mempool
*pool
)
288 #if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 1)
289 return rte_mempool_count((struct rte_mempool
*)pool
);
291 return rte_mempool_avail_count((struct rte_mempool
*)pool
);
296 spdk_process_is_primary(void)
298 return (rte_eal_process_type() == RTE_PROC_PRIMARY
);
/* Current timer tick count (monotonic cycle counter). */
uint64_t spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}
/* Frequency of the tick counter, in ticks per second. */
uint64_t spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}
/*
 * Busy-wait for the given number of microseconds.
 * NOTE(review): body not visible in this chunk; restored per upstream
 * SPDK (delegates to rte_delay_us) — confirm against the tree.
 */
void spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}
317 spdk_unaffinitize_thread(void)
319 rte_cpuset_t new_cpuset
;
322 CPU_ZERO(&new_cpuset
);
324 num_cores
= sysconf(_SC_NPROCESSORS_CONF
);
326 /* Create a mask containing all CPUs */
327 for (i
= 0; i
< num_cores
; i
++) {
328 CPU_SET(i
, &new_cpuset
);
331 rte_thread_set_affinity(&new_cpuset
);
335 spdk_call_unaffinitized(void *cb(void *arg
), void *arg
)
337 rte_cpuset_t orig_cpuset
;
344 rte_thread_get_affinity(&orig_cpuset
);
346 spdk_unaffinitize_thread();
350 rte_thread_set_affinity(&orig_cpuset
);
356 spdk_ring_create(enum spdk_ring_type type
, size_t count
, int socket_id
)
359 static uint32_t ring_num
= 0;
363 case SPDK_RING_TYPE_SP_SC
:
364 flags
= RING_F_SP_ENQ
| RING_F_SC_DEQ
;
366 case SPDK_RING_TYPE_MP_SC
:
367 flags
= RING_F_SC_DEQ
;
369 case SPDK_RING_TYPE_MP_MC
:
376 snprintf(ring_name
, sizeof(ring_name
), "ring_%u_%d",
377 __sync_fetch_and_add(&ring_num
, 1), getpid());
379 return (struct spdk_ring
*)rte_ring_create(ring_name
, count
, socket_id
, flags
);
/* Destroy a ring created by spdk_ring_create(). */
void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}
/* Number of entries currently queued in the ring. */
size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}
395 spdk_ring_enqueue(struct spdk_ring
*ring
, void **objs
, size_t count
)
398 #if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 0)
399 rc
= rte_ring_enqueue_bulk((struct rte_ring
*)ring
, objs
, count
);
406 rc
= rte_ring_enqueue_bulk((struct rte_ring
*)ring
, objs
, count
, NULL
);
412 spdk_ring_dequeue(struct spdk_ring
*ring
, void **objs
, size_t count
)
414 #if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 0)
415 return rte_ring_dequeue_burst((struct rte_ring
*)ring
, objs
, count
);
417 return rte_ring_dequeue_burst((struct rte_ring
*)ring
, objs
, count
, NULL
);