git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / dpdk / lib / librte_mempool / rte_mempool_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation.
3 * Copyright(c) 2016 6WIND S.A.
4 */
5
6 #include <stdio.h>
7 #include <string.h>
8
9 #include <rte_mempool.h>
10 #include <rte_errno.h>
11 #include <rte_dev.h>
12
/* indirect jump table to support external memory pools. */
/*
 * Global registry of mempool driver ops. Slots in .ops[] are claimed
 * sequentially by rte_mempool_register_ops(); .sl serializes registration
 * (readers index the table lock-free once an entry is published).
 */
struct rte_mempool_ops_table rte_mempool_ops_table = {
	.sl =  RTE_SPINLOCK_INITIALIZER,
	.num_ops = 0
};
18
19 /* add a new ops struct in rte_mempool_ops_table, return its index. */
20 int
21 rte_mempool_register_ops(const struct rte_mempool_ops *h)
22 {
23 struct rte_mempool_ops *ops;
24 int16_t ops_index;
25
26 rte_spinlock_lock(&rte_mempool_ops_table.sl);
27
28 if (rte_mempool_ops_table.num_ops >=
29 RTE_MEMPOOL_MAX_OPS_IDX) {
30 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
31 RTE_LOG(ERR, MEMPOOL,
32 "Maximum number of mempool ops structs exceeded\n");
33 return -ENOSPC;
34 }
35
36 if (h->alloc == NULL || h->enqueue == NULL ||
37 h->dequeue == NULL || h->get_count == NULL) {
38 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
39 RTE_LOG(ERR, MEMPOOL,
40 "Missing callback while registering mempool ops\n");
41 return -EINVAL;
42 }
43
44 if (strlen(h->name) >= sizeof(ops->name) - 1) {
45 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
46 RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
47 __func__, h->name);
48 rte_errno = EEXIST;
49 return -EEXIST;
50 }
51
52 ops_index = rte_mempool_ops_table.num_ops++;
53 ops = &rte_mempool_ops_table.ops[ops_index];
54 snprintf(ops->name, sizeof(ops->name), "%s", h->name);
55 ops->alloc = h->alloc;
56 ops->free = h->free;
57 ops->enqueue = h->enqueue;
58 ops->dequeue = h->dequeue;
59 ops->get_count = h->get_count;
60 ops->calc_mem_size = h->calc_mem_size;
61 ops->populate = h->populate;
62 ops->get_info = h->get_info;
63 ops->dequeue_contig_blocks = h->dequeue_contig_blocks;
64
65 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
66
67 return ops_index;
68 }
69
70 /* wrapper to allocate an external mempool's private (pool) data. */
71 int
72 rte_mempool_ops_alloc(struct rte_mempool *mp)
73 {
74 struct rte_mempool_ops *ops;
75
76 ops = rte_mempool_get_ops(mp->ops_index);
77 return ops->alloc(mp);
78 }
79
80 /* wrapper to free an external pool ops. */
81 void
82 rte_mempool_ops_free(struct rte_mempool *mp)
83 {
84 struct rte_mempool_ops *ops;
85
86 ops = rte_mempool_get_ops(mp->ops_index);
87 if (ops->free == NULL)
88 return;
89 ops->free(mp);
90 }
91
92 /* wrapper to get available objects in an external mempool. */
93 unsigned int
94 rte_mempool_ops_get_count(const struct rte_mempool *mp)
95 {
96 struct rte_mempool_ops *ops;
97
98 ops = rte_mempool_get_ops(mp->ops_index);
99 return ops->get_count(mp);
100 }
101
102 /* wrapper to notify new memory area to external mempool */
103 ssize_t
104 rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
105 uint32_t obj_num, uint32_t pg_shift,
106 size_t *min_chunk_size, size_t *align)
107 {
108 struct rte_mempool_ops *ops;
109
110 ops = rte_mempool_get_ops(mp->ops_index);
111
112 if (ops->calc_mem_size == NULL)
113 return rte_mempool_op_calc_mem_size_default(mp, obj_num,
114 pg_shift, min_chunk_size, align);
115
116 return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
117 }
118
119 /* wrapper to populate memory pool objects using provided memory chunk */
120 int
121 rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
122 void *vaddr, rte_iova_t iova, size_t len,
123 rte_mempool_populate_obj_cb_t *obj_cb,
124 void *obj_cb_arg)
125 {
126 struct rte_mempool_ops *ops;
127
128 ops = rte_mempool_get_ops(mp->ops_index);
129
130 if (ops->populate == NULL)
131 return rte_mempool_op_populate_default(mp, max_objs, vaddr,
132 iova, len, obj_cb,
133 obj_cb_arg);
134
135 return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
136 obj_cb_arg);
137 }
138
139 /* wrapper to get additional mempool info */
140 int
141 rte_mempool_ops_get_info(const struct rte_mempool *mp,
142 struct rte_mempool_info *info)
143 {
144 struct rte_mempool_ops *ops;
145
146 ops = rte_mempool_get_ops(mp->ops_index);
147
148 RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
149 return ops->get_info(mp, info);
150 }
151
152
153 /* sets mempool ops previously registered by rte_mempool_register_ops. */
154 int
155 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
156 void *pool_config)
157 {
158 struct rte_mempool_ops *ops = NULL;
159 unsigned i;
160
161 /* too late, the mempool is already populated. */
162 if (mp->flags & MEMPOOL_F_POOL_CREATED)
163 return -EEXIST;
164
165 for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
166 if (!strcmp(name,
167 rte_mempool_ops_table.ops[i].name)) {
168 ops = &rte_mempool_ops_table.ops[i];
169 break;
170 }
171 }
172
173 if (ops == NULL)
174 return -EINVAL;
175
176 mp->ops_index = i;
177 mp->pool_config = pool_config;
178 return 0;
179 }