/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#include <stdio.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_dev.h>

/* indirect jump table to support external memory pools. */
struct rte_mempool_ops_table rte_mempool_ops_table = {
	.sl = RTE_SPINLOCK_INITIALIZER,
	.num_ops = 0
};

/* add a new ops struct in rte_mempool_ops_table, return its index. */
int
rte_mempool_register_ops(const struct rte_mempool_ops *h)
{
	struct rte_mempool_ops *ops;
	int16_t ops_index;

	rte_spinlock_lock(&rte_mempool_ops_table.sl);

	if (rte_mempool_ops_table.num_ops >=
			RTE_MEMPOOL_MAX_OPS_IDX) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Maximum number of mempool ops structs exceeded\n");
		return -ENOSPC;
	}

	if (h->alloc == NULL || h->enqueue == NULL ||
			h->dequeue == NULL || h->get_count == NULL) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Missing callback while registering mempool ops\n");
		return -EINVAL;
	}

	if (strlen(h->name) >= sizeof(ops->name) - 1) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
			__func__, h->name);
		rte_errno = EEXIST;
		return -EEXIST;
	}

	ops_index = rte_mempool_ops_table.num_ops++;
	ops = &rte_mempool_ops_table.ops[ops_index];
	strlcpy(ops->name, h->name, sizeof(ops->name));
	ops->alloc = h->alloc;
	ops->free = h->free;
	ops->enqueue = h->enqueue;
	ops->dequeue = h->dequeue;
	ops->get_count = h->get_count;
	ops->calc_mem_size = h->calc_mem_size;
	ops->populate = h->populate;
	ops->get_info = h->get_info;
	ops->dequeue_contig_blocks = h->dequeue_contig_blocks;

	rte_spinlock_unlock(&rte_mempool_ops_table.sl);

	return ops_index;
}
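
/*
 * Illustrative sketch (not part of this file's logic): in this DPDK
 * version a pool driver typically fills a static struct rte_mempool_ops
 * and registers it at load time through the MEMPOOL_REGISTER_OPS()
 * constructor macro from rte_mempool.h, which ends up calling
 * rte_mempool_register_ops() above. The "my_ring" name and the my_*
 * callbacks below are hypothetical placeholders, not a real driver.
 *
 *	static const struct rte_mempool_ops my_ring_ops = {
 *		.name = "my_ring",
 *		.alloc = my_alloc,		// allocate pool private data
 *		.free = my_free,		// release it
 *		.enqueue = my_enqueue,		// put objects back
 *		.dequeue = my_dequeue,		// take objects out
 *		.get_count = my_get_count,	// objects currently available
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ring_ops);
 */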

/* wrapper to allocate an external mempool's private (pool) data. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->alloc(mp);
}

/* wrapper to free an external mempool's private (pool) data. */
void
rte_mempool_ops_free(struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	if (ops->free == NULL)
		return;
	ops->free(mp);
}

/* wrapper to get available objects in an external mempool. */
unsigned int
rte_mempool_ops_get_count(const struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->get_count(mp);
}

/* wrapper to calculate the memory size required to store the given
 * number of objects. */
ssize_t
rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t *min_chunk_size, size_t *align)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);

	if (ops->calc_mem_size == NULL)
		return rte_mempool_op_calc_mem_size_default(mp, obj_num,
				pg_shift, min_chunk_size, align);

	return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
}
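
/*
 * Illustrative sketch (hypothetical driver code, not used by this file):
 * a driver without special layout requirements can implement the
 * calc_mem_size op by delegating to the default helper and then only
 * tightening the constraints it needs. my_calc_mem_size and the extra
 * alignment requirement below are assumptions for the example.
 *
 *	static ssize_t
 *	my_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 *			uint32_t pg_shift, size_t *min_chunk_size,
 *			size_t *align)
 *	{
 *		ssize_t mem_size;
 *
 *		mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num,
 *				pg_shift, min_chunk_size, align);
 *		if (mem_size < 0)
 *			return mem_size;
 *		// hypothetical hardware requirement: 128-byte alignment
 *		*align = RTE_MAX(*align, (size_t)128);
 *		return mem_size;
 *	}
 */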

/* wrapper to populate memory pool objects using provided memory chunk */
int
rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
				void *vaddr, rte_iova_t iova, size_t len,
				rte_mempool_populate_obj_cb_t *obj_cb,
				void *obj_cb_arg)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);

	if (ops->populate == NULL)
		return rte_mempool_op_populate_default(mp, max_objs, vaddr,
						iova, len, obj_cb,
						obj_cb_arg);

	return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
			     obj_cb_arg);
}

/* wrapper to get additional mempool info */
int
rte_mempool_ops_get_info(const struct rte_mempool *mp,
			 struct rte_mempool_info *info)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);

	RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
	return ops->get_info(mp, info);
}

/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
	void *pool_config)
{
	struct rte_mempool_ops *ops = NULL;
	unsigned i;

	/* too late, the mempool is already populated. */
	if (mp->flags & MEMPOOL_F_POOL_CREATED)
		return -EEXIST;

	for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
		if (!strcmp(name, rte_mempool_ops_table.ops[i].name)) {
			ops = &rte_mempool_ops_table.ops[i];
			break;
		}
	}

	if (ops == NULL)
		return -EINVAL;

	mp->ops_index = i;
	mp->pool_config = pool_config;
	return 0;
}
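
/*
 * Illustrative sketch (not part of this file): an application normally
 * selects an ops implementation by name before the pool is populated,
 * e.g. with rte_mempool_create_empty() followed by
 * rte_mempool_set_ops_byname() and rte_mempool_populate_default().
 * "ring_mp_mc" is the stock ring-based handler name; the pool name and
 * sizes below are arbitrary example values.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 4096, 2048,
 *			256, 0, rte_socket_id(), 0);
 *	if (mp == NULL)
 *		return -rte_errno;
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
 *		goto fail;
 *	if (rte_mempool_populate_default(mp) < 0)
 *		goto fail;
 */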