/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_fbarray.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"

struct mem_event_callback_entry {
        TAILQ_ENTRY(mem_event_callback_entry) next;
        char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
        rte_mem_event_callback_t clb;
        void *arg;
};

struct mem_alloc_validator_entry {
        TAILQ_ENTRY(mem_alloc_validator_entry) next;
        char name[RTE_MEM_ALLOC_VALIDATOR_NAME_LEN];
        rte_mem_alloc_validator_t clb;
        int socket_id;
        size_t limit;
};

/** Doubly linked lists of registered callbacks and validators. */
TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
TAILQ_HEAD(mem_alloc_validator_entry_list, mem_alloc_validator_entry);

static struct mem_event_callback_entry_list mem_event_callback_list =
        TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

static struct mem_alloc_validator_entry_list mem_alloc_validator_list =
        TAILQ_HEAD_INITIALIZER(mem_alloc_validator_list);
static rte_rwlock_t mem_alloc_validator_rwlock = RTE_RWLOCK_INITIALIZER;

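/*
 * Look up a registered mem event callback by name and user argument.
 * Returns the matching entry, or NULL if none is found. Callers must
 * hold mem_event_rwlock.
 */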
static struct mem_event_callback_entry *
find_mem_event_callback(const char *name, void *arg)
{
        struct mem_event_callback_entry *r;

        TAILQ_FOREACH(r, &mem_event_callback_list, next) {
                if (!strcmp(r->name, name) && r->arg == arg)
                        break;
        }
        return r;
}

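/*
 * Look up a registered allocation validator by name and socket ID.
 * Returns the matching entry, or NULL if none is found. Callers must
 * hold mem_alloc_validator_rwlock.
 */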
static struct mem_alloc_validator_entry *
find_mem_alloc_validator(const char *name, int socket_id)
{
        struct mem_alloc_validator_entry *r;

        TAILQ_FOREACH(r, &mem_alloc_validator_list, next) {
                if (!strcmp(r->name, name) && r->socket_id == socket_id)
                        break;
        }
        return r;
}

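/*
 * Check whether the virtual address range [start, start + len) is
 * IOVA-contiguous. In IOVA-as-VA mode (for internal memory) and in
 * legacy memory mode this is trivially true; otherwise the range is
 * walked page by page, either via rte_mem_virt2iova() when hugepages
 * are not in use, or via the memseg list's fbarray, and each page's
 * IOVA is compared against the expected next address.
 */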
bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
                size_t len)
{
        void *end, *aligned_start, *aligned_end;
        size_t pgsz = (size_t)msl->page_sz;
        const struct rte_memseg *ms;

        /* for IOVA_VA, it's always contiguous */
        if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
                return true;

        /* for legacy memory, it's always contiguous */
        if (internal_config.legacy_mem)
                return true;

        end = RTE_PTR_ADD(start, len);

        /* for nohuge, we check pagemap, otherwise check memseg */
        if (!rte_eal_has_hugepages()) {
                rte_iova_t cur, expected;

                aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
                aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

                /* if start and end are on the same page, bail out early */
                if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
                        return true;

                /* skip first iteration */
                cur = rte_mem_virt2iova(aligned_start);
                expected = cur + pgsz;
                aligned_start = RTE_PTR_ADD(aligned_start, pgsz);

                while (aligned_start < aligned_end) {
                        cur = rte_mem_virt2iova(aligned_start);
                        if (cur != expected)
                                return false;
                        aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
                        expected += pgsz;
                }
        } else {
                int start_seg, end_seg, cur_seg;
                rte_iova_t cur, expected;

                aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
                aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

                start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
                                pgsz;
                end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
                                pgsz;

                /* if start and end are on the same page, bail out early */
                if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
                        return true;

                /* skip first iteration */
                ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
                cur = ms->iova;
                expected = cur + pgsz;

                /* if we can't access IOVA addresses, assume non-contiguous */
                if (cur == RTE_BAD_IOVA)
                        return false;

                for (cur_seg = start_seg + 1; cur_seg < end_seg;
                                cur_seg++, expected += pgsz) {
                        ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);

                        if (ms->iova != expected)
                                return false;
                }
        }
        return true;
}

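/*
 * Register a callback to be invoked on memory hotplug events (see
 * eal_memalloc_mem_event_notify() below). Fails with EEXIST if a
 * callback with the same name and argument is already registered.
 * An illustrative caller sketch follows; the handler name and body
 * are hypothetical and not part of this file:
 *
 *	static void
 *	dump_mem_event(enum rte_mem_event event, const void *addr,
 *			size_t len, void *arg __rte_unused)
 *	{
 *		RTE_LOG(INFO, EAL, "mem event %d at %p, len %zu\n",
 *			(int)event, addr, len);
 *	}
 *
 *	if (eal_memalloc_mem_event_callback_register("dump",
 *			dump_mem_event, NULL) < 0)
 *		RTE_LOG(ERR, EAL, "cannot register callback: %s\n",
 *			rte_strerror(rte_errno));
 */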
int
eal_memalloc_mem_event_callback_register(const char *name,
                rte_mem_event_callback_t clb, void *arg)
{
        struct mem_event_callback_entry *entry;
        int ret, len;
        if (name == NULL || clb == NULL) {
                rte_errno = EINVAL;
                return -1;
        }
        len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
        if (len == 0) {
                rte_errno = EINVAL;
                return -1;
        } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
                rte_errno = ENAMETOOLONG;
                return -1;
        }
        rte_rwlock_write_lock(&mem_event_rwlock);

        entry = find_mem_event_callback(name, arg);
        if (entry != NULL) {
                rte_errno = EEXIST;
                ret = -1;
                goto unlock;
        }

        entry = malloc(sizeof(*entry));
        if (entry == NULL) {
                rte_errno = ENOMEM;
                ret = -1;
                goto unlock;
        }

        /* callback successfully created and is valid, add it to the list */
        entry->clb = clb;
        entry->arg = arg;
        strlcpy(entry->name, name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
        TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);

        ret = 0;

        RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' registered\n",
                        name, arg);

unlock:
        rte_rwlock_write_unlock(&mem_event_rwlock);
        return ret;
}

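/*
 * Remove a previously registered mem event callback, matched by name
 * and user argument. Sets rte_errno to ENOENT and returns -1 if no
 * such callback is registered.
 */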
int
eal_memalloc_mem_event_callback_unregister(const char *name, void *arg)
{
        struct mem_event_callback_entry *entry;
        int ret, len;

        if (name == NULL) {
                rte_errno = EINVAL;
                return -1;
        }
        len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
        if (len == 0) {
                rte_errno = EINVAL;
                return -1;
        } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
                rte_errno = ENAMETOOLONG;
                return -1;
        }
        rte_rwlock_write_lock(&mem_event_rwlock);

        entry = find_mem_event_callback(name, arg);
        if (entry == NULL) {
                rte_errno = ENOENT;
                ret = -1;
                goto unlock;
        }
        TAILQ_REMOVE(&mem_event_callback_list, entry, next);
        free(entry);

        ret = 0;

        RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' unregistered\n",
                        name, arg);

unlock:
        rte_rwlock_write_unlock(&mem_event_rwlock);
        return ret;
}

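/*
 * Invoke every registered mem event callback for the given event and
 * address range. The list is walked under the read lock, so a callback
 * must not attempt to register or unregister callbacks from within
 * this call, as that would try to take the write lock while the read
 * lock is held.
 */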
void
eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
                size_t len)
{
        struct mem_event_callback_entry *entry;

        rte_rwlock_read_lock(&mem_event_rwlock);

        TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
                RTE_LOG(DEBUG, EAL, "Calling mem event callback '%s:%p'\n",
                        entry->name, entry->arg);
                entry->clb(event, start, len, entry->arg);
        }

        rte_rwlock_read_unlock(&mem_event_rwlock);
}

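/*
 * Register a validator that is consulted before memory is allocated on
 * the given socket, once the proposed new total allocation reaches
 * 'limit' bytes (see eal_memalloc_mem_alloc_validate() below). An
 * illustrative caller sketch follows; the validator name and the
 * 512 MiB limit are hypothetical:
 *
 *	static int
 *	cap_socket0(int socket_id __rte_unused, size_t cur_limit,
 *			size_t new_len)
 *	{
 *		// reject any allocation that would grow past the limit
 *		return new_len > cur_limit ? -1 : 0;
 *	}
 *
 *	eal_memalloc_mem_alloc_validator_register("cap", cap_socket0, 0,
 *			512 * 1024 * 1024);
 */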
int
eal_memalloc_mem_alloc_validator_register(const char *name,
                rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
        struct mem_alloc_validator_entry *entry;
        int ret, len;
        if (name == NULL || clb == NULL || socket_id < 0) {
                rte_errno = EINVAL;
                return -1;
        }
        len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
        if (len == 0) {
                rte_errno = EINVAL;
                return -1;
        } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
                rte_errno = ENAMETOOLONG;
                return -1;
        }
        rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

        entry = find_mem_alloc_validator(name, socket_id);
        if (entry != NULL) {
                rte_errno = EEXIST;
                ret = -1;
                goto unlock;
        }

        entry = malloc(sizeof(*entry));
        if (entry == NULL) {
                rte_errno = ENOMEM;
                ret = -1;
                goto unlock;
        }

        /* callback successfully created and is valid, add it to the list */
        entry->clb = clb;
        entry->socket_id = socket_id;
        entry->limit = limit;
        strlcpy(entry->name, name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
        TAILQ_INSERT_TAIL(&mem_alloc_validator_list, entry, next);

        ret = 0;

        RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i with limit %zu registered\n",
                name, socket_id, limit);

unlock:
        rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
        return ret;
}

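/*
 * Remove a previously registered allocation validator, matched by name
 * and socket ID. Sets rte_errno to ENOENT and returns -1 if no such
 * validator is registered.
 */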
int
eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
{
        struct mem_alloc_validator_entry *entry;
        int ret, len;

        if (name == NULL || socket_id < 0) {
                rte_errno = EINVAL;
                return -1;
        }
        len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
        if (len == 0) {
                rte_errno = EINVAL;
                return -1;
        } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
                rte_errno = ENAMETOOLONG;
                return -1;
        }
        rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

        entry = find_mem_alloc_validator(name, socket_id);
        if (entry == NULL) {
                rte_errno = ENOENT;
                ret = -1;
                goto unlock;
        }
        TAILQ_REMOVE(&mem_alloc_validator_list, entry, next);
        free(entry);

        ret = 0;

        RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i unregistered\n",
                name, socket_id);

unlock:
        rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
        return ret;
}

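/*
 * Run every validator registered for this socket whose limit has been
 * reached by the proposed new total allocation size. Returns -1 if any
 * validator rejects the allocation, 0 otherwise.
 */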
int
eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
{
        struct mem_alloc_validator_entry *entry;
        int ret = 0;

        rte_rwlock_read_lock(&mem_alloc_validator_rwlock);

        TAILQ_FOREACH(entry, &mem_alloc_validator_list, next) {
                if (entry->socket_id != socket_id || entry->limit > new_len)
                        continue;
                RTE_LOG(DEBUG, EAL, "Calling mem alloc validator '%s' on socket %i\n",
                        entry->name, entry->socket_id);
                if (entry->clb(socket_id, entry->limit, new_len) < 0)
                        ret = -1;
        }

        rte_rwlock_read_unlock(&mem_alloc_validator_rwlock);

        return ret;
}