/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <string.h>
#include <sys/time.h>

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_string_fns.h>

#include "eal_memalloc.h"

#include "malloc_elem.h"
#include "malloc_mp.h"

#define MP_ACTION_SYNC "mp_malloc_sync"
/**< request sent by primary process to notify of changes in memory map */
#define MP_ACTION_ROLLBACK "mp_malloc_rollback"
/**< request sent by primary process to roll back a failed allocation. this is
 * essentially a regular sync request, but we cannot send sync requests while
 * another one is in progress, and we might have to - therefore, we register it
 * as a separate callback.
 */
#define MP_ACTION_REQUEST "mp_malloc_request"
/**< request sent by secondary process to ask for allocation/deallocation */
#define MP_ACTION_RESPONSE "mp_malloc_response"
/**< response sent to secondary process to indicate result of request */

/* forward declarations */
static int
handle_sync_response(const struct rte_mp_msg *request,
		const struct rte_mp_reply *reply);
static int
handle_rollback_response(const struct rte_mp_msg *request,
		const struct rte_mp_reply *reply);

#define MP_TIMEOUT_S 5 /**< 5-second timeout */

/* when we're allocating, we need to store some state to ensure that we can
 * roll back later
 */
struct primary_alloc_req_state {
	struct malloc_heap *heap;
	struct rte_memseg **ms;
	int ms_len;
	struct malloc_elem *elem;
	void *map_addr;
	size_t map_len;
};

enum req_state {
	REQ_STATE_INACTIVE = 0,
	REQ_STATE_ACTIVE,
	REQ_STATE_COMPLETE
};

struct mp_request {
	TAILQ_ENTRY(mp_request) next;
	struct malloc_mp_req user_req; /**< contents of request */
	pthread_cond_t cond; /**< variable we use to time out on this request */
	enum req_state state; /**< indicate status of this request */
	struct primary_alloc_req_state alloc_state;
};

/*
 * We could've used just a single request, but it may be possible for
 * secondaries to time out earlier than the primary, and send a new request
 * while the primary is still expecting replies to the old one. Therefore, each
 * new request will get assigned a new ID, which is how we will distinguish
 * between expected and unexpected messages.
 */
TAILQ_HEAD(mp_request_list, mp_request);
static struct {
	struct mp_request_list list;
	pthread_mutex_t lock;
} mp_request_list = {
	.list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),
	.lock = PTHREAD_MUTEX_INITIALIZER
};

/**
 * General workflow is the following:
 *
 * Allocation:
 * S: send request to primary
 * P: attempt to allocate memory
 *    if failed, sendmsg failure
 *    if success, send sync request
 * S: if received msg of failure, quit
 *    if received sync request, synchronize memory map and reply with result
 * P: if received sync request result
 *    if success, sendmsg success
 *    if failure, roll back allocation and send a rollback request
 * S: if received msg of success, quit
 *    if received rollback request, synchronize memory map and reply with result
 * P: if received sync request result
 *    sendmsg sync request result
 * S: if received msg, quit
 *
 * Aside from timeouts, there are three points where we can quit:
 * - if allocation failed straight away
 * - if allocation and sync request succeeded
 * - if allocation succeeded, sync request failed, allocation rolled back and
 *   rollback request received (irrespective of whether it succeeded or failed)
 *
 * Deallocation:
 * S: send request to primary
 * P: attempt to deallocate memory
 *    if failed, sendmsg failure
 *    if success, send sync request
 * S: if received msg of failure, quit
 *    if received sync request, synchronize memory map and reply with result
 * P: if received sync request result
 *    sendmsg sync request result
 * S: if received msg, quit
 *
 * There is no "rollback" from deallocation, as it's safe to have some memory
 * mapped in some processes - it's absent from the heap, so it won't get used.
 */
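
/* to make the workflow above concrete, here is a minimal sketch of how a
 * secondary-side caller (in practice this lives in malloc_heap.c) would drive
 * the deallocation path through request_to_primary(). the caller below is
 * hypothetical and compiled out; only the struct fields and functions it uses
 * actually appear in this file.
 */
#if 0
static int
example_secondary_free(void *addr, size_t len)
{
	struct malloc_mp_req req;

	memset(&req, 0, sizeof(req));
	req.t = REQ_TYPE_FREE;
	req.free_req.addr = addr;
	req.free_req.len = len;

	/* blocks until the primary responds or MP_TIMEOUT_S expires */
	if (request_to_primary(&req) != 0)
		return -1;
	return req.result == REQ_RESULT_SUCCESS ? 0 : -1;
}
#endif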

static struct mp_request *
find_request_by_id(uint64_t id)
{
	struct mp_request *req;
	TAILQ_FOREACH(req, &mp_request_list.list, next) {
		if (req->user_req.id == id)
			break;
	}
	return req;
}

/* this ID is, like, totally guaranteed to be absolutely unique. pinky swear. */
static uint64_t
get_unique_id(void)
{
	uint64_t id;
	do {
		id = rte_rand();
	} while (find_request_by_id(id) != NULL);
	return id;
}

/* secondary will respond to sync requests thusly */
static int
handle_sync(const struct rte_mp_msg *msg, const void *peer)
{
	struct rte_mp_msg reply;
	const struct malloc_mp_req *req =
			(const struct malloc_mp_req *)msg->param;
	struct malloc_mp_req *resp =
			(struct malloc_mp_req *)reply.param;
	int ret;

	if (req->t != REQ_TYPE_SYNC) {
		RTE_LOG(ERR, EAL, "Unexpected request from primary\n");
		return -1;
	}

	memset(&reply, 0, sizeof(reply));

	reply.num_fds = 0;
	strlcpy(reply.name, msg->name, sizeof(reply.name));
	reply.len_param = sizeof(*resp);

	ret = eal_memalloc_sync_with_primary();

	resp->t = REQ_TYPE_SYNC;
	resp->id = req->id;
	resp->result = ret == 0 ? REQ_RESULT_SUCCESS : REQ_RESULT_FAIL;

	rte_mp_reply(&reply, peer);

	return 0;
}
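
/* note: handle_sync() runs in the secondary and answers inline via
 * rte_mp_reply(). the primary never replies from inside a handler - DPDK IPC
 * is single-threaded, so it uses rte_mp_request_async() instead (see
 * handle_request() below).
 */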

static int
handle_alloc_request(const struct malloc_mp_req *m,
		struct mp_request *req)
{
	const struct malloc_req_alloc *ar = &m->alloc_req;
	struct malloc_heap *heap;
	struct malloc_elem *elem;
	struct rte_memseg **ms;
	size_t alloc_sz;
	int n_segs;
	void *map_addr;

	alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size +
			MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
	n_segs = alloc_sz / ar->page_sz;
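	/* worked example (illustrative numbers only): with page_sz = 2 MiB,
	 * elt_size = 5 MiB and align = 4 KiB, the element plus alignment
	 * slack and trailer rounds up to alloc_sz = 6 MiB, so n_segs = 3.
	 */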

	heap = ar->heap;

	/* we can't know in advance how many pages we'll need, so we malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL) {
		RTE_LOG(ERR, EAL, "Couldn't allocate memory for request state\n");
		goto fail;
	}
	memset(ms, 0, sizeof(*ms) * n_segs);

	elem = alloc_pages_on_heap(heap, ar->page_sz, ar->elt_size, ar->socket,
			ar->flags, ar->align, ar->bound, ar->contig, ms,
			n_segs);

	if (elem == NULL)
		goto fail;

	map_addr = ms[0]->addr;

	/* we have succeeded in allocating memory, but we still need to sync
	 * with other processes. however, since DPDK IPC is single-threaded, we
	 * send an asynchronous request and exit this callback.
	 */

	req->alloc_state.ms = ms;
	req->alloc_state.ms_len = n_segs;
	req->alloc_state.map_addr = map_addr;
	req->alloc_state.map_len = alloc_sz;
	req->alloc_state.elem = elem;
	req->alloc_state.heap = heap;

	return 0;
fail:
	free(ms);
	return -1;
}

/* first stage of primary handling requests from secondary */
static int
handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
{
	const struct malloc_mp_req *m =
			(const struct malloc_mp_req *)msg->param;
	struct mp_request *entry;
	int ret;

	/* lock access to request */
	pthread_mutex_lock(&mp_request_list.lock);

	/* make sure it's not a dupe */
	entry = find_request_by_id(m->id);
	if (entry != NULL) {
		RTE_LOG(ERR, EAL, "Duplicate request id\n");
		/* don't free the live list entry on the fail path */
		entry = NULL;
		goto fail;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		RTE_LOG(ERR, EAL, "Unable to allocate memory for request\n");
		goto fail;
	}

	/* erase all data */
	memset(entry, 0, sizeof(*entry));

	if (m->t == REQ_TYPE_ALLOC) {
		ret = handle_alloc_request(m, entry);
	} else if (m->t == REQ_TYPE_FREE) {
		ret = malloc_heap_free_pages(m->free_req.addr,
				m->free_req.len);
	} else {
		RTE_LOG(ERR, EAL, "Unexpected request from secondary\n");
		goto fail;
	}

	if (ret != 0) {
		struct rte_mp_msg resp_msg;
		struct malloc_mp_req *resp =
				(struct malloc_mp_req *)resp_msg.param;

		/* send failure message straight away */
		resp_msg.num_fds = 0;
		resp_msg.len_param = sizeof(*resp);
		strlcpy(resp_msg.name, MP_ACTION_RESPONSE,
				sizeof(resp_msg.name));

		resp->t = m->t;
		resp->result = REQ_RESULT_FAIL;
		resp->id = m->id;

		if (rte_mp_sendmsg(&resp_msg)) {
			RTE_LOG(ERR, EAL, "Couldn't send response\n");
			goto fail;
		}
		/* we did not modify the request */
		free(entry);
	} else {
		struct rte_mp_msg sr_msg;
		struct malloc_mp_req *sr =
				(struct malloc_mp_req *)sr_msg.param;
		struct timespec ts;

		memset(&sr_msg, 0, sizeof(sr_msg));

		/* we can do something, so send sync request asynchronously */
		sr_msg.num_fds = 0;
		sr_msg.len_param = sizeof(*sr);
		strlcpy(sr_msg.name, MP_ACTION_SYNC, sizeof(sr_msg.name));

		ts.tv_nsec = 0;
		ts.tv_sec = MP_TIMEOUT_S;

		/* sync requests carry no data */
		sr->t = REQ_TYPE_SYNC;
		sr->id = m->id;

		/* there may be a stray timeout still waiting */
		do {
			ret = rte_mp_request_async(&sr_msg, &ts,
					handle_sync_response);
		} while (ret != 0 && rte_errno == EEXIST);
		if (ret != 0) {
			RTE_LOG(ERR, EAL, "Couldn't send sync request\n");
			if (m->t == REQ_TYPE_ALLOC)
				free(entry->alloc_state.ms);
			goto fail;
		}

		/* mark request as in progress */
		memcpy(&entry->user_req, m, sizeof(*m));
		entry->state = REQ_STATE_ACTIVE;

		TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
	}
	pthread_mutex_unlock(&mp_request_list.lock);
	return 0;
fail:
	pthread_mutex_unlock(&mp_request_list.lock);
	free(entry);
	return -1;
}

/* callback for asynchronous sync requests for primary. this will either do a
 * sendmsg with results, or trigger rollback request.
 */
static int
handle_sync_response(const struct rte_mp_msg *request,
		const struct rte_mp_reply *reply)
{
	enum malloc_req_result result;
	struct mp_request *entry;
	const struct malloc_mp_req *mpreq =
			(const struct malloc_mp_req *)request->param;
	int i;

	/* lock the request */
	pthread_mutex_lock(&mp_request_list.lock);

	entry = find_request_by_id(mpreq->id);
	if (entry == NULL) {
		RTE_LOG(ERR, EAL, "Wrong request ID\n");
		goto fail;
	}

	result = REQ_RESULT_SUCCESS;

	if (reply->nb_received != reply->nb_sent)
		result = REQ_RESULT_FAIL;

	for (i = 0; i < reply->nb_received; i++) {
		struct malloc_mp_req *resp =
				(struct malloc_mp_req *)reply->msgs[i].param;

		if (resp->t != REQ_TYPE_SYNC) {
			RTE_LOG(ERR, EAL, "Unexpected response to sync request\n");
			result = REQ_RESULT_FAIL;
			break;
		}
		if (resp->id != entry->user_req.id) {
			RTE_LOG(ERR, EAL, "Response to wrong sync request\n");
			result = REQ_RESULT_FAIL;
			break;
		}
		if (resp->result == REQ_RESULT_FAIL) {
			result = REQ_RESULT_FAIL;
			break;
		}
	}

	if (entry->user_req.t == REQ_TYPE_FREE) {
		struct rte_mp_msg msg;
		struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;

		memset(&msg, 0, sizeof(msg));

		/* this is a free request, just sendmsg result */
		resp->t = REQ_TYPE_FREE;
		resp->result = result;
		resp->id = entry->user_req.id;
		msg.num_fds = 0;
		msg.len_param = sizeof(*resp);
		strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));

		if (rte_mp_sendmsg(&msg))
			RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");

		TAILQ_REMOVE(&mp_request_list.list, entry, next);
		free(entry);
	} else if (entry->user_req.t == REQ_TYPE_ALLOC &&
			result == REQ_RESULT_SUCCESS) {
		struct malloc_heap *heap = entry->alloc_state.heap;
		struct rte_mp_msg msg;
		struct malloc_mp_req *resp =
				(struct malloc_mp_req *)msg.param;

		memset(&msg, 0, sizeof(msg));

		heap->total_size += entry->alloc_state.map_len;

		/* result is success, so just notify secondary about this */
		resp->t = REQ_TYPE_ALLOC;
		resp->result = result;
		resp->id = entry->user_req.id;
		msg.num_fds = 0;
		msg.len_param = sizeof(*resp);
		strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));

		if (rte_mp_sendmsg(&msg))
			RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");

		TAILQ_REMOVE(&mp_request_list.list, entry, next);
		free(entry->alloc_state.ms);
		free(entry);
	} else if (entry->user_req.t == REQ_TYPE_ALLOC &&
			result == REQ_RESULT_FAIL) {
		struct rte_mp_msg rb_msg;
		struct malloc_mp_req *rb =
				(struct malloc_mp_req *)rb_msg.param;
		struct timespec ts;
		struct primary_alloc_req_state *state =
				&entry->alloc_state;
		int ret;

		memset(&rb_msg, 0, sizeof(rb_msg));

		/* we've failed to sync, so do a rollback */
		rollback_expand_heap(state->ms, state->ms_len, state->elem,
				state->map_addr, state->map_len);

		/* send rollback request */
		rb_msg.num_fds = 0;
		rb_msg.len_param = sizeof(*rb);
		strlcpy(rb_msg.name, MP_ACTION_ROLLBACK, sizeof(rb_msg.name));

		ts.tv_nsec = 0;
		ts.tv_sec = MP_TIMEOUT_S;

		/* rollback requests are sync requests under a different
		 * action name, and carry no data
		 */
		rb->t = REQ_TYPE_SYNC;
		rb->id = entry->user_req.id;

		/* there may be a stray timeout still waiting */
		do {
			ret = rte_mp_request_async(&rb_msg, &ts,
					handle_rollback_response);
		} while (ret != 0 && rte_errno == EEXIST);
		if (ret != 0) {
			RTE_LOG(ERR, EAL, "Could not send rollback request to secondary process\n");

			/* we couldn't send rollback request, but that's OK -
			 * secondary will time out, and memory has been removed
			 * from heap anyway.
			 */
			TAILQ_REMOVE(&mp_request_list.list, entry, next);
			free(state->ms);
			free(entry);
			goto fail;
		}
	} else {
		RTE_LOG(ERR, EAL, "Unexpected response to sync request of unknown type\n");
		goto fail;
	}

	pthread_mutex_unlock(&mp_request_list.lock);
	return 0;
fail:
	pthread_mutex_unlock(&mp_request_list.lock);
	return -1;
}

static int
handle_rollback_response(const struct rte_mp_msg *request,
		const struct rte_mp_reply *reply __rte_unused)
{
	struct rte_mp_msg msg;
	struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;
	const struct malloc_mp_req *mpreq =
			(const struct malloc_mp_req *)request->param;
	struct mp_request *entry;

	/* lock the request */
	pthread_mutex_lock(&mp_request_list.lock);

	memset(&msg, 0, sizeof(msg));

	entry = find_request_by_id(mpreq->id);
	if (entry == NULL) {
		RTE_LOG(ERR, EAL, "Wrong request ID\n");
		goto fail;
	}

	if (entry->user_req.t != REQ_TYPE_ALLOC) {
		RTE_LOG(ERR, EAL, "Unexpected active request\n");
		goto fail;
	}

	/* we don't care if rollback succeeded, request still failed */
	resp->t = REQ_TYPE_ALLOC;
	resp->result = REQ_RESULT_FAIL;
	resp->id = mpreq->id;
	msg.num_fds = 0;
	msg.len_param = sizeof(*resp);
	strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));

	if (rte_mp_sendmsg(&msg))
		RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");

	/* clean up */
	TAILQ_REMOVE(&mp_request_list.list, entry, next);
	free(entry->alloc_state.ms);
	free(entry);

	pthread_mutex_unlock(&mp_request_list.lock);
	return 0;
fail:
	pthread_mutex_unlock(&mp_request_list.lock);
	return -1;
}

/* final stage of the request from secondary */
static int
handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused)
{
	const struct malloc_mp_req *m =
			(const struct malloc_mp_req *)msg->param;
	struct mp_request *entry;

	pthread_mutex_lock(&mp_request_list.lock);

	entry = find_request_by_id(m->id);
	if (entry != NULL) {
		/* update request status */
		entry->user_req.result = m->result;

		entry->state = REQ_STATE_COMPLETE;

		/* trigger thread wakeup */
		pthread_cond_signal(&entry->cond);
	}
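
	/* note: the entry stays on the list here - the thread blocked in
	 * request_to_primary() owns removing and freeing it once it wakes up.
	 */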

	pthread_mutex_unlock(&mp_request_list.lock);

	return 0;
}

/* synchronously request memory map sync - this is only called when the
 * primary process itself initiates an allocation.
 */
int
request_sync(void)
{
	struct rte_mp_msg msg;
	struct rte_mp_reply reply;
	struct malloc_mp_req *req = (struct malloc_mp_req *)msg.param;
	struct timespec ts;
	int i, ret;

	memset(&msg, 0, sizeof(msg));
	memset(&reply, 0, sizeof(reply));

	/* no need to create tailq entries as this is entirely synchronous */

	msg.num_fds = 0;
	msg.len_param = sizeof(*req);
	strlcpy(msg.name, MP_ACTION_SYNC, sizeof(msg.name));

	/* sync request carries no data */
	req->t = REQ_TYPE_SYNC;
	req->id = get_unique_id();

	ts.tv_nsec = 0;
	ts.tv_sec = MP_TIMEOUT_S;

	/* there may be a stray timeout still waiting */
	do {
		ret = rte_mp_request_sync(&msg, &reply, &ts);
	} while (ret != 0 && rte_errno == EEXIST);
	if (ret != 0) {
		RTE_LOG(ERR, EAL, "Could not send sync request to secondary process\n");
		ret = -1;
		goto out;
	}

	if (reply.nb_received != reply.nb_sent) {
		RTE_LOG(ERR, EAL, "Not all secondaries have responded\n");
		ret = -1;
		goto out;
	}

	for (i = 0; i < reply.nb_received; i++) {
		struct malloc_mp_req *resp =
				(struct malloc_mp_req *)reply.msgs[i].param;
		if (resp->t != REQ_TYPE_SYNC) {
			RTE_LOG(ERR, EAL, "Unexpected response from secondary\n");
			ret = -1;
			goto out;
		}
		if (resp->id != req->id) {
			RTE_LOG(ERR, EAL, "Wrong request ID\n");
			ret = -1;
			goto out;
		}
		if (resp->result != REQ_RESULT_SUCCESS) {
			RTE_LOG(ERR, EAL, "Secondary process failed to synchronize\n");
			ret = -1;
			goto out;
		}
	}

	ret = 0;
out:
	free(reply.msgs);
	return ret;
}
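
/* a minimal sketch of the primary-side call site (hypothetical - the real
 * caller lives in the heap allocation path in malloc_heap.c): after the
 * primary maps new pages for its own allocation, it must sync all secondaries
 * before the memory is usable, and roll the mapping back if the sync fails.
 * variable names here are illustrative.
 */
#if 0
	if (request_sync() != 0) {
		/* secondaries are out of sync - undo the new mapping */
		rollback_expand_heap(ms, n_segs, elem, map_addr, map_len);
		return NULL;
	}
#endif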

/* this is a synchronous wrapper around a bunch of asynchronous requests to
 * primary process. this will initiate a request and wait until a response
 * arrives.
 */
int
request_to_primary(struct malloc_mp_req *user_req)
{
	struct rte_mp_msg msg;
	struct malloc_mp_req *msg_req = (struct malloc_mp_req *)msg.param;
	struct mp_request *entry;
	struct timespec ts;
	struct timeval now;
	int ret;

	memset(&msg, 0, sizeof(msg));
	memset(&ts, 0, sizeof(ts));

	pthread_mutex_lock(&mp_request_list.lock);

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for request\n");
		goto fail;
	}

	memset(entry, 0, sizeof(*entry));

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Cannot get current time\n");
		goto fail;
	}

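	/* pthread_cond_timedwait() takes an absolute deadline, so convert
	 * gettimeofday()'s sec/usec into sec/nsec and add the timeout. since
	 * now.tv_usec * 1000 is always less than one second's worth of
	 * nanoseconds, the carry term below is effectively zero.
	 */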
	ts.tv_nsec = (now.tv_usec * 1000) % 1000000000;
	ts.tv_sec = now.tv_sec + MP_TIMEOUT_S +
			(now.tv_usec * 1000) / 1000000000;

	/* initialize the request */
	pthread_cond_init(&entry->cond, NULL);

	msg.num_fds = 0;
	msg.len_param = sizeof(*msg_req);
	strlcpy(msg.name, MP_ACTION_REQUEST, sizeof(msg.name));

	/* (attempt to) get a unique id */
	user_req->id = get_unique_id();

	/* copy contents of user request into the message */
	memcpy(msg_req, user_req, sizeof(*msg_req));

	if (rte_mp_sendmsg(&msg)) {
		RTE_LOG(ERR, EAL, "Cannot send message to primary\n");
		goto fail;
	}

	/* copy contents of user request into active request */
	memcpy(&entry->user_req, user_req, sizeof(*user_req));

	/* mark request as in progress */
	entry->state = REQ_STATE_ACTIVE;

	TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);

	/* finally, wait on timeout. a spurious wakeup returns 0 with the
	 * request still incomplete, so keep waiting until the request
	 * completes, the deadline passes, or the wait fails.
	 */
	do {
		ret = pthread_cond_timedwait(&entry->cond,
				&mp_request_list.lock, &ts);
	} while (ret == 0 && entry->state != REQ_STATE_COMPLETE);

	if (entry->state != REQ_STATE_COMPLETE) {
		RTE_LOG(ERR, EAL, "Request timed out\n");
		ret = -1;
	} else {
		ret = 0;
		user_req->result = entry->user_req.result;
	}
	TAILQ_REMOVE(&mp_request_list.list, entry, next);
	free(entry);

	pthread_mutex_unlock(&mp_request_list.lock);
	return ret;
fail:
	pthread_mutex_unlock(&mp_request_list.lock);
	free(entry);
	return -1;
}

int
register_mp_requests(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (rte_mp_action_register(MP_ACTION_REQUEST, handle_request)) {
			RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
				MP_ACTION_REQUEST);
			return -1;
		}
	} else {
		if (rte_mp_action_register(MP_ACTION_SYNC, handle_sync)) {
			RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
				MP_ACTION_SYNC);
			return -1;
		}
		if (rte_mp_action_register(MP_ACTION_ROLLBACK, handle_sync)) {
			RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
				MP_ACTION_ROLLBACK);
			return -1;
		}
		if (rte_mp_action_register(MP_ACTION_RESPONSE,
				handle_response)) {
			RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
				MP_ACTION_RESPONSE);
			return -1;
		}
	}
	return 0;
}
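
/* a minimal sketch of the intended init-time usage (hypothetical call site -
 * the real one is in the EAL memory init path): both process types must call
 * register_mp_requests() before any cross-process allocation can happen.
 */
#if 0
	if (register_mp_requests() != 0)
		rte_panic("Cannot register malloc multiprocess actions\n");
#endif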