]> git.proxmox.com Git - mirror_corosync.git/blob - exec/ipc_glue.c
config: Don't free pointers used by transports
[mirror_corosync.git] / exec / ipc_glue.c
1 /*
2 * Copyright (c) 2010-2017 Red Hat, Inc.
3 *
4 * All rights reserved.
5 *
6 * Author: Angus Salkeld <asalkeld@redhat.com>
7 *
8 * This software licensed under BSD license, the text of which follows:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 *
13 * - Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * - Neither the name of Red Hat, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived from this
20 * software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <config.h>
36
37 #include <stdlib.h>
38 #include <stdio.h>
39 #include <errno.h>
40 #include <assert.h>
41 #include <sys/uio.h>
42 #include <string.h>
43
44 #include <qb/qbdefs.h>
45 #include <qb/qblist.h>
46 #include <qb/qbutil.h>
47 #include <qb/qbloop.h>
48 #include <qb/qbipcs.h>
49
50 #include <corosync/swab.h>
51 #include <corosync/corotypes.h>
52 #include <corosync/corodefs.h>
53 #include <corosync/totem/totempg.h>
54 #include <corosync/logsys.h>
55 #include <corosync/icmap.h>
56
57 #include "sync.h"
58 #include "timer.h"
59 #include "main.h"
60 #include "util.h"
61 #include "apidef.h"
62 #include "service.h"
63 #include "ipcs_stats.h"
64 #include "stats.h"
65
LOGSYS_DECLARE_SUBSYS ("MAIN");

/* corosync API handle and IPC admission / flow-control state */
static struct corosync_api_v1 *api = NULL;
static int32_t ipc_not_enough_fds_left = 0;
static int32_t ipc_fc_is_quorate; /* boolean */
static int32_t ipc_fc_totem_queue_level; /* enum totem_q_level value, set by cs_ipcs_totem_queue_level_changed() */
static int32_t ipc_fc_sync_in_process; /* boolean */
static int32_t ipc_allow_connections = 0; /* boolean */

/* max length (incl. NUL) of an IPC service short name */
#define CS_IPCS_MAPPER_SERV_NAME 256

/* per-service IPC endpoint: service id, libqb server instance and name */
struct cs_ipcs_mapper {
	int32_t id;
	qb_ipcs_service_t *inst;
	char name[CS_IPCS_MAPPER_SERV_NAME];
};

/* one queued outbound event (owned, flattened copy of the payload) */
struct outq_item {
	void *msg;
	size_t mlen;
	struct qb_list_head list;
};

/* one endpoint slot per possible service id */
static struct cs_ipcs_mapper ipcs_mapper[SERVICES_COUNT_MAX];

static int32_t cs_ipcs_job_add(enum qb_loop_priority p, void *data, qb_loop_job_dispatch_fn fn);
static int32_t cs_ipcs_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t events,
	void *data, qb_ipcs_dispatch_fn_t fn);
static int32_t cs_ipcs_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t events,
	void *data, qb_ipcs_dispatch_fn_t fn);
static int32_t cs_ipcs_dispatch_del(int32_t fd);
static void outq_flush (void *data);


/* glue that plugs libqb's IPC servers into corosync's main poll loop */
static struct qb_ipcs_poll_handlers corosync_poll_funcs = {
	.job_add = cs_ipcs_job_add,
	.dispatch_add = cs_ipcs_dispatch_add,
	.dispatch_mod = cs_ipcs_dispatch_mod,
	.dispatch_del = cs_ipcs_dispatch_del,
};

static int32_t cs_ipcs_connection_accept (qb_ipcs_connection_t *c, uid_t euid, gid_t egid);
static void cs_ipcs_connection_created(qb_ipcs_connection_t *c);
static int32_t cs_ipcs_msg_process(qb_ipcs_connection_t *c,
	void *data, size_t size);
static int32_t cs_ipcs_connection_closed (qb_ipcs_connection_t *c);
static void cs_ipcs_connection_destroyed (qb_ipcs_connection_t *c);

/* connection life-cycle callbacks shared by every IPC service */
static struct qb_ipcs_service_handlers corosync_service_funcs = {
	.connection_accept = cs_ipcs_connection_accept,
	.connection_created = cs_ipcs_connection_created,
	.msg_process = cs_ipcs_msg_process,
	.connection_closed = cs_ipcs_connection_closed,
	.connection_destroyed = cs_ipcs_connection_destroyed,
};

/* process-wide connection counters (active/closed) */
static struct ipcs_global_stats global_stats;
124 static const char* cs_ipcs_serv_short_name(int32_t service_id)
125 {
126 const char *name;
127 switch (service_id) {
128 case CFG_SERVICE:
129 name = "cfg";
130 break;
131 case CPG_SERVICE:
132 name = "cpg";
133 break;
134 case QUORUM_SERVICE:
135 name = "quorum";
136 break;
137 case PLOAD_SERVICE:
138 name = "pload";
139 break;
140 case VOTEQUORUM_SERVICE:
141 name = "votequorum";
142 break;
143 case MON_SERVICE:
144 name = "mon";
145 break;
146 case WD_SERVICE:
147 name = "wd";
148 break;
149 case CMAP_SERVICE:
150 name = "cmap";
151 break;
152 default:
153 name = NULL;
154 break;
155 }
156 return name;
157 }
158
/*
 * Enable (allow != 0) or disable acceptance of new IPC client
 * connections; the flag is checked in cs_ipcs_connection_accept().
 */
void cs_ipc_allow_connections(int32_t allow)
{
	ipc_allow_connections = allow;
}
163
164 int32_t cs_ipcs_service_destroy(int32_t service_id)
165 {
166 if (ipcs_mapper[service_id].inst) {
167 qb_ipcs_destroy(ipcs_mapper[service_id].inst);
168 ipcs_mapper[service_id].inst = NULL;
169 }
170 return 0;
171 }
172
173 static int32_t cs_ipcs_connection_accept (qb_ipcs_connection_t *c, uid_t euid, gid_t egid)
174 {
175 int32_t service = qb_ipcs_service_id_get(c);
176 uint8_t u8;
177 char key_name[ICMAP_KEYNAME_MAXLEN];
178
179 if (!ipc_allow_connections) {
180 log_printf(LOGSYS_LEVEL_DEBUG, "Denied connection, corosync is not ready");
181 return -EAGAIN;
182 }
183
184 if (corosync_service[service] == NULL ||
185 ipcs_mapper[service].inst == NULL) {
186 return -ENOSYS;
187 }
188
189 if (ipc_not_enough_fds_left) {
190 return -EMFILE;
191 }
192
193 if (euid == 0 || egid == 0) {
194 return 0;
195 }
196
197 snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "uidgid.uid.%u", euid);
198 if (icmap_get_uint8(key_name, &u8) == CS_OK && u8 == 1)
199 return 0;
200
201 snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "uidgid.config.uid.%u", euid);
202 if (icmap_get_uint8(key_name, &u8) == CS_OK && u8 == 1)
203 return 0;
204
205 snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "uidgid.gid.%u", egid);
206 if (icmap_get_uint8(key_name, &u8) == CS_OK && u8 == 1)
207 return 0;
208
209 snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "uidgid.config.gid.%u", egid);
210 if (icmap_get_uint8(key_name, &u8) == CS_OK && u8 == 1)
211 return 0;
212
213 log_printf(LOGSYS_LEVEL_ERROR, "Denied connection attempt from %d:%d", euid, egid);
214
215 return -EACCES;
216 }
217
/*
 * Look up the process name (comm field) of a pid via /proc/<pid>/stat.
 *
 * On success copies the NUL-terminated (possibly truncated) name into
 * out_name and returns out_name; returns NULL if the stat file cannot
 * be opened, read or parsed.
 */
static char * pid_to_name (pid_t pid, char *out_name, size_t name_len)
{
	char *name;
	char *rest;
	FILE *fp;
	char fname[32];
	char buf[256];

	snprintf (fname, sizeof (fname), "/proc/%d/stat", pid);
	fp = fopen (fname, "r");
	if (!fp) {
		return NULL;
	}

	if (fgets (buf, sizeof (buf), fp) == NULL) {
		fclose (fp);
		return NULL;
	}
	fclose (fp);

	/* the comm field is wrapped in parentheses and may contain spaces */
	name = strrchr (buf, '(');
	if (!name) {
		return NULL;
	}

	/* move past the bracket */
	name++;

	rest = strrchr (buf, ')');

	if (rest == NULL || rest[1] != ' ') {
		return NULL;
	}

	/* terminate the name; the fields after ')' are not needed */
	*rest = '\0';

	/* bounded copy, always NUL-terminated (strncpy would not guarantee that) */
	snprintf (out_name, name_len, "%s", name);
	return out_name;
}
261
/*
 * libqb callback: a new client connection has been created.
 *
 * Allocates the per-connection context (generic part plus the owning
 * service's private data area in a single allocation), attaches it to
 * the connection and runs the service's lib_init_fn.  On allocation
 * or init failure the connection is disconnected again.
 */
static void cs_ipcs_connection_created(qb_ipcs_connection_t *c)
{
	int32_t service = 0;
	struct cs_ipcs_conn_context *context;
	struct qb_ipcs_connection_stats stats;
	/* generic context followed by the service's private data */
	size_t size = sizeof(struct cs_ipcs_conn_context);

	log_printf(LOG_DEBUG, "connection created");

	service = qb_ipcs_service_id_get(c);

	size += corosync_service[service]->private_data_size;
	context = calloc(1, size);
	if (context == NULL) {
		qb_ipcs_disconnect(c);
		return;
	}

	qb_list_init(&context->outq_head);
	context->queuing = QB_FALSE;
	context->queued = 0;
	context->sent = 0;

	/* must be attached before lib_init_fn, which may access the private data */
	qb_ipcs_context_set(c, context);

	if (corosync_service[service]->lib_init_fn(c) != 0) {
		log_printf(LOG_ERR, "lib_init_fn failed, disconnecting");
		qb_ipcs_disconnect(c);
		return;
	}

	qb_ipcs_connection_stats_get(c, &stats, QB_FALSE);

	/* best effort: empty name if the /proc lookup fails */
	if (!pid_to_name (stats.client_pid, context->proc_name, sizeof(context->proc_name))) {
		context->proc_name[0] = '\0';
	}
	stats_ipcs_add_connection(service, stats.client_pid, c);
	global_stats.active++;
}
301
/* Take a reference on an IPC connection (wrapper for service code). */
void cs_ipc_refcnt_inc(void *conn)
{
	qb_ipcs_connection_ref(conn);
}
306
/* Drop a reference on an IPC connection (wrapper for service code). */
void cs_ipc_refcnt_dec(void *conn)
{
	qb_ipcs_connection_unref(conn);
}
311
/*
 * Return the service-private area that follows the generic
 * cs_ipcs_conn_context in the connection's context allocation
 * (see cs_ipcs_connection_created()).
 */
void *cs_ipcs_private_data_get(void *conn)
{
	struct cs_ipcs_conn_context *cnx;
	cnx = qb_ipcs_context_get(conn);
	return &cnx->data[0];
}
318
319 static void cs_ipcs_connection_destroyed (qb_ipcs_connection_t *c)
320 {
321 struct cs_ipcs_conn_context *context;
322 struct qb_list_head *list, *tmp_iter;
323 struct outq_item *outq_item;
324
325 log_printf(LOG_DEBUG, "%s() ", __func__);
326
327 context = qb_ipcs_context_get(c);
328 if (context) {
329 qb_list_for_each_safe(list, tmp_iter, &(context->outq_head)) {
330 outq_item = qb_list_entry (list, struct outq_item, list);
331
332 qb_list_del (list);
333 free (outq_item->msg);
334 free (outq_item);
335 }
336 free(context);
337 }
338 }
339
340 static int32_t cs_ipcs_connection_closed (qb_ipcs_connection_t *c)
341 {
342 int32_t res = 0;
343 int32_t service = qb_ipcs_service_id_get(c);
344 struct qb_ipcs_connection_stats stats;
345
346 log_printf(LOG_DEBUG, "%s() ", __func__);
347 res = corosync_service[service]->lib_exit_fn(c);
348 if (res != 0) {
349 return res;
350 }
351
352 qb_loop_job_del(cs_poll_handle_get(), QB_LOOP_HIGH, c, outq_flush);
353
354 qb_ipcs_connection_stats_get(c, &stats, QB_FALSE);
355
356 stats_ipcs_del_connection(service, stats.client_pid, c);
357
358 global_stats.active--;
359 global_stats.closed++;
360 return 0;
361 }
362
/*
 * Send a response iovec to a client.
 * Returns 0 on success or the negative libqb error code.
 */
int cs_ipcs_response_iov_send (void *conn,
	const struct iovec *iov,
	unsigned int iov_len)
{
	int32_t rc = qb_ipcs_response_sendv(conn, iov, iov_len);

	return (rc < 0) ? rc : 0;
}
373
374 int cs_ipcs_response_send(void *conn, const void *msg, size_t mlen)
375 {
376 int32_t rc = qb_ipcs_response_send(conn, msg, mlen);
377 if (rc >= 0) {
378 return 0;
379 }
380 return rc;
381 }
382
/*
 * Loop job: push queued outbound events to a client that was
 * previously flow-controlled.  Stops on -EAGAIN (client still full,
 * job is rescheduled below), gives up on any other send error, and
 * leaves queuing mode once the queue drains.
 */
static void outq_flush (void *data)
{
	qb_ipcs_connection_t *conn = data;
	struct qb_list_head *list, *tmp_iter;
	struct outq_item *outq_item;
	int32_t rc;
	struct cs_ipcs_conn_context *context = qb_ipcs_context_get(conn);

	qb_list_for_each_safe(list, tmp_iter, &(context->outq_head)) {
		outq_item = qb_list_entry (list, struct outq_item, list);

		rc = qb_ipcs_event_send(conn, outq_item->msg, outq_item->mlen);
		if (rc < 0 && rc != -EAGAIN) {
			/* hard error: stop; queue is freed on connection teardown */
			errno = -rc;
			qb_perror(LOG_ERR, "qb_ipcs_event_send");
			return;
		} else if (rc == -EAGAIN) {
			/* client buffer still full, retry via the rescheduled job */
			break;
		}
		/* anything else must be a complete send */
		assert(rc == outq_item->mlen);
		context->sent++;
		context->queued--;

		qb_list_del (list);
		free (outq_item->msg);
		free (outq_item);
	}
	if (qb_list_empty (&context->outq_head)) {
		context->queuing = QB_FALSE;
		log_printf(LOGSYS_LEVEL_INFO, "Q empty, queued:%d sent:%d.",
			context->queued, context->sent);
		context->queued = 0;
		context->sent = 0;
	} else {
		qb_loop_job_add(cs_poll_handle_get(), QB_LOOP_HIGH, conn, outq_flush);
	}
}
420
421 static void msg_send_or_queue(qb_ipcs_connection_t *conn, const struct iovec *iov, uint32_t iov_len)
422 {
423 int32_t rc = 0;
424 int32_t i;
425 int32_t bytes_msg = 0;
426 struct outq_item *outq_item;
427 char *write_buf = 0;
428 struct cs_ipcs_conn_context *context = qb_ipcs_context_get(conn);
429
430 for (i = 0; i < iov_len; i++) {
431 bytes_msg += iov[i].iov_len;
432 }
433
434 if (!context->queuing) {
435 assert(qb_list_empty (&context->outq_head));
436 rc = qb_ipcs_event_sendv(conn, iov, iov_len);
437 if (rc == bytes_msg) {
438 context->sent++;
439 return;
440 }
441 if (rc == -EAGAIN) {
442 context->queued = 0;
443 context->sent = 0;
444 context->queuing = QB_TRUE;
445 qb_loop_job_add(cs_poll_handle_get(), QB_LOOP_HIGH, conn, outq_flush);
446 } else {
447 log_printf(LOGSYS_LEVEL_ERROR, "event_send retuned %d, expected %d!", rc, bytes_msg);
448 return;
449 }
450 }
451 outq_item = malloc (sizeof (struct outq_item));
452 if (outq_item == NULL) {
453 qb_ipcs_disconnect(conn);
454 return;
455 }
456 outq_item->msg = malloc (bytes_msg);
457 if (outq_item->msg == NULL) {
458 free (outq_item);
459 qb_ipcs_disconnect(conn);
460 return;
461 }
462
463 write_buf = outq_item->msg;
464 for (i = 0; i < iov_len; i++) {
465 memcpy (write_buf, iov[i].iov_base, iov[i].iov_len);
466 write_buf += iov[i].iov_len;
467 }
468 outq_item->mlen = bytes_msg;
469 qb_list_init (&outq_item->list);
470 qb_list_add_tail (&outq_item->list, &context->outq_head);
471 context->queued++;
472 }
473
/*
 * Send (or queue) a single-buffer event to a client.
 * Always reports success; back-pressure is handled internally.
 */
int cs_ipcs_dispatch_send(void *conn, const void *msg, size_t mlen)
{
	struct iovec iov = {
		.iov_base = (void *)msg,
		.iov_len = mlen,
	};

	msg_send_or_queue (conn, &iov, 1);
	return 0;
}
482
/*
 * Send (or queue) a multi-part event to a client.
 * Always reports success; back-pressure is handled internally.
 */
int cs_ipcs_dispatch_iov_send (void *conn,
	const struct iovec *iov,
	unsigned int iov_len)
{
	msg_send_or_queue(conn, iov, iov_len);
	return 0;
}
490
/*
 * libqb callback: dispatch one client request to the owning service's
 * library handler.
 *
 * corosync_sending_allowed() validates the request and applies flow
 * control: -EINVAL yields a CS_ERR_INVALID_PARAM response, any other
 * negative value tells the library to retry (CS_ERR_TRY_AGAIN).
 * Async calls get no direct response, only a log entry.
 */
static int32_t cs_ipcs_msg_process(qb_ipcs_connection_t *c,
	void *data, size_t size)
{
	struct qb_ipc_response_header response;
	struct qb_ipc_request_header *request_pt = (struct qb_ipc_request_header *)data;
	int32_t service = qb_ipcs_service_id_get(c);
	int32_t send_ok = 0;
	int32_t is_async_call = QB_FALSE;
	ssize_t res = -1;
	int sending_allowed_private_data;
	struct cs_ipcs_conn_context *cnx;

	send_ok = corosync_sending_allowed (service,
		request_pt->id,
		request_pt,
		&sending_allowed_private_data);

	/* id 2 is presumably the CPG dispatch request, which expects no
	 * synchronous reply -- confirm against the cpg message id enum */
	is_async_call = (service == CPG_SERVICE && request_pt->id == 2);

	/*
	 * This happens when the message contains some kind of invalid
	 * parameter, such as an invalid size
	 */
	if (send_ok == -EINVAL) {
		response.size = sizeof (response);
		response.id = 0;
		response.error = CS_ERR_INVALID_PARAM;

		cnx = qb_ipcs_context_get(c);
		if (cnx) {
			cnx->invalid_request++;
		}

		if (is_async_call) {
			log_printf(LOGSYS_LEVEL_INFO, "*** %s() invalid message! size:%d error:%d",
				__func__, response.size, response.error);
		} else {
			qb_ipcs_response_send (c,
				&response,
				sizeof (response));
		}
		res = -EINVAL;
	} else if (send_ok < 0) {
		cnx = qb_ipcs_context_get(c);
		if (cnx) {
			cnx->overload++;
		}
		if (!is_async_call) {
			/*
			 * Overload, tell library to retry
			 */
			response.size = sizeof (response);
			response.id = 0;
			response.error = CS_ERR_TRY_AGAIN;
			qb_ipcs_response_send (c,
				&response,
				sizeof (response));
		} else {
			log_printf(LOGSYS_LEVEL_WARNING,
				"*** %s() (%d:%d - %d) %s!",
				__func__, service, request_pt->id,
				is_async_call, strerror(-send_ok));
		}
		res = -ENOBUFS;
	}

	if (send_ok >= 0) {
		/* accepted: hand the request to the service's handler table */
		corosync_service[service]->lib_engine[request_pt->id].lib_handler_fn(c, request_pt);
		res = 0;
	}
	corosync_sending_allowed_release (&sending_allowed_private_data);
	return res;
}
564
565
/* qb_ipcs_poll_handlers: run a job on corosync's main loop */
static int32_t cs_ipcs_job_add(enum qb_loop_priority p, void *data, qb_loop_job_dispatch_fn fn)
{
	return qb_loop_job_add(cs_poll_handle_get(), p, data, fn);
}
570
/* qb_ipcs_poll_handlers: watch an fd on corosync's main loop */
static int32_t cs_ipcs_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t events,
	void *data, qb_ipcs_dispatch_fn_t fn)
{
	return qb_loop_poll_add(cs_poll_handle_get(), p, fd, events, data, fn);
}
576
/* qb_ipcs_poll_handlers: change the events watched for an fd */
static int32_t cs_ipcs_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t events,
	void *data, qb_ipcs_dispatch_fn_t fn)
{
	return qb_loop_poll_mod(cs_poll_handle_get(), p, fd, events, data, fn);
}
582
/* qb_ipcs_poll_handlers: stop watching an fd */
static int32_t cs_ipcs_dispatch_del(int32_t fd)
{
	return qb_loop_poll_del(cs_poll_handle_get(), fd);
}
587
588 static void cs_ipcs_low_fds_event(int32_t not_enough, int32_t fds_available)
589 {
590 ipc_not_enough_fds_left = not_enough;
591 if (not_enough) {
592 log_printf(LOGSYS_LEVEL_WARNING, "refusing new connections (fds_available:%d)",
593 fds_available);
594 } else {
595 log_printf(LOGSYS_LEVEL_NOTICE, "allowing new connections (fds_available:%d)",
596 fds_available);
597
598 }
599 }
600
/* Return the last totem queue level reported to the IPC layer. */
int32_t cs_ipcs_q_level_get(void)
{
	return ipc_fc_totem_queue_level;
}
605
/* timer used to re-check the queue level while flow control is engaged */
static qb_loop_timer_handle ipcs_check_for_flow_control_timer;

/*
 * Recompute the IPC rate limit of every active service from quorum
 * state, totem queue level and sync state.  fc_enabled doubles as the
 * rate-limit value handed to libqb: QB_FALSE (0) means no flow
 * control, while the non-zero QB_IPCS_RATE_OFF / QB_IPCS_RATE_OFF_2
 * values stop request processing and arm a timer to re-check soon.
 */
static void cs_ipcs_check_for_flow_control(void)
{
	int32_t i;
	int32_t fc_enabled;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		if (corosync_service[i] == NULL || ipcs_mapper[i].inst == NULL) {
			continue;
		}
		/* default: not quorate (and inquorate not allowed) -> IPC off */
		fc_enabled = QB_IPCS_RATE_OFF;
		if (ipc_fc_is_quorate == 1 ||
			corosync_service[i]->allow_inquorate == CS_LIB_ALLOW_INQUORATE) {
			/*
			 * we are quorate
			 * now check flow control
			 */
			if (ipc_fc_totem_queue_level != TOTEM_Q_LEVEL_CRITICAL &&
				ipc_fc_sync_in_process == 0) {
				fc_enabled = QB_FALSE;
			} else if (ipc_fc_totem_queue_level != TOTEM_Q_LEVEL_CRITICAL &&
				i == VOTEQUORUM_SERVICE) {
				/*
				 * Allow message processing for votequorum service even
				 * in sync phase
				 */
				fc_enabled = QB_FALSE;
			} else {
				fc_enabled = QB_IPCS_RATE_OFF_2;
			}
		}
		if (fc_enabled) {
			qb_ipcs_request_rate_limit(ipcs_mapper[i].inst, fc_enabled);

			/* re-evaluate shortly; the queue level may drop again */
			qb_loop_timer_add(cs_poll_handle_get(), QB_LOOP_MED, 1*QB_TIME_NS_IN_MSEC,
				NULL, corosync_recheck_the_q_level, &ipcs_check_for_flow_control_timer);
		} else if (ipc_fc_totem_queue_level == TOTEM_Q_LEVEL_LOW) {
			qb_ipcs_request_rate_limit(ipcs_mapper[i].inst, QB_IPCS_RATE_FAST);
		} else if (ipc_fc_totem_queue_level == TOTEM_Q_LEVEL_GOOD) {
			qb_ipcs_request_rate_limit(ipcs_mapper[i].inst, QB_IPCS_RATE_NORMAL);
		} else if (ipc_fc_totem_queue_level == TOTEM_Q_LEVEL_HIGH) {
			qb_ipcs_request_rate_limit(ipcs_mapper[i].inst, QB_IPCS_RATE_SLOW);
		}
	}
}
651
/* Quorum callback: record the new state and re-evaluate flow control. */
static void cs_ipcs_fc_quorum_changed(int quorate, void *context)
{
	ipc_fc_is_quorate = quorate;
	cs_ipcs_check_for_flow_control();
}
657
/* Totem callback: record the queue level and re-evaluate flow control. */
static void cs_ipcs_totem_queue_level_changed(enum totem_q_level level)
{
	ipc_fc_totem_queue_level = level;
	cs_ipcs_check_for_flow_control();
}
663
/* Sync phase started/ended: record it and re-evaluate flow control. */
void cs_ipcs_sync_state_changed(int32_t sync_in_process)
{
	ipc_fc_sync_in_process = sync_in_process;
	cs_ipcs_check_for_flow_control();
}
669
/* Copy the global IPC counters (active/closed) to the caller's buffer. */
void cs_ipcs_get_global_stats(struct ipcs_global_stats *ipcs_stats)
{
	memcpy(ipcs_stats, &global_stats, sizeof(global_stats));
}
674
/*
 * Fill *ipcs_stats for the connection conn_ptr of the given service
 * and client pid.  Returns CS_ERR_NOT_EXIST when the service has no
 * IPC endpoint or no matching connection is found.
 */
cs_error_t cs_ipcs_get_conn_stats(int service_id, uint32_t pid, void *conn_ptr, struct ipcs_conn_stats *ipcs_stats)
{
	struct cs_ipcs_conn_context *cnx;
	qb_ipcs_connection_t *c, *prev;
	int found = 0;

	if (corosync_service[service_id] == NULL || ipcs_mapper[service_id].inst == NULL) {
		return CS_ERR_NOT_EXIST;
	}

	/* server-level stats first */
	qb_ipcs_stats_get(ipcs_mapper[service_id].inst, &ipcs_stats->srv, QB_FALSE);

	/*
	 * Walk every connection of the service; the loop increment drops
	 * the reference taken on the previous connection as it advances.
	 */
	for (c = qb_ipcs_connection_first_get(ipcs_mapper[service_id].inst);
		c;
		prev = c, c = qb_ipcs_connection_next_get(ipcs_mapper[service_id].inst, prev), qb_ipcs_connection_unref(prev)) {

		cnx = qb_ipcs_context_get(c);
		if (cnx == NULL) continue;
		if (c != conn_ptr) continue;

		qb_ipcs_connection_stats_get(c, &ipcs_stats->conn, QB_FALSE);
		if (ipcs_stats->conn.client_pid != pid) {
			continue;
		}
		found = 1;
		memcpy(&ipcs_stats->cnx, cnx, sizeof(struct cs_ipcs_conn_context));
	}
	if (!found) {
		return CS_ERR_NOT_EXIST;
	}

	return CS_OK;
}
708
709 void cs_ipcs_clear_stats()
710 {
711 struct cs_ipcs_conn_context *cnx;
712 struct ipcs_conn_stats ipcs_stats;
713 qb_ipcs_connection_t *c, *prev;
714 int service_id;
715
716 /* Global stats are easy */
717 memset(&global_stats, 0, sizeof(global_stats));
718
719 for (service_id = 0; service_id < SERVICES_COUNT_MAX; service_id++) {
720 if (!ipcs_mapper[service_id].inst) {
721 continue;
722 }
723
724 for (c = qb_ipcs_connection_first_get(ipcs_mapper[service_id].inst);
725 c;
726 prev = c, c = qb_ipcs_connection_next_get(ipcs_mapper[service_id].inst, prev), qb_ipcs_connection_unref(prev)) {
727 /* Get stats with 'clear_after_read' set */
728 qb_ipcs_connection_stats_get(c, &ipcs_stats.conn, QB_TRUE);
729
730 /* Our own stats */
731 cnx = qb_ipcs_context_get(c);
732 if (cnx == NULL) continue;
733 cnx->invalid_request = 0;
734 cnx->overload = 0;
735 cnx->sent = 0;
736
737 }
738 }
739 }
740
741 static enum qb_ipc_type cs_get_ipc_type (void)
742 {
743 char *str;
744 int found = 0;
745 enum qb_ipc_type ret = QB_IPC_NATIVE;
746
747 if (icmap_get_string("system.qb_ipc_type", &str) != CS_OK) {
748 log_printf(LOGSYS_LEVEL_DEBUG, "No configured system.qb_ipc_type. Using native ipc");
749 return QB_IPC_NATIVE;
750 }
751
752 if (strcmp(str, "native") == 0) {
753 ret = QB_IPC_NATIVE;
754 found = 1;
755 }
756
757 if (strcmp(str, "shm") == 0) {
758 ret = QB_IPC_SHM;
759 found = 1;
760 }
761
762 if (strcmp(str, "socket") == 0) {
763 ret = QB_IPC_SOCKET;
764 found = 1;
765 }
766
767 if (found) {
768 log_printf(LOGSYS_LEVEL_DEBUG, "Using %s ipc", str);
769 } else {
770 log_printf(LOGSYS_LEVEL_DEBUG, "Unknown ipc type %s", str);
771 }
772
773 free(str);
774
775 return ret;
776 }
777
/*
 * Create and start the libqb IPC endpoint for one service engine.
 * Services with no library handlers get no endpoint.  Returns NULL on
 * success or a static error string on failure.
 */
const char *cs_ipcs_service_init(struct corosync_service_engine *service)
{
	const char *serv_short_name;

	serv_short_name = cs_ipcs_serv_short_name(service->id);

	if (service->lib_engine_count == 0) {
		log_printf (LOGSYS_LEVEL_DEBUG,
			"NOT Initializing IPC on %s [%d]",
			serv_short_name,
			service->id);
		return NULL;
	}

	/* the name must fit the fixed buffer used by strcpy below */
	if (strlen(serv_short_name) >= CS_IPCS_MAPPER_SERV_NAME) {
		log_printf (LOGSYS_LEVEL_ERROR, "service name %s is too long", serv_short_name);
		return "qb_ipcs_run error";
	}

	ipcs_mapper[service->id].id = service->id;
	strcpy(ipcs_mapper[service->id].name, serv_short_name);
	log_printf (LOGSYS_LEVEL_DEBUG,
		"Initializing IPC on %s [%d]",
		ipcs_mapper[service->id].name,
		ipcs_mapper[service->id].id);
	ipcs_mapper[service->id].inst = qb_ipcs_create(ipcs_mapper[service->id].name,
		ipcs_mapper[service->id].id,
		cs_get_ipc_type(),
		&corosync_service_funcs);
	assert(ipcs_mapper[service->id].inst);
	/* hook the server into corosync's poll loop, then start it */
	qb_ipcs_poll_handlers_set(ipcs_mapper[service->id].inst,
		&corosync_poll_funcs);
	if (qb_ipcs_run(ipcs_mapper[service->id].inst) != 0) {
		log_printf (LOGSYS_LEVEL_ERROR, "Can't initialize IPC");
		return "qb_ipcs_run error";
	}

	return NULL;
}
817
/*
 * One-time IPC subsystem setup: hook the low-fds event into the main
 * loop, register for quorum and totem queue-level notifications, and
 * zero the global connection counters.
 */
void cs_ipcs_init(void)
{
	api = apidef_get ();

	qb_loop_poll_low_fds_event_set(cs_poll_handle_get(), cs_ipcs_low_fds_event);

	api->quorum_register_callback (cs_ipcs_fc_quorum_changed, NULL);
	totempg_queue_level_register_callback (cs_ipcs_totem_queue_level_changed);

	global_stats.active = 0;
	global_stats.closed = 0;
}