]>
Commit | Line | Data |
---|---|---|
7d65b7b7 CH |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* | |
3 | * MGMTD Backend Client Connection Adapter | |
4 | * | |
5 | * Copyright (C) 2021 Vmware, Inc. | |
6 | * Pushpasis Sarkar <spushpasis@vmware.com> | |
7 | */ | |
8 | ||
9 | #include <zebra.h> | |
cb37cb33 | 10 | #include "event.h" |
7d65b7b7 CH |
11 | #include "sockopt.h" |
12 | #include "network.h" | |
13 | #include "libfrr.h" | |
f82370b4 | 14 | #include "mgmt_msg.h" |
7d65b7b7 CH |
15 | #include "mgmt_pb.h" |
16 | #include "mgmtd/mgmt.h" | |
17 | #include "mgmtd/mgmt_memory.h" | |
18 | #include "mgmt_be_client.h" | |
19 | #include "mgmtd/mgmt_be_adapter.h" | |
20 | ||
21 | #ifdef REDIRECT_DEBUG_TO_STDERR | |
22 | #define MGMTD_BE_ADAPTER_DBG(fmt, ...) \ | |
23 | fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__) | |
24 | #define MGMTD_BE_ADAPTER_ERR(fmt, ...) \ | |
25 | fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__) | |
26 | #else /* REDIRECT_DEBUG_TO_STDERR */ | |
27 | #define MGMTD_BE_ADAPTER_DBG(fmt, ...) \ | |
28 | do { \ | |
29 | if (mgmt_debug_be) \ | |
30 | zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ | |
31 | } while (0) | |
32 | #define MGMTD_BE_ADAPTER_ERR(fmt, ...) \ | |
33 | zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__) | |
34 | #endif /* REDIRECT_DEBUG_TO_STDERR */ | |
35 | ||
36 | #define FOREACH_ADAPTER_IN_LIST(adapter) \ | |
37 | frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter)) | |
38 | ||
39 | /* | |
40 | * Static mapping of YANG XPath regular expressions and | |
41 | * the corresponding interested backend clients. | |
42 | * NOTE: This is a static mapping defined by all MGMTD | |
43 | * backend client modules (for now, till we develop a | |
44 | * more dynamic way of creating and updating this map). | |
45 | * A running map is created by MGMTD in run-time to | |
46 | * handle real-time mapping of YANG xpaths to one or | |
47 | * more interested backend client adapters. | |
48 | * | |
49 | * Please see xpath_map_reg[] in lib/mgmt_be_client.c | |
50 | * for the actual map | |
51 | */ | |
52 | struct mgmt_be_xpath_map_reg { | |
53 | const char *xpath_regexp; /* Longest matching regular expression */ | |
54 | enum mgmt_be_client_id *be_clients; /* clients to notify */ | |
55 | }; | |
56 | ||
57 | struct mgmt_be_xpath_regexp_map { | |
58 | const char *xpath_regexp; | |
59 | struct mgmt_be_client_subscr_info be_subscrs; | |
60 | }; | |
61 | ||
62 | struct mgmt_be_get_adapter_config_params { | |
63 | struct mgmt_be_client_adapter *adapter; | |
64 | struct nb_config_cbs *cfg_chgs; | |
65 | uint32_t seq; | |
66 | }; | |
67 | ||
68 | /* | |
69 | * Static mapping of YANG XPath regular expressions and | |
70 | * the corresponding interested backend clients. | |
71 | * NOTE: This is a static mapping defined by all MGMTD | |
72 | * backend client modules (for now, till we develop a | |
73 | * more dynamic way of creating and updating this map). | |
74 | * A running map is created by MGMTD in run-time to | |
75 | * handle real-time mapping of YANG xpaths to one or | |
76 | * more interested backend client adapters. | |
77 | */ | |
78 | static const struct mgmt_be_xpath_map_reg xpath_static_map_reg[] = { | |
79 | {.xpath_regexp = "/frr-vrf:lib/*", | |
80 | .be_clients = | |
81 | (enum mgmt_be_client_id[]){ | |
7d65b7b7 CH |
82 | #if HAVE_STATICD |
83 | MGMTD_BE_CLIENT_ID_STATICD, | |
7d65b7b7 CH |
84 | #endif |
85 | MGMTD_BE_CLIENT_ID_MAX}}, | |
86 | {.xpath_regexp = "/frr-interface:lib/*", | |
87 | .be_clients = | |
88 | (enum mgmt_be_client_id[]){ | |
7d65b7b7 CH |
89 | #if HAVE_STATICD |
90 | MGMTD_BE_CLIENT_ID_STATICD, | |
7d65b7b7 CH |
91 | #endif |
92 | MGMTD_BE_CLIENT_ID_MAX}}, | |
93 | {.xpath_regexp = | |
94 | "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/*", | |
95 | ||
96 | .be_clients = | |
97 | (enum mgmt_be_client_id[]){ | |
7d65b7b7 CH |
98 | #if HAVE_STATICD |
99 | MGMTD_BE_CLIENT_ID_STATICD, | |
7d65b7b7 CH |
100 | #endif |
101 | MGMTD_BE_CLIENT_ID_MAX}}, | |
102 | }; | |
103 | ||
104 | #define MGMTD_BE_MAX_NUM_XPATH_MAP 256 | |
105 | static struct mgmt_be_xpath_regexp_map | |
106 | mgmt_xpath_map[MGMTD_BE_MAX_NUM_XPATH_MAP]; | |
107 | static int mgmt_num_xpath_maps; | |
108 | ||
cd9d0537 | 109 | static struct event_loop *mgmt_be_adapter_tm; |
7d65b7b7 CH |
110 | |
111 | static struct mgmt_be_adapters_head mgmt_be_adapters; | |
112 | ||
113 | static struct mgmt_be_client_adapter | |
114 | *mgmt_be_adapters_by_id[MGMTD_BE_CLIENT_ID_MAX]; | |
115 | ||
116 | /* Forward declarations */ | |
117 | static void | |
118 | mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter, | |
119 | enum mgmt_be_event event); | |
120 | ||
121 | static struct mgmt_be_client_adapter * | |
122 | mgmt_be_find_adapter_by_fd(int conn_fd) | |
123 | { | |
124 | struct mgmt_be_client_adapter *adapter; | |
125 | ||
126 | FOREACH_ADAPTER_IN_LIST (adapter) { | |
127 | if (adapter->conn_fd == conn_fd) | |
128 | return adapter; | |
129 | } | |
130 | ||
131 | return NULL; | |
132 | } | |
133 | ||
134 | static struct mgmt_be_client_adapter * | |
135 | mgmt_be_find_adapter_by_name(const char *name) | |
136 | { | |
137 | struct mgmt_be_client_adapter *adapter; | |
138 | ||
139 | FOREACH_ADAPTER_IN_LIST (adapter) { | |
140 | if (!strncmp(adapter->name, name, sizeof(adapter->name))) | |
141 | return adapter; | |
142 | } | |
143 | ||
144 | return NULL; | |
145 | } | |
146 | ||
147 | static void | |
148 | mgmt_be_cleanup_adapters(void) | |
149 | { | |
150 | struct mgmt_be_client_adapter *adapter; | |
151 | ||
152 | FOREACH_ADAPTER_IN_LIST (adapter) | |
153 | mgmt_be_adapter_unlock(&adapter); | |
154 | } | |
155 | ||
156 | static void mgmt_be_xpath_map_init(void) | |
157 | { | |
158 | int indx, num_xpath_maps; | |
159 | uint16_t indx1; | |
160 | enum mgmt_be_client_id id; | |
161 | ||
162 | MGMTD_BE_ADAPTER_DBG("Init XPath Maps"); | |
163 | ||
164 | num_xpath_maps = (int)array_size(xpath_static_map_reg); | |
165 | for (indx = 0; indx < num_xpath_maps; indx++) { | |
166 | MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'", | |
167 | xpath_static_map_reg[indx].xpath_regexp); | |
168 | mgmt_xpath_map[indx].xpath_regexp = | |
169 | xpath_static_map_reg[indx].xpath_regexp; | |
170 | for (indx1 = 0;; indx1++) { | |
171 | id = xpath_static_map_reg[indx].be_clients[indx1]; | |
172 | if (id == MGMTD_BE_CLIENT_ID_MAX) | |
173 | break; | |
174 | MGMTD_BE_ADAPTER_DBG(" -- Client: %s Id: %u", | |
175 | mgmt_be_client_id2name(id), | |
176 | id); | |
177 | if (id < MGMTD_BE_CLIENT_ID_MAX) { | |
178 | mgmt_xpath_map[indx] | |
179 | .be_subscrs.xpath_subscr[id] | |
180 | .validate_config = 1; | |
181 | mgmt_xpath_map[indx] | |
182 | .be_subscrs.xpath_subscr[id] | |
183 | .notify_config = 1; | |
184 | mgmt_xpath_map[indx] | |
185 | .be_subscrs.xpath_subscr[id] | |
186 | .own_oper_data = 1; | |
187 | } | |
188 | } | |
189 | } | |
190 | ||
191 | mgmt_num_xpath_maps = indx; | |
192 | MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", mgmt_num_xpath_maps); | |
193 | } | |
194 | ||
/*
 * Score how well 'xpath' matches the registered 'xpath_regexp'.
 *
 * Returns the number of path tokens (segments between '/', '[' and ']'
 * delimiters) that match; 0 means no match.  A trailing '*' on either
 * string is stripped first; a '*' immediately after a '/' or '\''
 * delimiter matches everything up to the next occurrence of that same
 * delimiter in the other string.
 */
static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
				     const char *xpath)
{
	int score = 0, ri = 0, xi = 0;
	int re_len, xp_len;
	bool ok = true, re_star = false, xp_star = false;
	bool at_delim = false, in_wild = false;
	char delim_ch = 0;

	re_len = strlen(xpath_regexp);
	xp_len = strlen(xpath);

	/* Ignore a trailing '*' wildcard on either side. */
	if (re_len && xpath_regexp[re_len - 1] == '*')
		re_len--;
	if (xp_len && xpath[xp_len - 1] == '*')
		xp_len--;

	if (!re_len || !xp_len)
		return 0;

	for (ri = 0, xi = 0; ok && ri < re_len && xi < xp_len;) {
		ok = (xpath_regexp[ri] == xpath[xi]);

		/* On mismatch at a '*', maybe start wildcard matching. */
		if (!in_wild && !ok &&
		    (xpath_regexp[ri] == '*' || xpath[xi] == '*')) {
			/* Only after a '/' or quote delimiter. */
			in_wild = (xpath_regexp[ri - 1] == '/'
				   || xpath_regexp[ri - 1] == '\''
				   || xpath[xi - 1] == '/'
				   || xpath[xi - 1] == '\'');
			if (in_wild) {
				if (xpath_regexp[ri] == '*') {
					/* regexp side wildcard */
					re_star = true;
					delim_ch = xpath_regexp[ri - 1];
				} else if (xpath[xi] == '*') {
					/* xpath side wildcard */
					xp_star = true;
					delim_ch = xpath[xi - 1];
				}
			}
		}

		/* Leave wildcard mode once the delimiter recurs. */
		if (in_wild) {
			if (re_star && xpath[xi] == delim_ch) {
				re_star = false;
				if (ri < re_len - 1)
					ri++;
				in_wild = false;
			} else if (xp_star
				   && xpath_regexp[ri] == delim_ch) {
				xp_star = false;
				if (xi < xp_len - 1)
					xi++;
				in_wild = false;
			}
		}

		ok = (xp_star || re_star
		      || xpath_regexp[ri] == xpath[xi]);

		/* Count a token when both strings sit on the same delimiter. */
		if ((xpath_regexp[ri] == '/' && xpath[xi] == '/')
		    || (xpath_regexp[ri] == ']' && xpath[xi] == ']')
		    || (xpath_regexp[ri] == '[' && xpath[xi] == '[')) {
			if (ok && ri && xi && !at_delim)
				score++;
			at_delim = true;
		} else {
			at_delim = false;
		}

		/* Advance each cursor unless it is held by a wildcard. */
		if (!re_star)
			ri++;
		if (!xp_star)
			xi++;
	}

	/* Credit a final fully-matched token. */
	if (ok && !at_delim &&
	    (xpath_regexp[ri] == '/' || xpath_regexp[ri] == ']'))
		score++;

	return score;
}
321 | ||
322 | static void mgmt_be_adapter_disconnect(struct mgmt_be_client_adapter *adapter) | |
323 | { | |
324 | if (adapter->conn_fd >= 0) { | |
325 | close(adapter->conn_fd); | |
326 | adapter->conn_fd = -1; | |
327 | } | |
328 | ||
329 | /* | |
74335ceb | 330 | * Notify about client disconnect for appropriate cleanup |
7d65b7b7 | 331 | */ |
74335ceb | 332 | mgmt_txn_notify_be_adapter_conn(adapter, false); |
7d65b7b7 CH |
333 | |
334 | if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) { | |
335 | mgmt_be_adapters_by_id[adapter->id] = NULL; | |
336 | adapter->id = MGMTD_BE_CLIENT_ID_MAX; | |
337 | } | |
338 | ||
339 | mgmt_be_adapters_del(&mgmt_be_adapters, adapter); | |
340 | ||
341 | mgmt_be_adapter_unlock(&adapter); | |
342 | } | |
343 | ||
344 | static void | |
345 | mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter) | |
346 | { | |
347 | struct mgmt_be_client_adapter *old; | |
348 | ||
349 | FOREACH_ADAPTER_IN_LIST (old) { | |
350 | if (old != adapter | |
351 | && !strncmp(adapter->name, old->name, sizeof(adapter->name))) { | |
352 | /* | |
353 | * We have a Zombie lingering around | |
354 | */ | |
355 | MGMTD_BE_ADAPTER_DBG( | |
356 | "Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!", | |
357 | adapter->name, adapter->conn_fd, old->conn_fd); | |
358 | mgmt_be_adapter_disconnect(old); | |
359 | } | |
360 | } | |
361 | } | |
362 | ||
363 | static int | |
364 | mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter, | |
365 | Mgmtd__BeMessage *be_msg) | |
366 | { | |
0b645fd2 CH |
367 | /* |
368 | * protobuf-c adds a max size enum with an internal, and changing by | |
369 | * version, name; cast to an int to avoid unhandled enum warnings | |
370 | */ | |
371 | switch ((int)be_msg->message_case) { | |
7d65b7b7 CH |
372 | case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ: |
373 | MGMTD_BE_ADAPTER_DBG( | |
374 | "Got Subscribe Req Msg from '%s' to %sregister %u xpaths", | |
375 | be_msg->subscr_req->client_name, | |
376 | !be_msg->subscr_req->subscribe_xpaths | |
377 | && be_msg->subscr_req->n_xpath_reg | |
378 | ? "de" | |
379 | : "", | |
380 | (uint32_t)be_msg->subscr_req->n_xpath_reg); | |
381 | ||
382 | if (strlen(be_msg->subscr_req->client_name)) { | |
383 | strlcpy(adapter->name, be_msg->subscr_req->client_name, | |
384 | sizeof(adapter->name)); | |
385 | adapter->id = mgmt_be_client_name2id(adapter->name); | |
386 | if (adapter->id >= MGMTD_BE_CLIENT_ID_MAX) { | |
387 | MGMTD_BE_ADAPTER_ERR( | |
388 | "Unable to resolve adapter '%s' to a valid ID. Disconnecting!", | |
389 | adapter->name); | |
390 | mgmt_be_adapter_disconnect(adapter); | |
391 | } | |
392 | mgmt_be_adapters_by_id[adapter->id] = adapter; | |
393 | mgmt_be_adapter_cleanup_old_conn(adapter); | |
394 | } | |
395 | break; | |
396 | case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY: | |
397 | MGMTD_BE_ADAPTER_DBG( | |
398 | "Got %s TXN_REPLY Msg for Txn-Id 0x%llx from '%s' with '%s'", | |
399 | be_msg->txn_reply->create ? "Create" : "Delete", | |
400 | (unsigned long long)be_msg->txn_reply->txn_id, | |
401 | adapter->name, | |
402 | be_msg->txn_reply->success ? "success" : "failure"); | |
403 | /* | |
74335ceb | 404 | * Forward the TXN_REPLY to txn module. |
7d65b7b7 | 405 | */ |
74335ceb YR |
406 | mgmt_txn_notify_be_txn_reply( |
407 | be_msg->txn_reply->txn_id, | |
408 | be_msg->txn_reply->create, | |
409 | be_msg->txn_reply->success, adapter); | |
7d65b7b7 CH |
410 | break; |
411 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY: | |
412 | MGMTD_BE_ADAPTER_DBG( | |
413 | "Got CFGDATA_REPLY Msg from '%s' for Txn-Id 0x%llx Batch-Id 0x%llx with Err:'%s'", | |
414 | adapter->name, | |
415 | (unsigned long long)be_msg->cfg_data_reply->txn_id, | |
416 | (unsigned long long)be_msg->cfg_data_reply->batch_id, | |
417 | be_msg->cfg_data_reply->error_if_any | |
418 | ? be_msg->cfg_data_reply->error_if_any | |
419 | : "None"); | |
420 | /* | |
74335ceb | 421 | * Forward the CGFData-create reply to txn module. |
7d65b7b7 | 422 | */ |
74335ceb YR |
423 | mgmt_txn_notify_be_cfgdata_reply( |
424 | be_msg->cfg_data_reply->txn_id, | |
425 | be_msg->cfg_data_reply->batch_id, | |
426 | be_msg->cfg_data_reply->success, | |
427 | be_msg->cfg_data_reply->error_if_any, adapter); | |
7d65b7b7 CH |
428 | break; |
429 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY: | |
430 | MGMTD_BE_ADAPTER_DBG( | |
431 | "Got %s CFG_APPLY_REPLY Msg from '%s' for Txn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx), Err:'%s'", | |
432 | be_msg->cfg_apply_reply->success ? "successful" | |
433 | : "failed", | |
434 | adapter->name, | |
435 | (unsigned long long) | |
436 | be_msg->cfg_apply_reply->txn_id, | |
437 | (int)be_msg->cfg_apply_reply->n_batch_ids, | |
438 | (unsigned long long) | |
439 | be_msg->cfg_apply_reply->batch_ids[0], | |
440 | (unsigned long long)be_msg->cfg_apply_reply | |
441 | ->batch_ids[be_msg->cfg_apply_reply | |
442 | ->n_batch_ids | |
443 | - 1], | |
444 | be_msg->cfg_apply_reply->error_if_any | |
445 | ? be_msg->cfg_apply_reply->error_if_any | |
446 | : "None"); | |
74335ceb YR |
447 | /* |
448 | * Forward the CGFData-apply reply to txn module. | |
7d65b7b7 | 449 | */ |
74335ceb YR |
450 | mgmt_txn_notify_be_cfg_apply_reply( |
451 | be_msg->cfg_apply_reply->txn_id, | |
452 | be_msg->cfg_apply_reply->success, | |
453 | (uint64_t *)be_msg->cfg_apply_reply->batch_ids, | |
454 | be_msg->cfg_apply_reply->n_batch_ids, | |
455 | be_msg->cfg_apply_reply->error_if_any, adapter); | |
7d65b7b7 CH |
456 | break; |
457 | case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY: | |
458 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY: | |
459 | case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY: | |
460 | case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA: | |
461 | /* | |
462 | * TODO: Add handling code in future. | |
463 | */ | |
464 | break; | |
465 | /* | |
466 | * NOTE: The following messages are always sent from MGMTD to | |
467 | * Backend clients only and/or need not be handled on MGMTd. | |
468 | */ | |
469 | case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY: | |
470 | case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ: | |
471 | case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ: | |
472 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ: | |
473 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ: | |
474 | case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ: | |
475 | case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ: | |
476 | case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET: | |
7d65b7b7 CH |
477 | default: |
478 | /* | |
479 | * A 'default' case is being added contrary to the | |
480 | * FRR code guidelines to take care of build | |
481 | * failures on certain build systems (courtesy of | |
482 | * the proto-c package). | |
483 | */ | |
484 | break; | |
485 | } | |
486 | ||
487 | return 0; | |
488 | } | |
489 | ||
490 | static inline void | |
491 | mgmt_be_adapter_sched_msg_write(struct mgmt_be_client_adapter *adapter) | |
492 | { | |
493 | if (!CHECK_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF)) | |
494 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE); | |
495 | } | |
496 | ||
497 | static inline void | |
498 | mgmt_be_adapter_writes_on(struct mgmt_be_client_adapter *adapter) | |
499 | { | |
500 | MGMTD_BE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name); | |
501 | UNSET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF); | |
f82370b4 | 502 | mgmt_be_adapter_sched_msg_write(adapter); |
7d65b7b7 CH |
503 | } |
504 | ||
505 | static inline void | |
506 | mgmt_be_adapter_writes_off(struct mgmt_be_client_adapter *adapter) | |
507 | { | |
508 | SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF); | |
509 | MGMTD_BE_ADAPTER_DBG("Pause writing msgs for '%s'", adapter->name); | |
510 | } | |
511 | ||
512 | static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter, | |
f82370b4 | 513 | Mgmtd__BeMessage *be_msg) |
7d65b7b7 | 514 | { |
f82370b4 CH |
515 | if (adapter->conn_fd == -1) { |
516 | MGMTD_BE_ADAPTER_DBG("can't send message on closed connection"); | |
7d65b7b7 CH |
517 | return -1; |
518 | } | |
519 | ||
f82370b4 CH |
520 | int rv = mgmt_msg_send_msg( |
521 | &adapter->mstate, be_msg, | |
522 | mgmtd__be_message__get_packed_size(be_msg), | |
523 | (size_t(*)(void *, void *))mgmtd__be_message__pack, | |
524 | mgmt_debug_be); | |
7d65b7b7 | 525 | mgmt_be_adapter_sched_msg_write(adapter); |
f82370b4 | 526 | return rv; |
7d65b7b7 CH |
527 | } |
528 | ||
529 | static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter, | |
530 | uint64_t txn_id, bool create) | |
531 | { | |
532 | Mgmtd__BeMessage be_msg; | |
533 | Mgmtd__BeTxnReq txn_req; | |
534 | ||
535 | mgmtd__be_txn_req__init(&txn_req); | |
536 | txn_req.create = create; | |
537 | txn_req.txn_id = txn_id; | |
538 | ||
539 | mgmtd__be_message__init(&be_msg); | |
540 | be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ; | |
541 | be_msg.txn_req = &txn_req; | |
542 | ||
543 | MGMTD_BE_ADAPTER_DBG( | |
544 | "Sending TXN_REQ message to Backend client '%s' for Txn-Id %llx", | |
545 | adapter->name, (unsigned long long)txn_id); | |
546 | ||
547 | return mgmt_be_adapter_send_msg(adapter, &be_msg); | |
548 | } | |
549 | ||
550 | static int | |
551 | mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter, | |
552 | uint64_t txn_id, uint64_t batch_id, | |
553 | Mgmtd__YangCfgDataReq **cfgdata_reqs, | |
554 | size_t num_reqs, bool end_of_data) | |
555 | { | |
556 | Mgmtd__BeMessage be_msg; | |
557 | Mgmtd__BeCfgDataCreateReq cfgdata_req; | |
558 | ||
559 | mgmtd__be_cfg_data_create_req__init(&cfgdata_req); | |
560 | cfgdata_req.batch_id = batch_id; | |
561 | cfgdata_req.txn_id = txn_id; | |
562 | cfgdata_req.data_req = cfgdata_reqs; | |
563 | cfgdata_req.n_data_req = num_reqs; | |
564 | cfgdata_req.end_of_data = end_of_data; | |
565 | ||
566 | mgmtd__be_message__init(&be_msg); | |
567 | be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ; | |
568 | be_msg.cfg_data_req = &cfgdata_req; | |
569 | ||
570 | MGMTD_BE_ADAPTER_DBG( | |
571 | "Sending CFGDATA_CREATE_REQ message to Backend client '%s' for Txn-Id %llx, Batch-Id: %llx", | |
572 | adapter->name, (unsigned long long)txn_id, | |
573 | (unsigned long long)batch_id); | |
574 | ||
575 | return mgmt_be_adapter_send_msg(adapter, &be_msg); | |
576 | } | |
577 | ||
578 | static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter, | |
579 | uint64_t txn_id) | |
580 | { | |
581 | Mgmtd__BeMessage be_msg; | |
582 | Mgmtd__BeCfgDataApplyReq apply_req; | |
583 | ||
584 | mgmtd__be_cfg_data_apply_req__init(&apply_req); | |
585 | apply_req.txn_id = txn_id; | |
586 | ||
587 | mgmtd__be_message__init(&be_msg); | |
588 | be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ; | |
589 | be_msg.cfg_apply_req = &apply_req; | |
590 | ||
591 | MGMTD_BE_ADAPTER_DBG( | |
592 | "Sending CFG_APPLY_REQ message to Backend client '%s' for Txn-Id 0x%llx", | |
593 | adapter->name, (unsigned long long)txn_id); | |
594 | ||
595 | return mgmt_be_adapter_send_msg(adapter, &be_msg); | |
596 | } | |
597 | ||
f82370b4 CH |
598 | static void mgmt_be_adapter_process_msg(void *user_ctx, uint8_t *data, |
599 | size_t len) | |
7d65b7b7 | 600 | { |
f82370b4 | 601 | struct mgmt_be_client_adapter *adapter = user_ctx; |
7d65b7b7 | 602 | Mgmtd__BeMessage *be_msg; |
7d65b7b7 | 603 | |
f82370b4 CH |
604 | be_msg = mgmtd__be_message__unpack(NULL, len, data); |
605 | if (!be_msg) { | |
606 | MGMTD_BE_ADAPTER_DBG( | |
607 | "Failed to decode %zu bytes for adapter: %s", len, | |
608 | adapter->name); | |
609 | return; | |
7d65b7b7 | 610 | } |
f82370b4 CH |
611 | MGMTD_BE_ADAPTER_DBG("Decoded %zu bytes of message: %u for adapter: %s", |
612 | len, be_msg->message_case, adapter->name); | |
613 | (void)mgmt_be_adapter_handle_msg(adapter, be_msg); | |
614 | mgmtd__be_message__free_unpacked(be_msg, NULL); | |
7d65b7b7 CH |
615 | } |
616 | ||
e6685141 | 617 | static void mgmt_be_adapter_proc_msgbufs(struct event *thread) |
7d65b7b7 | 618 | { |
e16d030c | 619 | struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread); |
7d65b7b7 | 620 | |
f82370b4 CH |
621 | if (mgmt_msg_procbufs(&adapter->mstate, mgmt_be_adapter_process_msg, |
622 | adapter, mgmt_debug_be)) | |
7d65b7b7 CH |
623 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG); |
624 | } | |
625 | ||
e6685141 | 626 | static void mgmt_be_adapter_read(struct event *thread) |
7d65b7b7 CH |
627 | { |
628 | struct mgmt_be_client_adapter *adapter; | |
f82370b4 | 629 | enum mgmt_msg_rsched rv; |
7d65b7b7 | 630 | |
e16d030c | 631 | adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread); |
7d65b7b7 | 632 | |
f82370b4 CH |
633 | rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd, mgmt_debug_be); |
634 | if (rv == MSR_DISCONNECT) { | |
635 | mgmt_be_adapter_disconnect(adapter); | |
636 | return; | |
7d65b7b7 | 637 | } |
f82370b4 | 638 | if (rv == MSR_SCHED_BOTH) |
7d65b7b7 | 639 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG); |
7d65b7b7 CH |
640 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ); |
641 | } | |
642 | ||
e6685141 | 643 | static void mgmt_be_adapter_write(struct event *thread) |
7d65b7b7 | 644 | { |
e16d030c | 645 | struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread); |
f82370b4 | 646 | enum mgmt_msg_wsched rv; |
7d65b7b7 | 647 | |
f82370b4 CH |
648 | rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd, mgmt_debug_be); |
649 | if (rv == MSW_SCHED_STREAM) | |
650 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE); | |
651 | else if (rv == MSW_DISCONNECT) | |
652 | mgmt_be_adapter_disconnect(adapter); | |
653 | else if (rv == MSW_SCHED_WRITES_OFF) { | |
7d65b7b7 CH |
654 | mgmt_be_adapter_writes_off(adapter); |
655 | mgmt_be_adapter_register_event(adapter, | |
f82370b4 CH |
656 | MGMTD_BE_CONN_WRITES_ON); |
657 | } else | |
658 | assert(rv == MSW_SCHED_NONE); | |
7d65b7b7 CH |
659 | } |
660 | ||
e6685141 | 661 | static void mgmt_be_adapter_resume_writes(struct event *thread) |
7d65b7b7 CH |
662 | { |
663 | struct mgmt_be_client_adapter *adapter; | |
664 | ||
e16d030c | 665 | adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread); |
7d65b7b7 CH |
666 | assert(adapter && adapter->conn_fd >= 0); |
667 | ||
668 | mgmt_be_adapter_writes_on(adapter); | |
669 | } | |
670 | ||
671 | static void mgmt_be_iter_and_get_cfg(struct mgmt_ds_ctx *ds_ctx, | |
672 | char *xpath, struct lyd_node *node, | |
673 | struct nb_node *nb_node, void *ctx) | |
674 | { | |
675 | struct mgmt_be_client_subscr_info subscr_info; | |
676 | struct mgmt_be_get_adapter_config_params *parms; | |
677 | struct mgmt_be_client_adapter *adapter; | |
678 | struct nb_config_cbs *root; | |
679 | uint32_t *seq; | |
680 | ||
681 | if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info) != 0) { | |
682 | MGMTD_BE_ADAPTER_ERR( | |
683 | "ERROR: Failed to get subscriber for '%s'", xpath); | |
684 | return; | |
685 | } | |
686 | ||
687 | parms = (struct mgmt_be_get_adapter_config_params *)ctx; | |
688 | ||
689 | adapter = parms->adapter; | |
690 | if (!subscr_info.xpath_subscr[adapter->id].subscribed) | |
691 | return; | |
692 | ||
693 | root = parms->cfg_chgs; | |
694 | seq = &parms->seq; | |
695 | nb_config_diff_created(node, seq, root); | |
696 | } | |
697 | ||
e6685141 | 698 | static void mgmt_be_adapter_conn_init(struct event *thread) |
7d65b7b7 CH |
699 | { |
700 | struct mgmt_be_client_adapter *adapter; | |
701 | ||
e16d030c | 702 | adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread); |
7d65b7b7 CH |
703 | assert(adapter && adapter->conn_fd >= 0); |
704 | ||
705 | /* | |
74335ceb | 706 | * Check first if the current session can run a CONFIG |
7d65b7b7 CH |
707 | * transaction or not. Reschedule if a CONFIG transaction |
708 | * from another session is already in progress. | |
74335ceb | 709 | */ |
7d65b7b7 CH |
710 | if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE) { |
711 | mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT); | |
74335ceb | 712 | return; |
7d65b7b7 | 713 | } |
7d65b7b7 | 714 | |
74335ceb YR |
715 | /* |
716 | * Notify TXN module to create a CONFIG transaction and | |
717 | * download the CONFIGs identified for this new client. | |
718 | * If the TXN module fails to initiate the CONFIG transaction | |
719 | * disconnect from the client forcing a reconnect later. | |
720 | * That should also take care of destroying the adapter. | |
721 | */ | |
7d65b7b7 CH |
722 | if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) { |
723 | mgmt_be_adapter_disconnect(adapter); | |
724 | adapter = NULL; | |
725 | } | |
7d65b7b7 CH |
726 | } |
727 | ||
728 | static void | |
729 | mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter, | |
730 | enum mgmt_be_event event) | |
731 | { | |
732 | struct timeval tv = {0}; | |
733 | ||
734 | switch (event) { | |
735 | case MGMTD_BE_CONN_INIT: | |
907a2395 | 736 | event_add_timer_msec(mgmt_be_adapter_tm, |
7d65b7b7 CH |
737 | mgmt_be_adapter_conn_init, adapter, |
738 | MGMTD_BE_CONN_INIT_DELAY_MSEC, | |
739 | &adapter->conn_init_ev); | |
740 | assert(adapter->conn_init_ev); | |
741 | break; | |
742 | case MGMTD_BE_CONN_READ: | |
907a2395 | 743 | event_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read, |
7d65b7b7 CH |
744 | adapter, adapter->conn_fd, &adapter->conn_read_ev); |
745 | assert(adapter->conn_read_ev); | |
746 | break; | |
747 | case MGMTD_BE_CONN_WRITE: | |
f82370b4 CH |
748 | if (adapter->conn_write_ev) |
749 | MGMTD_BE_ADAPTER_DBG( | |
750 | "write ready notify already set for client %s", | |
751 | adapter->name); | |
752 | else | |
753 | MGMTD_BE_ADAPTER_DBG( | |
754 | "scheduling write ready notify for client %s", | |
755 | adapter->name); | |
907a2395 | 756 | event_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write, |
7d65b7b7 CH |
757 | adapter, adapter->conn_fd, &adapter->conn_write_ev); |
758 | assert(adapter->conn_write_ev); | |
759 | break; | |
760 | case MGMTD_BE_PROC_MSG: | |
761 | tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC; | |
907a2395 | 762 | event_add_timer_tv(mgmt_be_adapter_tm, |
7d65b7b7 CH |
763 | mgmt_be_adapter_proc_msgbufs, adapter, &tv, |
764 | &adapter->proc_msg_ev); | |
765 | assert(adapter->proc_msg_ev); | |
766 | break; | |
767 | case MGMTD_BE_CONN_WRITES_ON: | |
907a2395 | 768 | event_add_timer_msec(mgmt_be_adapter_tm, |
7d65b7b7 CH |
769 | mgmt_be_adapter_resume_writes, adapter, |
770 | MGMTD_BE_MSG_WRITE_DELAY_MSEC, | |
771 | &adapter->conn_writes_on); | |
772 | assert(adapter->conn_writes_on); | |
773 | break; | |
774 | case MGMTD_BE_SERVER: | |
775 | case MGMTD_BE_SCHED_CFG_PREPARE: | |
776 | case MGMTD_BE_RESCHED_CFG_PREPARE: | |
777 | case MGMTD_BE_SCHED_CFG_APPLY: | |
778 | case MGMTD_BE_RESCHED_CFG_APPLY: | |
779 | assert(!"mgmt_be_adapter_post_event() called incorrectly"); | |
780 | break; | |
781 | } | |
782 | } | |
783 | ||
784 | void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter) | |
785 | { | |
786 | adapter->refcount++; | |
787 | } | |
788 | ||
789 | extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter) | |
790 | { | |
791 | assert(*adapter && (*adapter)->refcount); | |
792 | ||
793 | (*adapter)->refcount--; | |
794 | if (!(*adapter)->refcount) { | |
795 | mgmt_be_adapters_del(&mgmt_be_adapters, *adapter); | |
e16d030c DS |
796 | EVENT_OFF((*adapter)->conn_init_ev); |
797 | EVENT_OFF((*adapter)->conn_read_ev); | |
798 | EVENT_OFF((*adapter)->conn_write_ev); | |
799 | EVENT_OFF((*adapter)->conn_writes_on); | |
800 | EVENT_OFF((*adapter)->proc_msg_ev); | |
f82370b4 | 801 | mgmt_msg_destroy(&(*adapter)->mstate); |
7d65b7b7 CH |
802 | XFREE(MTYPE_MGMTD_BE_ADPATER, *adapter); |
803 | } | |
804 | ||
805 | *adapter = NULL; | |
806 | } | |
807 | ||
cd9d0537 | 808 | int mgmt_be_adapter_init(struct event_loop *tm) |
7d65b7b7 CH |
809 | { |
810 | if (!mgmt_be_adapter_tm) { | |
811 | mgmt_be_adapter_tm = tm; | |
812 | memset(mgmt_xpath_map, 0, sizeof(mgmt_xpath_map)); | |
813 | mgmt_num_xpath_maps = 0; | |
814 | memset(mgmt_be_adapters_by_id, 0, | |
815 | sizeof(mgmt_be_adapters_by_id)); | |
816 | mgmt_be_adapters_init(&mgmt_be_adapters); | |
817 | mgmt_be_xpath_map_init(); | |
818 | } | |
819 | ||
820 | return 0; | |
821 | } | |
822 | ||
/* Module teardown: release every remaining adapter. */
void mgmt_be_adapter_destroy(void)
{
	mgmt_be_cleanup_adapters();
}
827 | ||
/*
 * Find-or-create the adapter for a newly accepted backend connection.
 *
 * If no adapter already tracks 'conn_fd', a new one is allocated,
 * given a placeholder name/id (the real identity is learned later,
 * presumably from a SUBSCRIBE message - confirm with the msg handler),
 * locked (refcounted) and added to the global adapter list with a READ
 * event registered.  In all cases the socket is made non-blocking with
 * enlarged buffers and a CONN_INIT event is (re)scheduled to resync
 * config with the client.
 *
 * Returns the (new or existing) adapter; never NULL since XCALLOC
 * asserts internally on allocation failure.
 */
struct mgmt_be_client_adapter *
mgmt_be_create_adapter(int conn_fd, union sockunion *from)
{
	struct mgmt_be_client_adapter *adapter = NULL;

	adapter = mgmt_be_find_adapter_by_fd(conn_fd);
	if (!adapter) {
		adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
				  sizeof(struct mgmt_be_client_adapter));
		assert(adapter);

		adapter->conn_fd = conn_fd;
		/* Placeholder until the client identifies itself. */
		adapter->id = MGMTD_BE_CLIENT_ID_MAX;
		memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
		snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
			 adapter->conn_fd);
		mgmt_msg_init(&adapter->mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
			      MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
			      "BE-adapter");
		/* Take the initial reference; dropped on disconnect. */
		mgmt_be_adapter_lock(adapter);

		mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
		mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);

		RB_INIT(nb_config_cbs, &adapter->cfg_chgs);

		MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
				     adapter->name);
	}

	/* Make client socket non-blocking. */
	set_nonblocking(adapter->conn_fd);
	setsockopt_so_sendbuf(adapter->conn_fd, MGMTD_SOCKET_BE_SEND_BUF_SIZE);
	setsockopt_so_recvbuf(adapter->conn_fd, MGMTD_SOCKET_BE_RECV_BUF_SIZE);

	/* Trigger resync of config with the new adapter */
	mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);

	return adapter;
}
868 | ||
869 | struct mgmt_be_client_adapter * | |
870 | mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id) | |
871 | { | |
872 | return (id < MGMTD_BE_CLIENT_ID_MAX ? mgmt_be_adapters_by_id[id] | |
873 | : NULL); | |
874 | } | |
875 | ||
/*
 * Look up a backend adapter by its client name.
 * Thin wrapper over mgmt_be_find_adapter_by_name(); presumably
 * returns NULL when no adapter matches (defined earlier in this file).
 */
struct mgmt_be_client_adapter *
mgmt_be_get_adapter_by_name(const char *name)
{
	return mgmt_be_find_adapter_by_name(name);
}
881 | ||
882 | int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter, | |
883 | struct mgmt_ds_ctx *ds_ctx, | |
884 | struct nb_config_cbs **cfg_chgs) | |
885 | { | |
886 | char base_xpath[] = "/"; | |
887 | struct mgmt_be_get_adapter_config_params parms; | |
888 | ||
889 | assert(cfg_chgs); | |
890 | ||
891 | if (RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs)) { | |
892 | parms.adapter = adapter; | |
893 | parms.cfg_chgs = &adapter->cfg_chgs; | |
894 | parms.seq = 0; | |
895 | ||
896 | mgmt_ds_iter_data(ds_ctx, base_xpath, | |
897 | mgmt_be_iter_and_get_cfg, (void *)&parms, | |
898 | false); | |
899 | } | |
900 | ||
901 | *cfg_chgs = &adapter->cfg_chgs; | |
902 | return 0; | |
903 | } | |
904 | ||
/*
 * Ask the backend client behind 'adapter' to create transaction
 * 'txn_id'.  Returns the result of mgmt_be_send_txn_req().
 */
int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
		       uint64_t txn_id)
{
	/* 'true' => TXN-create (vs. TXN-delete in mgmt_be_destroy_txn). */
	return mgmt_be_send_txn_req(adapter, txn_id, true);
}
910 | ||
/*
 * Ask the backend client behind 'adapter' to destroy transaction
 * 'txn_id'.  Returns the result of mgmt_be_send_txn_req().
 */
int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
			uint64_t txn_id)
{
	/* 'false' => TXN-delete (vs. TXN-create in mgmt_be_create_txn). */
	return mgmt_be_send_txn_req(adapter, txn_id, false);
}
916 | ||
/*
 * Forward a config-data create request for the given transaction and
 * batch to the backend client behind 'adapter'.  'end_of_data'
 * presumably flags the final batch of the transaction - confirm
 * against mgmt_be_send_cfgdata_create_req().
 */
int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter,
				     uint64_t txn_id, uint64_t batch_id,
				     struct mgmt_be_cfgreq *cfg_req,
				     bool end_of_data)
{
	return mgmt_be_send_cfgdata_create_req(
		adapter, txn_id, batch_id, cfg_req->cfgdata_reqs,
		cfg_req->num_reqs, end_of_data);
}
926 | ||
927 | extern int | |
928 | mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter, | |
929 | uint64_t txn_id) | |
930 | { | |
931 | return mgmt_be_send_cfgapply_req(adapter, txn_id); | |
932 | } | |
933 | ||
934 | /* | |
935 | * This function maps a YANG dtata Xpath to one or more | |
936 | * Backend Clients that should be contacted for various purposes. | |
937 | */ | |
938 | int mgmt_be_get_subscr_info_for_xpath( | |
939 | const char *xpath, struct mgmt_be_client_subscr_info *subscr_info) | |
940 | { | |
941 | int indx, match, max_match = 0, num_reg; | |
942 | enum mgmt_be_client_id id; | |
943 | struct mgmt_be_client_subscr_info | |
944 | *reg_maps[array_size(mgmt_xpath_map)] = {0}; | |
945 | bool root_xp = false; | |
946 | ||
947 | if (!subscr_info) | |
948 | return -1; | |
949 | ||
950 | num_reg = 0; | |
951 | memset(subscr_info, 0, sizeof(*subscr_info)); | |
952 | ||
953 | if (strlen(xpath) <= 2 && xpath[0] == '/' | |
954 | && (!xpath[1] || xpath[1] == '*')) { | |
955 | root_xp = true; | |
956 | } | |
957 | ||
958 | MGMTD_BE_ADAPTER_DBG("XPATH: %s", xpath); | |
959 | for (indx = 0; indx < mgmt_num_xpath_maps; indx++) { | |
960 | /* | |
961 | * For Xpaths: '/' and '/ *' all xpath maps should match | |
962 | * the given xpath. | |
963 | */ | |
964 | if (!root_xp) { | |
965 | match = mgmt_be_eval_regexp_match( | |
966 | mgmt_xpath_map[indx].xpath_regexp, xpath); | |
967 | ||
968 | if (!match || match < max_match) | |
969 | continue; | |
970 | ||
971 | if (match > max_match) { | |
972 | num_reg = 0; | |
973 | max_match = match; | |
974 | } | |
975 | } | |
976 | ||
977 | reg_maps[num_reg] = &mgmt_xpath_map[indx].be_subscrs; | |
978 | num_reg++; | |
979 | } | |
980 | ||
981 | for (indx = 0; indx < num_reg; indx++) { | |
982 | FOREACH_MGMTD_BE_CLIENT_ID (id) { | |
983 | if (reg_maps[indx]->xpath_subscr[id].subscribed) { | |
984 | MGMTD_BE_ADAPTER_DBG( | |
985 | "Cient: %s", | |
986 | mgmt_be_client_id2name(id)); | |
987 | memcpy(&subscr_info->xpath_subscr[id], | |
988 | ®_maps[indx]->xpath_subscr[id], | |
989 | sizeof(subscr_info->xpath_subscr[id])); | |
990 | } | |
991 | } | |
992 | } | |
993 | ||
994 | return 0; | |
995 | } | |
996 | ||
/*
 * CLI helper: dump per-adapter status (name, fd, id, refcount and
 * message/byte tx/rx counters from the msg-state) for every backend
 * adapter, followed by the total adapter count.
 */
void mgmt_be_adapter_status_write(struct vty *vty)
{
	struct mgmt_be_client_adapter *adapter;

	vty_out(vty, "MGMTD Backend Adapters\n");

	FOREACH_ADAPTER_IN_LIST (adapter) {
		vty_out(vty, " Client: \t\t\t%s\n", adapter->name);
		vty_out(vty, " Conn-FD: \t\t\t%d\n", adapter->conn_fd);
		vty_out(vty, " Client-Id: \t\t\t%d\n", adapter->id);
		vty_out(vty, " Ref-Count: \t\t\t%u\n", adapter->refcount);
		vty_out(vty, " Msg-Recvd: \t\t\t%" PRIu64 "\n",
			adapter->mstate.nrxm);
		vty_out(vty, " Bytes-Recvd: \t\t%" PRIu64 "\n",
			adapter->mstate.nrxb);
		vty_out(vty, " Msg-Sent: \t\t\t%" PRIu64 "\n",
			adapter->mstate.ntxm);
		vty_out(vty, " Bytes-Sent: \t\t%" PRIu64 "\n",
			adapter->mstate.ntxb);
	}
	vty_out(vty, " Total: %d\n",
		(int)mgmt_be_adapters_count(&mgmt_be_adapters));
}
1020 | ||
1021 | void mgmt_be_xpath_register_write(struct vty *vty) | |
1022 | { | |
1023 | int indx; | |
1024 | enum mgmt_be_client_id id; | |
1025 | struct mgmt_be_client_adapter *adapter; | |
1026 | ||
1027 | vty_out(vty, "MGMTD Backend XPath Registry\n"); | |
1028 | ||
1029 | for (indx = 0; indx < mgmt_num_xpath_maps; indx++) { | |
1030 | vty_out(vty, " - XPATH: '%s'\n", | |
1031 | mgmt_xpath_map[indx].xpath_regexp); | |
1032 | FOREACH_MGMTD_BE_CLIENT_ID (id) { | |
1033 | if (mgmt_xpath_map[indx] | |
1034 | .be_subscrs.xpath_subscr[id] | |
1035 | .subscribed) { | |
1036 | vty_out(vty, | |
1037 | " -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n", | |
1038 | mgmt_be_client_id2name(id), | |
1039 | mgmt_xpath_map[indx] | |
1040 | .be_subscrs | |
1041 | .xpath_subscr[id] | |
1042 | .validate_config | |
1043 | ? "T" | |
1044 | : "F", | |
1045 | mgmt_xpath_map[indx] | |
1046 | .be_subscrs | |
1047 | .xpath_subscr[id] | |
1048 | .notify_config | |
1049 | ? "T" | |
1050 | : "F", | |
1051 | mgmt_xpath_map[indx] | |
1052 | .be_subscrs | |
1053 | .xpath_subscr[id] | |
1054 | .own_oper_data | |
1055 | ? "T" | |
1056 | : "F"); | |
1057 | adapter = mgmt_be_get_adapter_by_id(id); | |
1058 | if (adapter) { | |
1059 | vty_out(vty, " -- Adapter: %p\n", | |
1060 | adapter); | |
1061 | } | |
1062 | } | |
1063 | } | |
1064 | } | |
1065 | ||
1066 | vty_out(vty, "Total XPath Registries: %u\n", mgmt_num_xpath_maps); | |
1067 | } | |
1068 | ||
1069 | void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath) | |
1070 | { | |
1071 | struct mgmt_be_client_subscr_info subscr; | |
1072 | enum mgmt_be_client_id id; | |
1073 | struct mgmt_be_client_adapter *adapter; | |
1074 | ||
1075 | if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0) { | |
1076 | vty_out(vty, "ERROR: Failed to get subscriber for '%s'\n", | |
1077 | xpath); | |
1078 | return; | |
1079 | } | |
1080 | ||
1081 | vty_out(vty, "XPath: '%s'\n", xpath); | |
1082 | FOREACH_MGMTD_BE_CLIENT_ID (id) { | |
1083 | if (subscr.xpath_subscr[id].subscribed) { | |
1084 | vty_out(vty, | |
1085 | " -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n", | |
1086 | mgmt_be_client_id2name(id), | |
1087 | subscr.xpath_subscr[id].validate_config ? "T" | |
1088 | : "F", | |
1089 | subscr.xpath_subscr[id].notify_config ? "T" | |
1090 | : "F", | |
1091 | subscr.xpath_subscr[id].own_oper_data ? "T" | |
1092 | : "F"); | |
1093 | adapter = mgmt_be_get_adapter_by_id(id); | |
1094 | if (adapter) | |
1095 | vty_out(vty, " -- Adapter: %p\n", adapter); | |
1096 | } | |
1097 | } | |
1098 | } |