1 // SPDX-License-Identifier: GPL-2.0-or-later
3 // Copyright (c) 2021-2022, LabN Consulting, L.L.C
4 // Copyright (C) 2019 NetDEF, Inc.
9 #include <grpcpp/grpcpp.h>
10 #include "grpc/frr-northbound.grpc.pb.h"
14 #include "lib/version.h"
15 #include "lib/thread.h"
17 #include "lib_errors.h"
18 #include "northbound.h"
19 #include "northbound_db.h"
20 #include "frr_pthread.h"
27 #define GRPC_DEFAULT_PORT 50051
30 // ------------------------------------------------------
31 // File Local Variables
32 // ------------------------------------------------------
35 * NOTE: we can't use the FRR debugging infrastructure here since it uses
36 * atomics and C++ has a different atomics API. Enable gRPC debugging
37 * unconditionally until we figure out a way to solve this problem.
39 static bool nb_dbg_client_grpc
= 0;
41 static struct thread_master
*main_master
;
43 static struct frr_pthread
*fpt
;
45 static bool grpc_running
;
47 #define grpc_debug(...) \
49 if (nb_dbg_client_grpc) \
50 zlog_debug(__VA_ARGS__); \
53 // ------------------------------------------------------
55 // ------------------------------------------------------
// Lifecycle states for an asynchronous gRPC call tracked by RpcStateBase.
enum CallState { CREATE, PROCESS, MORE, FINISH, DELETED };

// Printable names for CallState values, indexed by enum value; used in
// grpc_debug() trace messages.
const char *call_states[] = {"CREATE", "PROCESS", "MORE", "FINISH", "DELETED"};
62 struct nb_config
*config
;
63 struct nb_transaction
*transaction
;
72 for (auto it
= _cdb
.begin(); it
!= _cdb
.end(); it
++)
73 delete_candidate(it
->first
);
76 struct candidate
*create_candidate(void)
78 uint64_t id
= ++_next_id
;
79 assert(id
); // TODO: implement an algorithm for unique reusable
81 struct candidate
*c
= &_cdb
[id
];
83 c
->config
= nb_config_dup(running_config
);
84 c
->transaction
= NULL
;
89 bool contains(uint64_t candidate_id
)
91 return _cdb
.count(candidate_id
) > 0;
94 void delete_candidate(uint64_t candidate_id
)
96 struct candidate
*c
= &_cdb
[candidate_id
];
97 char errmsg
[BUFSIZ
] = {0};
99 nb_config_free(c
->config
);
101 nb_candidate_commit_abort(c
->transaction
, errmsg
,
106 struct candidate
*get_candidate(uint64_t id
)
108 return _cdb
.count(id
) == 0 ? NULL
: &_cdb
[id
];
112 uint64_t _next_id
= 0;
113 std::map
<uint64_t, struct candidate
> _cdb
;
117 * RpcStateBase is the common base class used to track a gRPC RPC.
122 virtual void do_request(::frr::Northbound::AsyncService
*service
,
123 ::grpc::ServerCompletionQueue
*cq
,
126 RpcStateBase(const char *name
) : name(name
){};
128 virtual ~RpcStateBase() = default;
130 CallState
get_state() const
135 bool is_initial_process() const
137 /* Will always be true for Unary */
138 return entered_state
== CREATE
;
141 // Returns "more" status, if false caller can delete
142 bool run(frr::Northbound::AsyncService
*service
,
143 grpc::ServerCompletionQueue
*cq
)
146 * We enter in either CREATE or MORE state, and transition to
149 this->entered_state
= this->state
;
150 this->state
= PROCESS
;
151 grpc_debug("%s RPC: %s -> %s on grpc-io-thread", name
,
152 call_states
[this->entered_state
],
153 call_states
[this->state
]);
155 * We schedule the callback on the main pthread, and wait for
156 * the state to transition out of the PROCESS state. The new
157 * state will either be MORE or FINISH. It will always be FINISH
160 thread_add_event(main_master
, c_callback
, (void *)this, 0,
163 pthread_mutex_lock(&this->cmux
);
164 while (this->state
== PROCESS
)
165 pthread_cond_wait(&this->cond
, &this->cmux
);
166 pthread_mutex_unlock(&this->cmux
);
168 grpc_debug("%s RPC in %s on grpc-io-thread", name
,
169 call_states
[this->state
]);
171 if (this->state
== FINISH
) {
173 * Server is done (FINISH) so prep to receive a new
174 * request of this type. We could do this earlier but
175 * that would mean we could be handling multiple same
176 * type requests in parallel without limit.
178 this->do_request(service
, cq
, false);
184 virtual CallState
run_mainthread(struct thread
*thread
) = 0;
186 static void c_callback(struct thread
*thread
)
188 auto _tag
= static_cast<RpcStateBase
*>(THREAD_ARG(thread
));
190 * We hold the lock until the callback finishes and has updated
191 * _tag->state, then we signal done and release.
193 pthread_mutex_lock(&_tag
->cmux
);
195 CallState enter_state
= _tag
->state
;
196 grpc_debug("%s RPC: running %s on main thread", _tag
->name
,
197 call_states
[enter_state
]);
199 _tag
->state
= _tag
->run_mainthread(thread
);
201 grpc_debug("%s RPC: %s -> %s [main thread]", _tag
->name
,
202 call_states
[enter_state
], call_states
[_tag
->state
]);
204 pthread_cond_signal(&_tag
->cond
);
205 pthread_mutex_unlock(&_tag
->cmux
);
209 grpc::ServerContext ctx
;
210 pthread_mutex_t cmux
= PTHREAD_MUTEX_INITIALIZER
;
211 pthread_cond_t cond
= PTHREAD_COND_INITIALIZER
;
212 CallState state
= CREATE
;
213 CallState entered_state
= CREATE
;
220 * The UnaryRpcState class is used to track the execution of a Unary RPC.
223 * Q - the request type for a given unary RPC
224 * S - the response type for a given unary RPC
226 template <typename Q
, typename S
> class UnaryRpcState
: public RpcStateBase
229 typedef void (frr::Northbound::AsyncService::*reqfunc_t
)(
230 ::grpc::ServerContext
*, Q
*,
231 ::grpc::ServerAsyncResponseWriter
<S
> *,
232 ::grpc::CompletionQueue
*, ::grpc::ServerCompletionQueue
*,
235 UnaryRpcState(Candidates
*cdb
, reqfunc_t rfunc
,
236 grpc::Status (*cb
)(UnaryRpcState
<Q
, S
> *),
238 : RpcStateBase(name
), cdb(cdb
), requestf(rfunc
), callback(cb
),
241 void do_request(::frr::Northbound::AsyncService
*service
,
242 ::grpc::ServerCompletionQueue
*cq
,
243 bool no_copy
) override
245 grpc_debug("%s, posting a request for: %s", __func__
, name
);
246 auto copy
= no_copy
? this
247 : new UnaryRpcState(cdb
, requestf
, callback
,
249 (service
->*requestf
)(©
->ctx
, ©
->request
,
250 ©
->responder
, cq
, cq
, copy
);
253 CallState
run_mainthread(struct thread
*thread
) override
255 // Unary RPC are always finished, see "Unary" :)
256 grpc::Status status
= this->callback(this);
257 responder
.Finish(response
, status
, this);
265 grpc::ServerAsyncResponseWriter
<S
> responder
;
267 grpc::Status (*callback
)(UnaryRpcState
<Q
, S
> *);
268 reqfunc_t requestf
= NULL
;
272 * The StreamRpcState class is used to track the execution of a Streaming RPC.
275 * Q - the request type for a given streaming RPC
276 * S - the response type for a given streaming RPC
277 * X - the type used to track the streaming state
279 template <typename Q
, typename S
, typename X
>
280 class StreamRpcState
: public RpcStateBase
283 typedef void (frr::Northbound::AsyncService::*reqsfunc_t
)(
284 ::grpc::ServerContext
*, Q
*, ::grpc::ServerAsyncWriter
<S
> *,
285 ::grpc::CompletionQueue
*, ::grpc::ServerCompletionQueue
*,
288 StreamRpcState(reqsfunc_t rfunc
, bool (*cb
)(StreamRpcState
<Q
, S
, X
> *),
290 : RpcStateBase(name
), requestsf(rfunc
), callback(cb
),
291 async_responder(&ctx
){};
293 void do_request(::frr::Northbound::AsyncService
*service
,
294 ::grpc::ServerCompletionQueue
*cq
,
295 bool no_copy
) override
297 grpc_debug("%s, posting a request for: %s", __func__
, name
);
300 : new StreamRpcState(requestsf
, callback
, name
);
301 (service
->*requestsf
)(©
->ctx
, ©
->request
,
302 ©
->async_responder
, cq
, cq
, copy
);
305 CallState
run_mainthread(struct thread
*thread
) override
307 if (this->callback(this))
315 grpc::ServerAsyncWriter
<S
> async_responder
;
317 bool (*callback
)(StreamRpcState
<Q
, S
, X
> *);
318 reqsfunc_t requestsf
= NULL
;
323 // ------------------------------------------------------
325 // ------------------------------------------------------
327 static LYD_FORMAT
encoding2lyd_format(enum frr::Encoding encoding
)
335 flog_err(EC_LIB_DEVELOPMENT
,
336 "%s: unknown data encoding format (%u)", __func__
,
342 static int yang_dnode_edit(struct lyd_node
*dnode
, const std::string
&path
,
345 LY_ERR err
= lyd_new_path(dnode
, ly_native_ctx
, path
.c_str(), value
,
346 LYD_NEW_PATH_UPDATE
, &dnode
);
347 if (err
!= LY_SUCCESS
) {
348 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed: %s",
349 __func__
, ly_errmsg(ly_native_ctx
));
356 static int yang_dnode_delete(struct lyd_node
*dnode
, const std::string
&path
)
358 dnode
= yang_dnode_get(dnode
, path
.c_str());
362 lyd_free_tree(dnode
);
367 static LY_ERR
data_tree_from_dnode(frr::DataTree
*dt
,
368 const struct lyd_node
*dnode
,
369 LYD_FORMAT lyd_format
, bool with_defaults
)
374 SET_FLAG(options
, LYD_PRINT_WITHSIBLINGS
);
376 SET_FLAG(options
, LYD_PRINT_WD_ALL
);
378 SET_FLAG(options
, LYD_PRINT_WD_TRIM
);
380 LY_ERR err
= lyd_print_mem(&strp
, dnode
, lyd_format
, options
);
381 if (err
== LY_SUCCESS
) {
390 static struct lyd_node
*dnode_from_data_tree(const frr::DataTree
*dt
,
393 struct lyd_node
*dnode
;
398 options
= LYD_PARSE_NO_STATE
;
399 opt2
= LYD_VALIDATE_NO_STATE
;
401 options
= LYD_PARSE_STRICT
;
405 err
= lyd_parse_data_mem(ly_native_ctx
, dt
->data().c_str(),
406 encoding2lyd_format(dt
->encoding()), options
,
408 if (err
!= LY_SUCCESS
) {
409 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_parse_mem() failed: %s",
410 __func__
, ly_errmsg(ly_native_ctx
));
415 static struct lyd_node
*get_dnode_config(const std::string
&path
)
417 struct lyd_node
*dnode
;
419 if (!yang_dnode_exists(running_config
->dnode
,
420 path
.empty() ? NULL
: path
.c_str()))
423 dnode
= yang_dnode_get(running_config
->dnode
,
424 path
.empty() ? NULL
: path
.c_str());
426 dnode
= yang_dnode_dup(dnode
);
431 static int get_oper_data_cb(const struct lysc_node
*snode
,
432 struct yang_translator
*translator
,
433 struct yang_data
*data
, void *arg
)
435 struct lyd_node
*dnode
= static_cast<struct lyd_node
*>(arg
);
436 int ret
= yang_dnode_edit(dnode
, data
->xpath
, data
->value
);
437 yang_data_free(data
);
439 return (ret
== 0) ? NB_OK
: NB_ERR
;
442 static struct lyd_node
*get_dnode_state(const std::string
&path
)
444 struct lyd_node
*dnode
= yang_dnode_new(ly_native_ctx
, false);
445 if (nb_oper_data_iterate(path
.c_str(), NULL
, 0, get_oper_data_cb
, dnode
)
447 yang_dnode_free(dnode
);
454 static grpc::Status
get_path(frr::DataTree
*dt
, const std::string
&path
,
455 int type
, LYD_FORMAT lyd_format
,
458 struct lyd_node
*dnode_config
= NULL
;
459 struct lyd_node
*dnode_state
= NULL
;
460 struct lyd_node
*dnode_final
;
462 // Configuration data.
463 if (type
== frr::GetRequest_DataType_ALL
464 || type
== frr::GetRequest_DataType_CONFIG
) {
465 dnode_config
= get_dnode_config(path
);
467 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
468 "Data path not found");
472 if (type
== frr::GetRequest_DataType_ALL
473 || type
== frr::GetRequest_DataType_STATE
) {
474 dnode_state
= get_dnode_state(path
);
477 yang_dnode_free(dnode_config
);
478 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
479 "Failed to fetch operational data");
484 case frr::GetRequest_DataType_ALL
:
486 // Combine configuration and state data into a single
489 if (lyd_merge_siblings(&dnode_state
, dnode_config
,
492 yang_dnode_free(dnode_state
);
493 yang_dnode_free(dnode_config
);
495 grpc::StatusCode::INTERNAL
,
496 "Failed to merge configuration and state data",
497 ly_errmsg(ly_native_ctx
));
500 dnode_final
= dnode_state
;
502 case frr::GetRequest_DataType_CONFIG
:
503 dnode_final
= dnode_config
;
505 case frr::GetRequest_DataType_STATE
:
506 dnode_final
= dnode_state
;
510 // Validate data to create implicit default nodes if necessary.
511 int validate_opts
= 0;
512 if (type
== frr::GetRequest_DataType_CONFIG
)
513 validate_opts
= LYD_VALIDATE_NO_STATE
;
517 LY_ERR err
= lyd_validate_all(&dnode_final
, ly_native_ctx
,
518 validate_opts
, NULL
);
521 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_validate_all() failed: %s",
522 __func__
, ly_errmsg(ly_native_ctx
));
523 // Dump data using the requested format.
525 err
= data_tree_from_dnode(dt
, dnode_final
, lyd_format
,
527 yang_dnode_free(dnode_final
);
529 return grpc::Status(grpc::StatusCode::INTERNAL
,
530 "Failed to dump data");
531 return grpc::Status::OK
;
535 // ------------------------------------------------------
536 // RPC Callback Functions: run on main thread
537 // ------------------------------------------------------
539 grpc::Status
HandleUnaryGetCapabilities(
540 UnaryRpcState
<frr::GetCapabilitiesRequest
, frr::GetCapabilitiesResponse
>
543 grpc_debug("%s: entered", __func__
);
545 // Response: string frr_version = 1;
546 tag
->response
.set_frr_version(FRR_VERSION
);
548 // Response: bool rollback_support = 2;
549 #ifdef HAVE_CONFIG_ROLLBACKS
550 tag
->response
.set_rollback_support(true);
552 tag
->response
.set_rollback_support(false);
554 // Response: repeated ModuleData supported_modules = 3;
555 struct yang_module
*module
;
556 RB_FOREACH (module
, yang_modules
, &yang_modules
) {
557 auto m
= tag
->response
.add_supported_modules();
559 m
->set_name(module
->name
);
560 if (module
->info
->revision
)
561 m
->set_revision(module
->info
->revision
);
562 m
->set_organization(module
->info
->org
);
565 // Response: repeated Encoding supported_encodings = 4;
566 tag
->response
.add_supported_encodings(frr::JSON
);
567 tag
->response
.add_supported_encodings(frr::XML
);
569 return grpc::Status::OK
;
572 // Define the context variable type for this streaming handler
573 typedef std::list
<std::string
> GetContextType
;
575 bool HandleStreamingGet(
576 StreamRpcState
<frr::GetRequest
, frr::GetResponse
, GetContextType
> *tag
)
578 grpc_debug("%s: entered", __func__
);
580 auto mypathps
= &tag
->context
;
581 if (tag
->is_initial_process()) {
582 // Fill our context container first time through
583 grpc_debug("%s: initialize streaming state", __func__
);
584 auto paths
= tag
->request
.path();
585 for (const std::string
&path
: paths
) {
586 mypathps
->push_back(std::string(path
));
590 // Request: DataType type = 1;
591 int type
= tag
->request
.type();
592 // Request: Encoding encoding = 2;
593 frr::Encoding encoding
= tag
->request
.encoding();
594 // Request: bool with_defaults = 3;
595 bool with_defaults
= tag
->request
.with_defaults();
597 if (mypathps
->empty()) {
598 tag
->async_responder
.Finish(grpc::Status::OK
, tag
);
602 frr::GetResponse response
;
605 // Response: int64 timestamp = 1;
606 response
.set_timestamp(time(NULL
));
608 // Response: DataTree data = 2;
609 auto *data
= response
.mutable_data();
610 data
->set_encoding(tag
->request
.encoding());
611 status
= get_path(data
, mypathps
->back().c_str(), type
,
612 encoding2lyd_format(encoding
), with_defaults
);
615 tag
->async_responder
.WriteAndFinish(
616 response
, grpc::WriteOptions(), status
, tag
);
620 mypathps
->pop_back();
621 if (mypathps
->empty()) {
622 tag
->async_responder
.WriteAndFinish(
623 response
, grpc::WriteOptions(), grpc::Status::OK
, tag
);
626 tag
->async_responder
.Write(response
, tag
);
631 grpc::Status
HandleUnaryCreateCandidate(
632 UnaryRpcState
<frr::CreateCandidateRequest
, frr::CreateCandidateResponse
>
635 grpc_debug("%s: entered", __func__
);
637 struct candidate
*candidate
= tag
->cdb
->create_candidate();
639 return grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED
,
640 "Can't create candidate configuration");
641 tag
->response
.set_candidate_id(candidate
->id
);
642 return grpc::Status::OK
;
645 grpc::Status
HandleUnaryDeleteCandidate(
646 UnaryRpcState
<frr::DeleteCandidateRequest
, frr::DeleteCandidateResponse
>
649 grpc_debug("%s: entered", __func__
);
651 uint32_t candidate_id
= tag
->request
.candidate_id();
653 grpc_debug("%s(candidate_id: %u)", __func__
, candidate_id
);
655 if (!tag
->cdb
->contains(candidate_id
))
656 return grpc::Status(grpc::StatusCode::NOT_FOUND
,
657 "candidate configuration not found");
658 tag
->cdb
->delete_candidate(candidate_id
);
659 return grpc::Status::OK
;
662 grpc::Status
HandleUnaryUpdateCandidate(
663 UnaryRpcState
<frr::UpdateCandidateRequest
, frr::UpdateCandidateResponse
>
666 grpc_debug("%s: entered", __func__
);
668 uint32_t candidate_id
= tag
->request
.candidate_id();
670 grpc_debug("%s(candidate_id: %u)", __func__
, candidate_id
);
672 struct candidate
*candidate
= tag
->cdb
->get_candidate(candidate_id
);
675 return grpc::Status(grpc::StatusCode::NOT_FOUND
,
676 "candidate configuration not found");
677 if (candidate
->transaction
)
679 grpc::StatusCode::FAILED_PRECONDITION
,
680 "candidate is in the middle of a transaction");
681 if (nb_candidate_update(candidate
->config
) != NB_OK
)
682 return grpc::Status(grpc::StatusCode::INTERNAL
,
683 "failed to update candidate configuration");
685 return grpc::Status::OK
;
688 grpc::Status
HandleUnaryEditCandidate(
689 UnaryRpcState
<frr::EditCandidateRequest
, frr::EditCandidateResponse
>
692 grpc_debug("%s: entered", __func__
);
694 uint32_t candidate_id
= tag
->request
.candidate_id();
696 grpc_debug("%s(candidate_id: %u)", __func__
, candidate_id
);
698 struct candidate
*candidate
= tag
->cdb
->get_candidate(candidate_id
);
700 return grpc::Status(grpc::StatusCode::NOT_FOUND
,
701 "candidate configuration not found");
703 struct nb_config
*candidate_tmp
= nb_config_dup(candidate
->config
);
705 auto pvs
= tag
->request
.update();
706 for (const frr::PathValue
&pv
: pvs
) {
707 if (yang_dnode_edit(candidate_tmp
->dnode
, pv
.path(),
708 pv
.value().c_str()) != 0) {
709 nb_config_free(candidate_tmp
);
711 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
712 "Failed to update \"" + pv
.path() +
717 pvs
= tag
->request
.delete_();
718 for (const frr::PathValue
&pv
: pvs
) {
719 if (yang_dnode_delete(candidate_tmp
->dnode
, pv
.path()) != 0) {
720 nb_config_free(candidate_tmp
);
721 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
722 "Failed to remove \"" + pv
.path() +
727 // No errors, accept all changes.
728 nb_config_replace(candidate
->config
, candidate_tmp
, false);
729 return grpc::Status::OK
;
732 grpc::Status
HandleUnaryLoadToCandidate(
733 UnaryRpcState
<frr::LoadToCandidateRequest
, frr::LoadToCandidateResponse
>
736 grpc_debug("%s: entered", __func__
);
738 uint32_t candidate_id
= tag
->request
.candidate_id();
740 grpc_debug("%s(candidate_id: %u)", __func__
, candidate_id
);
742 // Request: LoadType type = 2;
743 int load_type
= tag
->request
.type();
744 // Request: DataTree config = 3;
745 auto config
= tag
->request
.config();
747 struct candidate
*candidate
= tag
->cdb
->get_candidate(candidate_id
);
749 return grpc::Status(grpc::StatusCode::NOT_FOUND
,
750 "candidate configuration not found");
752 struct lyd_node
*dnode
= dnode_from_data_tree(&config
, true);
754 return grpc::Status(grpc::StatusCode::INTERNAL
,
755 "Failed to parse the configuration");
757 struct nb_config
*loaded_config
= nb_config_new(dnode
);
758 if (load_type
== frr::LoadToCandidateRequest::REPLACE
)
759 nb_config_replace(candidate
->config
, loaded_config
, false);
760 else if (nb_config_merge(candidate
->config
, loaded_config
, false) !=
762 return grpc::Status(grpc::StatusCode::INTERNAL
,
763 "Failed to merge the loaded configuration");
765 return grpc::Status::OK
;
769 HandleUnaryCommit(UnaryRpcState
<frr::CommitRequest
, frr::CommitResponse
> *tag
)
771 grpc_debug("%s: entered", __func__
);
773 // Request: uint32 candidate_id = 1;
774 uint32_t candidate_id
= tag
->request
.candidate_id();
776 grpc_debug("%s(candidate_id: %u)", __func__
, candidate_id
);
778 // Request: Phase phase = 2;
779 int phase
= tag
->request
.phase();
780 // Request: string comment = 3;
781 const std::string comment
= tag
->request
.comment();
783 // Find candidate configuration.
784 struct candidate
*candidate
= tag
->cdb
->get_candidate(candidate_id
);
786 return grpc::Status(grpc::StatusCode::NOT_FOUND
,
787 "candidate configuration not found");
790 uint32_t transaction_id
= 0;
792 // Check for misuse of the two-phase commit protocol.
794 case frr::CommitRequest::PREPARE
:
795 case frr::CommitRequest::ALL
:
796 if (candidate
->transaction
)
798 grpc::StatusCode::FAILED_PRECONDITION
,
799 "candidate is in the middle of a transaction");
801 case frr::CommitRequest::ABORT
:
802 case frr::CommitRequest::APPLY
:
803 if (!candidate
->transaction
)
805 grpc::StatusCode::FAILED_PRECONDITION
,
806 "no transaction in progress");
813 // Execute the user request.
814 struct nb_context context
= {};
815 context
.client
= NB_CLIENT_GRPC
;
816 char errmsg
[BUFSIZ
] = {0};
819 case frr::CommitRequest::VALIDATE
:
820 grpc_debug("`-> Performing VALIDATE");
821 ret
= nb_candidate_validate(&context
, candidate
->config
, errmsg
,
824 case frr::CommitRequest::PREPARE
:
825 grpc_debug("`-> Performing PREPARE");
826 ret
= nb_candidate_commit_prepare(
827 context
, candidate
->config
, comment
.c_str(),
828 &candidate
->transaction
, false, false, errmsg
,
831 case frr::CommitRequest::ABORT
:
832 grpc_debug("`-> Performing ABORT");
833 nb_candidate_commit_abort(candidate
->transaction
, errmsg
,
836 case frr::CommitRequest::APPLY
:
837 grpc_debug("`-> Performing APPLY");
838 nb_candidate_commit_apply(candidate
->transaction
, true,
839 &transaction_id
, errmsg
,
842 case frr::CommitRequest::ALL
:
843 grpc_debug("`-> Performing ALL");
844 ret
= nb_candidate_commit(context
, candidate
->config
, true,
845 comment
.c_str(), &transaction_id
,
846 errmsg
, sizeof(errmsg
));
850 // Map northbound error codes to gRPC status codes.
854 status
= grpc::Status::OK
;
856 case NB_ERR_NO_CHANGES
:
857 status
= grpc::Status(grpc::StatusCode::ABORTED
, errmsg
);
860 status
= grpc::Status(grpc::StatusCode::UNAVAILABLE
, errmsg
);
862 case NB_ERR_VALIDATION
:
863 status
= grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
866 case NB_ERR_RESOURCE
:
867 status
= grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED
,
872 status
= grpc::Status(grpc::StatusCode::INTERNAL
, errmsg
);
876 grpc_debug("`-> Result: %s (message: '%s')",
877 nb_err_name((enum nb_error
)ret
), errmsg
);
880 // Response: uint32 transaction_id = 1;
882 tag
->response
.set_transaction_id(transaction_id
);
884 if (strlen(errmsg
) > 0)
885 tag
->response
.set_error_message(errmsg
);
890 grpc::Status
HandleUnaryLockConfig(
891 UnaryRpcState
<frr::LockConfigRequest
, frr::LockConfigResponse
> *tag
)
893 grpc_debug("%s: entered", __func__
);
895 if (nb_running_lock(NB_CLIENT_GRPC
, NULL
))
896 return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION
,
897 "running configuration is locked already");
898 return grpc::Status::OK
;
901 grpc::Status
HandleUnaryUnlockConfig(
902 UnaryRpcState
<frr::UnlockConfigRequest
, frr::UnlockConfigResponse
> *tag
)
904 grpc_debug("%s: entered", __func__
);
906 if (nb_running_unlock(NB_CLIENT_GRPC
, NULL
))
908 grpc::StatusCode::FAILED_PRECONDITION
,
909 "failed to unlock the running configuration");
910 return grpc::Status::OK
;
/*
 * Iteration callback for nb_db_transactions_iterate(): appends one
 * transaction record to the result list.
 *
 * `arg` points to a std::list of (id, client, date, comment) tuples that
 * accumulates the transactions to be streamed back to the client.
 */
static void list_transactions_cb(void *arg, int transaction_id,
				 const char *client_name, const char *date,
				 const char *comment)
{
	auto list = static_cast<std::list<
		std::tuple<int, std::string, std::string, std::string>> *>(arg);
	list->push_back(
		std::make_tuple(transaction_id, std::string(client_name),
				std::string(date), std::string(comment)));
}
924 // Define the context variable type for this streaming handler
925 typedef std::list
<std::tuple
<int, std::string
, std::string
, std::string
>>
926 ListTransactionsContextType
;
928 bool HandleStreamingListTransactions(
929 StreamRpcState
<frr::ListTransactionsRequest
,
930 frr::ListTransactionsResponse
,
931 ListTransactionsContextType
> *tag
)
933 grpc_debug("%s: entered", __func__
);
935 auto list
= &tag
->context
;
936 if (tag
->is_initial_process()) {
937 grpc_debug("%s: initialize streaming state", __func__
);
938 // Fill our context container first time through
939 nb_db_transactions_iterate(list_transactions_cb
, list
);
940 list
->push_back(std::make_tuple(
941 0xFFFF, std::string("fake client"),
942 std::string("fake date"), std::string("fake comment")));
943 list
->push_back(std::make_tuple(0xFFFE,
944 std::string("fake client2"),
945 std::string("fake date"),
946 std::string("fake comment2")));
950 tag
->async_responder
.Finish(grpc::Status::OK
, tag
);
954 auto item
= list
->back();
956 frr::ListTransactionsResponse response
;
958 // Response: uint32 id = 1;
959 response
.set_id(std::get
<0>(item
));
961 // Response: string client = 2;
962 response
.set_client(std::get
<1>(item
).c_str());
964 // Response: string date = 3;
965 response
.set_date(std::get
<2>(item
).c_str());
967 // Response: string comment = 4;
968 response
.set_comment(std::get
<3>(item
).c_str());
972 tag
->async_responder
.WriteAndFinish(
973 response
, grpc::WriteOptions(), grpc::Status::OK
, tag
);
976 tag
->async_responder
.Write(response
, tag
);
981 grpc::Status
HandleUnaryGetTransaction(
982 UnaryRpcState
<frr::GetTransactionRequest
, frr::GetTransactionResponse
>
985 grpc_debug("%s: entered", __func__
);
987 // Request: uint32 transaction_id = 1;
988 uint32_t transaction_id
= tag
->request
.transaction_id();
989 // Request: Encoding encoding = 2;
990 frr::Encoding encoding
= tag
->request
.encoding();
991 // Request: bool with_defaults = 3;
992 bool with_defaults
= tag
->request
.with_defaults();
994 grpc_debug("%s(transaction_id: %u, encoding: %u)", __func__
,
995 transaction_id
, encoding
);
997 struct nb_config
*nb_config
;
999 // Load configuration from the transactions database.
1000 nb_config
= nb_db_transaction_load(transaction_id
);
1002 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
1003 "Transaction not found");
1005 // Response: DataTree config = 1;
1006 auto config
= tag
->response
.mutable_config();
1007 config
->set_encoding(encoding
);
1009 // Dump data using the requested format.
1010 if (data_tree_from_dnode(config
, nb_config
->dnode
,
1011 encoding2lyd_format(encoding
), with_defaults
)
1013 nb_config_free(nb_config
);
1014 return grpc::Status(grpc::StatusCode::INTERNAL
,
1015 "Failed to dump data");
1018 nb_config_free(nb_config
);
1020 return grpc::Status::OK
;
1023 grpc::Status
HandleUnaryExecute(
1024 UnaryRpcState
<frr::ExecuteRequest
, frr::ExecuteResponse
> *tag
)
1026 grpc_debug("%s: entered", __func__
);
1028 struct nb_node
*nb_node
;
1029 struct list
*input_list
;
1030 struct list
*output_list
;
1031 struct listnode
*node
;
1032 struct yang_data
*data
;
1034 char errmsg
[BUFSIZ
] = {0};
1036 // Request: string path = 1;
1037 xpath
= tag
->request
.path().c_str();
1039 grpc_debug("%s(path: \"%s\")", __func__
, xpath
);
1041 if (tag
->request
.path().empty())
1042 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
1043 "Data path is empty");
1045 nb_node
= nb_node_find(xpath
);
1047 return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT
,
1048 "Unknown data path");
1050 input_list
= yang_data_list_new();
1051 output_list
= yang_data_list_new();
1053 // Read input parameters.
1054 auto input
= tag
->request
.input();
1055 for (const frr::PathValue
&pv
: input
) {
1056 // Request: repeated PathValue input = 2;
1057 data
= yang_data_new(pv
.path().c_str(), pv
.value().c_str());
1058 listnode_add(input_list
, data
);
1061 // Execute callback registered for this XPath.
1062 if (nb_callback_rpc(nb_node
, xpath
, input_list
, output_list
, errmsg
,
1065 flog_warn(EC_LIB_NB_CB_RPC
, "%s: rpc callback failed: %s",
1067 list_delete(&input_list
);
1068 list_delete(&output_list
);
1070 return grpc::Status(grpc::StatusCode::INTERNAL
, "RPC failed");
1073 // Process output parameters.
1074 for (ALL_LIST_ELEMENTS_RO(output_list
, node
, data
)) {
1075 // Response: repeated PathValue output = 1;
1076 frr::PathValue
*pv
= tag
->response
.add_output();
1077 pv
->set_path(data
->xpath
);
1078 pv
->set_value(data
->value
);
1082 list_delete(&input_list
);
1083 list_delete(&output_list
);
1085 return grpc::Status::OK
;
1088 // ------------------------------------------------------
1089 // Thread Initialization and Run Functions
1090 // ------------------------------------------------------
1093 #define REQUEST_NEWRPC(NAME, cdb) \
1095 auto _rpcState = new UnaryRpcState<frr::NAME##Request, \
1096 frr::NAME##Response>( \
1097 (cdb), &frr::Northbound::AsyncService::Request##NAME, \
1098 &HandleUnary##NAME, #NAME); \
1099 _rpcState->do_request(&service, cq.get(), true); \
1102 #define REQUEST_NEWRPC_STREAMING(NAME) \
1104 auto _rpcState = new StreamRpcState<frr::NAME##Request, \
1105 frr::NAME##Response, \
1106 NAME##ContextType>( \
1107 &frr::Northbound::AsyncService::Request##NAME, \
1108 &HandleStreaming##NAME, #NAME); \
1109 _rpcState->do_request(&service, cq.get(), true); \
1112 struct grpc_pthread_attr
{
1113 struct frr_pthread_attr attr
;
1117 // Capture these objects so we can try to shut down cleanly
1118 static pthread_mutex_t s_server_lock
= PTHREAD_MUTEX_INITIALIZER
;
1119 static grpc::Server
*s_server
;
1121 static void *grpc_pthread_start(void *arg
)
1123 struct frr_pthread
*fpt
= static_cast<frr_pthread
*>(arg
);
1124 uint port
= (uint
) reinterpret_cast<intptr_t>(fpt
->data
);
1126 Candidates candidates
;
1127 grpc::ServerBuilder builder
;
1128 std::stringstream server_address
;
1129 frr::Northbound::AsyncService service
;
1131 frr_pthread_set_name(fpt
);
1133 server_address
<< "0.0.0.0:" << port
;
1134 builder
.AddListeningPort(server_address
.str(),
1135 grpc::InsecureServerCredentials());
1136 builder
.RegisterService(&service
);
1137 builder
.AddChannelArgument(
1138 GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS
, 5000);
1139 std::unique_ptr
<grpc::ServerCompletionQueue
> cq
=
1140 builder
.AddCompletionQueue();
1141 std::unique_ptr
<grpc::Server
> server
= builder
.BuildAndStart();
1142 s_server
= server
.get();
1144 pthread_mutex_lock(&s_server_lock
); // Make coverity happy
1145 grpc_running
= true;
1146 pthread_mutex_unlock(&s_server_lock
); // Make coverity happy
1148 /* Schedule unary RPC handlers */
1149 REQUEST_NEWRPC(GetCapabilities
, NULL
);
1150 REQUEST_NEWRPC(CreateCandidate
, &candidates
);
1151 REQUEST_NEWRPC(DeleteCandidate
, &candidates
);
1152 REQUEST_NEWRPC(UpdateCandidate
, &candidates
);
1153 REQUEST_NEWRPC(EditCandidate
, &candidates
);
1154 REQUEST_NEWRPC(LoadToCandidate
, &candidates
);
1155 REQUEST_NEWRPC(Commit
, &candidates
);
1156 REQUEST_NEWRPC(GetTransaction
, NULL
);
1157 REQUEST_NEWRPC(LockConfig
, NULL
);
1158 REQUEST_NEWRPC(UnlockConfig
, NULL
);
1159 REQUEST_NEWRPC(Execute
, NULL
);
1161 /* Schedule streaming RPC handlers */
1162 REQUEST_NEWRPC_STREAMING(Get
);
1163 REQUEST_NEWRPC_STREAMING(ListTransactions
);
1165 zlog_notice("gRPC server listening on %s",
1166 server_address
.str().c_str());
1168 /* Process inbound RPCs */
1172 if (!cq
->Next(&tag
, &ok
)) {
1173 grpc_debug("%s: CQ empty exiting", __func__
);
1177 grpc_debug("%s: got next from CQ tag: %p ok: %d", __func__
, tag
,
1181 delete static_cast<RpcStateBase
*>(tag
);
1185 RpcStateBase
*rpc
= static_cast<RpcStateBase
*>(tag
);
1186 if (rpc
->get_state() != FINISH
)
1187 rpc
->run(&service
, cq
.get());
1189 grpc_debug("%s RPC FINISH -> [delete]", rpc
->name
);
1194 /* This was probably done for us to get here, but let's be safe */
1195 pthread_mutex_lock(&s_server_lock
);
1196 grpc_running
= false;
1198 grpc_debug("%s: shutdown server and CQ", __func__
);
1202 pthread_mutex_unlock(&s_server_lock
);
1204 grpc_debug("%s: shutting down CQ", __func__
);
1207 grpc_debug("%s: draining the CQ", __func__
);
1208 while (cq
->Next(&tag
, &ok
)) {
1209 grpc_debug("%s: drain tag %p", __func__
, tag
);
1210 delete static_cast<RpcStateBase
*>(tag
);
1213 zlog_info("%s: exiting from grpc pthread", __func__
);
1218 static int frr_grpc_init(uint port
)
1220 struct frr_pthread_attr attr
= {
1221 .start
= grpc_pthread_start
,
1225 grpc_debug("%s: entered", __func__
);
1227 fpt
= frr_pthread_new(&attr
, "frr-grpc", "frr-grpc");
1228 fpt
->data
= reinterpret_cast<void *>((intptr_t)port
);
1230 /* Create a pthread for gRPC since it runs its own event loop. */
1231 if (frr_pthread_run(fpt
, NULL
) < 0) {
1232 flog_err(EC_LIB_SYSTEM_CALL
, "%s: error creating pthread: %s",
1233 __func__
, safe_strerror(errno
));
1240 static int frr_grpc_finish(void)
1242 grpc_debug("%s: entered", __func__
);
1248 * Shut the server down here in main thread. This will cause the wait on
1249 * the completion queue (cq.Next()) to exit and cleanup everything else.
1251 pthread_mutex_lock(&s_server_lock
);
1252 grpc_running
= false;
1254 grpc_debug("%s: shutdown server", __func__
);
1255 s_server
->Shutdown();
1258 pthread_mutex_unlock(&s_server_lock
);
1260 grpc_debug("%s: joining and destroy grpc thread", __func__
);
1261 pthread_join(fpt
->thread
, NULL
);
1262 frr_pthread_destroy(fpt
);
1264 // Fix protobuf 'memory leaks' during shutdown.
1265 // https://groups.google.com/g/protobuf/c/4y_EmQiCGgs
1266 google::protobuf::ShutdownProtobufLibrary();
1272 * This is done this way because module_init and module_late_init are both
1273 * called during daemon pre-fork initialization. Because the GRPC library
1274 * spawns threads internally, we need to delay initializing it until after
1275 * fork. This is done by scheduling this init function as an event task, since
1276 * the event loop doesn't run until after fork.
1278 static void frr_grpc_module_very_late_init(struct thread
*thread
)
1280 const char *args
= THIS_MODULE
->load_args
;
1281 uint port
= GRPC_DEFAULT_PORT
;
1284 port
= std::stoul(args
);
1285 if (port
< 1024 || port
> UINT16_MAX
) {
1286 flog_err(EC_LIB_GRPC_INIT
,
1287 "%s: port number must be between 1025 and %d",
1288 __func__
, UINT16_MAX
);
1293 if (frr_grpc_init(port
) < 0)
1299 flog_err(EC_LIB_GRPC_INIT
, "failed to initialize the gRPC module");
1302 static int frr_grpc_module_late_init(struct thread_master
*tm
)
1305 hook_register(frr_fini
, frr_grpc_finish
);
1306 thread_add_event(tm
, frr_grpc_module_very_late_init
, NULL
, 0, NULL
);
1310 static int frr_grpc_module_init(void)
1312 hook_register(frr_late_init
, frr_grpc_module_late_init
);
1317 FRR_MODULE_SETUP(.name
= "frr_grpc", .version
= FRR_VERSION
,
1318 .description
= "FRR gRPC northbound module",
1319 .init
= frr_grpc_module_init
, );