1 // SPDX-License-Identifier: GPL-2.0-or-later
3 // Copyright (c) 2021-2022, LabN Consulting, L.L.C
4 // Copyright (C) 2019 NetDEF, Inc.
9 #include <grpcpp/grpcpp.h>
10 #include "grpc/frr-northbound.grpc.pb.h"
14 #include "lib/version.h"
17 #include "lib_errors.h"
18 #include "northbound.h"
19 #include "northbound_db.h"
20 #include "frr_pthread.h"
27 #define GRPC_DEFAULT_PORT 50051
30 // ------------------------------------------------------
31 // File Local Variables
32 // ------------------------------------------------------
35 * NOTE: we can't use the FRR debugging infrastructure here since it uses
36 * atomics and C++ has a different atomics API. Enable gRPC debugging
37 * unconditionally until we figure out a way to solve this problem.
/* Enable gRPC debug logging; use `false` for bools, not `0`. */
static bool nb_dbg_client_grpc = false;

/* Main (northbound) event loop; RPC work is scheduled onto it. */
static struct event_loop *main_master;

/* Dedicated pthread that runs the gRPC server / completion queue. */
static struct frr_pthread *fpt;

/* True while the gRPC server is up and processing RPCs. */
static bool grpc_running;

/*
 * Wrapped in do/while(0) so the macro expands to a single statement and
 * cannot break an `if/else` chain at the call site.
 */
#define grpc_debug(...)                                                        \
	do {                                                                   \
		if (nb_dbg_client_grpc)                                        \
			zlog_debug(__VA_ARGS__);                               \
	} while (0)
53 // ------------------------------------------------------
55 // ------------------------------------------------------
/* Lifecycle states of a tracked RPC (see RpcStateBase below). */
enum CallState { CREATE, PROCESS, MORE, FINISH, DELETED };

/* Printable state names, indexed by CallState — keep in sync with the enum. */
const char *call_states[] = {
	"CREATE", "PROCESS", "MORE", "FINISH", "DELETED",
};
/*
 * One candidate configuration tracked by this module.
 * NOTE(review): the struct/class framing lines (headers, braces, access
 * specifiers, and an apparent `id` member referenced elsewhere) are not
 * visible in this extract; only the lines below are reproduced.
 */
struct nb_config *config;	    /* private copy of the configuration */
struct nb_transaction *transaction; /* in-progress commit, or NULL */

	/* Destructor body: drop every candidate still in the database. */
	for (auto it = _cdb.begin(); it != _cdb.end(); it++)
		delete_candidate(it->first);

	/* Allocate a new candidate seeded from the running configuration. */
	struct candidate *create_candidate(void)
		uint64_t id = ++_next_id;
		assert(id); // TODO: implement an algorithm for unique reusable
		struct candidate *c = &_cdb[id];
		c->config = nb_config_dup(running_config);
		c->transaction = NULL;

	/* True if a candidate with this id exists in the database. */
	bool contains(uint64_t candidate_id)
		return _cdb.count(candidate_id) > 0;

	/*
	 * Free a candidate's config copy and abort any pending transaction
	 * (abort call's trailing arguments are not visible in this extract).
	 */
	void delete_candidate(uint64_t candidate_id)
		struct candidate *c = &_cdb[candidate_id];
		char errmsg[BUFSIZ] = {0};
		nb_config_free(c->config);
		nb_candidate_commit_abort(c->transaction, errmsg,

	/* Look up a candidate by id; NULL when absent. */
	struct candidate *get_candidate(uint64_t id)
		return _cdb.count(id) == 0 ? NULL : &_cdb[id];

	uint64_t _next_id = 0;			   /* monotonic id source */
	std::map<uint64_t, struct candidate> _cdb; /* id -> candidate */
/*
 * RpcStateBase is the common base class used to track a gRPC RPC.
 * It shuttles an RPC between the gRPC I/O thread and FRR's main thread
 * using a mutex/condvar handshake.
 * NOTE(review): class framing lines (braces, `name` member, return
 * statements of run()) are not visible in this extract.
 */
	/* Arm the completion queue for a new request of this RPC type. */
	virtual void do_request(::frr::Northbound::AsyncService *service,
				::grpc::ServerCompletionQueue *cq,

	RpcStateBase(const char *name) : name(name){};

	virtual ~RpcStateBase() = default;

	CallState get_state() const

	/* True when this invocation is the first for this state object. */
	bool is_initial_process() const
		/* Will always be true for Unary */
		return entered_state == CREATE;

	// Returns "more" status, if false caller can delete
	bool run(frr::Northbound::AsyncService *service,
		 grpc::ServerCompletionQueue *cq)
		/*
		 * We enter in either CREATE or MORE state, and transition to
		 * PROCESS while the main thread runs the callback.
		 */
		this->entered_state = this->state;
		this->state = PROCESS;
		grpc_debug("%s RPC: %s -> %s on grpc-io-thread", name,
			   call_states[this->entered_state],
			   call_states[this->state]);
		/*
		 * We schedule the callback on the main pthread, and wait for
		 * the state to transition out of the PROCESS state. The new
		 * state will either be MORE or FINISH. It will always be FINISH
		 * for unary RPCs.
		 */
		event_add_event(main_master, c_callback, (void *)this, 0, NULL);

		pthread_mutex_lock(&this->cmux);
		while (this->state == PROCESS)
			pthread_cond_wait(&this->cond, &this->cmux);
		pthread_mutex_unlock(&this->cmux);

		grpc_debug("%s RPC in %s on grpc-io-thread", name,
			   call_states[this->state]);

		if (this->state == FINISH) {
			/*
			 * Server is done (FINISH) so prep to receive a new
			 * request of this type. We could do this earlier but
			 * that would mean we could be handling multiple same
			 * type requests in parallel without limit.
			 */
			this->do_request(service, cq, false);

	/* Derived classes run the actual handler here, on the main thread. */
	virtual CallState run_mainthread(struct event *thread) = 0;

	/* Main-thread trampoline scheduled by run() above. */
	static void c_callback(struct event *thread)
		auto _tag = static_cast<RpcStateBase *>(EVENT_ARG(thread));
		/*
		 * We hold the lock until the callback finishes and has updated
		 * _tag->state, then we signal done and release.
		 */
		pthread_mutex_lock(&_tag->cmux);

		CallState enter_state = _tag->state;
		grpc_debug("%s RPC: running %s on main thread", _tag->name,
			   call_states[enter_state]);

		_tag->state = _tag->run_mainthread(thread);

		grpc_debug("%s RPC: %s -> %s [main thread]", _tag->name,
			   call_states[enter_state], call_states[_tag->state]);

		pthread_cond_signal(&_tag->cond);
		pthread_mutex_unlock(&_tag->cmux);

	grpc::ServerContext ctx;
	pthread_mutex_t cmux = PTHREAD_MUTEX_INITIALIZER;
	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	CallState state = CREATE;	  /* current RPC state */
	CallState entered_state = CREATE; /* state on entry to run() */
/*
 * The UnaryRpcState class is used to track the execution of a Unary RPC.
 *
 * Template args:
 *   Q - the request type for a given unary RPC
 *   S - the response type for a given unary RPC
 *
 * NOTE(review): class framing (braces, request/response members, ctor
 * trailing args) is not visible in this extract.
 */
template <typename Q, typename S> class UnaryRpcState : public RpcStateBase
	/* Pointer-to-member type of the generated Request##NAME function
	 * used to arm the completion queue for this RPC type. */
	typedef void (frr::Northbound::AsyncService::*reqfunc_t)(
		::grpc::ServerContext *, Q *,
		::grpc::ServerAsyncResponseWriter<S> *,
		::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,

	UnaryRpcState(Candidates *cdb, reqfunc_t rfunc,
		      grpc::Status (*cb)(UnaryRpcState<Q, S> *),
		: RpcStateBase(name), cdb(cdb), requestf(rfunc), callback(cb),

	/* Post a request for this RPC type; when no_copy is false allocate a
	 * fresh state object so a new request can be serviced. */
	void do_request(::frr::Northbound::AsyncService *service,
			::grpc::ServerCompletionQueue *cq,
			bool no_copy) override
		grpc_debug("%s, posting a request for: %s", __func__, name);
		auto copy = no_copy ? this
				    : new UnaryRpcState(cdb, requestf, callback,
		(service->*requestf)(&copy->ctx, &copy->request,
				     &copy->responder, cq, cq, copy);

	/* Run the registered handler on the main thread and send the reply. */
	CallState run_mainthread(struct event *thread) override
		// Unary RPC are always finished, see "Unary" :)
		grpc::Status status = this->callback(this);
		responder.Finish(response, status, this);

	grpc::ServerAsyncResponseWriter<S> responder; /* reply writer */

	grpc::Status (*callback)(UnaryRpcState<Q, S> *); /* main-thread handler */
	reqfunc_t requestf = NULL;
/*
 * The StreamRpcState class is used to track the execution of a Streaming RPC.
 *
 * Template args:
 *   Q - the request type for a given streaming RPC
 *   S - the response type for a given streaming RPC
 *   X - the type used to track the streaming state
 *
 * NOTE(review): class framing (braces, request/context members, parts of
 * do_request/run_mainthread) is not visible in this extract.
 */
template <typename Q, typename S, typename X>
class StreamRpcState : public RpcStateBase
	/* Pointer-to-member type of the generated Request##NAME function
	 * used to arm the completion queue for this streaming RPC type. */
	typedef void (frr::Northbound::AsyncService::*reqsfunc_t)(
		::grpc::ServerContext *, Q *, ::grpc::ServerAsyncWriter<S> *,
		::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,

	StreamRpcState(reqsfunc_t rfunc, bool (*cb)(StreamRpcState<Q, S, X> *),
		: RpcStateBase(name), requestsf(rfunc), callback(cb),
		  async_responder(&ctx){};

	/* Post a request for this RPC type (see UnaryRpcState::do_request). */
	void do_request(::frr::Northbound::AsyncService *service,
			::grpc::ServerCompletionQueue *cq,
			bool no_copy) override
		grpc_debug("%s, posting a request for: %s", __func__, name);
			: new StreamRpcState(requestsf, callback, name);
		(service->*requestsf)(&copy->ctx, &copy->request,
				      &copy->async_responder, cq, cq, copy);

	/* Handler returns true while it has MORE data to stream. */
	CallState run_mainthread(struct event *thread) override
		if (this->callback(this))

	grpc::ServerAsyncWriter<S> async_responder; /* streaming reply writer */

	bool (*callback)(StreamRpcState<Q, S, X> *); /* main-thread handler */
	reqsfunc_t requestsf = NULL;
322 // ------------------------------------------------------
324 // ------------------------------------------------------
/*
 * Map a protobuf Encoding to the corresponding libyang LYD_FORMAT.
 * NOTE(review): the switch body/returns are not visible in this extract;
 * only the unknown-encoding error path is shown.
 */
static LYD_FORMAT encoding2lyd_format(enum frr::Encoding encoding)
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown data encoding format (%u)", __func__,

/*
 * Create/update a node at `path` in `dnode` with `value` via libyang.
 * Returns 0 on success (error-path returns not visible in this extract).
 */
static int yang_dnode_edit(struct lyd_node *dnode, const std::string &path,
	LY_ERR err = lyd_new_path(dnode, ly_native_ctx, path.c_str(), value,
				  LYD_NEW_PATH_UPDATE, &dnode);
	if (err != LY_SUCCESS) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed: %s",
			  __func__, ly_errmsg(ly_native_ctx));

/* Remove the subtree at `path` from `dnode`, if present. */
static int yang_dnode_delete(struct lyd_node *dnode, const std::string &path)
	dnode = yang_dnode_get(dnode, path.c_str());
	lyd_free_tree(dnode);

/*
 * Serialize `dnode` into the protobuf DataTree `dt` using `lyd_format`,
 * honoring the with_defaults request flag.
 */
static LY_ERR data_tree_from_dnode(frr::DataTree *dt,
				   const struct lyd_node *dnode,
				   LYD_FORMAT lyd_format, bool with_defaults)
	SET_FLAG(options, LYD_PRINT_WITHSIBLINGS);
		SET_FLAG(options, LYD_PRINT_WD_ALL);
		SET_FLAG(options, LYD_PRINT_WD_TRIM);
	LY_ERR err = lyd_print_mem(&strp, dnode, lyd_format, options);
	if (err == LY_SUCCESS) {
/*
 * Parse a protobuf DataTree into a libyang tree; when config_only is set
 * (parameter line not visible in this extract) state data is rejected and
 * parsing is strict.
 */
static struct lyd_node *dnode_from_data_tree(const frr::DataTree *dt,
	struct lyd_node *dnode;
		options = LYD_PARSE_NO_STATE;
		opt2 = LYD_VALIDATE_NO_STATE;
		options = LYD_PARSE_STRICT;
	err = lyd_parse_data_mem(ly_native_ctx, dt->data().c_str(),
				 encoding2lyd_format(dt->encoding()), options,
	if (err != LY_SUCCESS) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_parse_mem() failed: %s",
			  __func__, ly_errmsg(ly_native_ctx));

/*
 * Return a duplicate of the running-config subtree at `path`
 * (whole config when `path` is empty); NULL when the path is absent.
 */
static struct lyd_node *get_dnode_config(const std::string &path)
	struct lyd_node *dnode;

	if (!yang_dnode_exists(running_config->dnode,
			       path.empty() ? NULL : path.c_str()))

	dnode = yang_dnode_get(running_config->dnode,
			       path.empty() ? NULL : path.c_str());
	dnode = yang_dnode_dup(dnode);
430 static int get_oper_data_cb(const struct lysc_node
*snode
,
431 struct yang_translator
*translator
,
432 struct yang_data
*data
, void *arg
)
434 struct lyd_node
*dnode
= static_cast<struct lyd_node
*>(arg
);
435 int ret
= yang_dnode_edit(dnode
, data
->xpath
, data
->value
);
436 yang_data_free(data
);
438 return (ret
== 0) ? NB_OK
: NB_ERR
;
/*
 * Build a fresh tree of operational (state) data under `path` by iterating
 * the northbound operational callbacks; the tree is freed on iteration
 * failure (failure condition/return lines not visible in this extract).
 */
static struct lyd_node *get_dnode_state(const std::string &path)
	struct lyd_node *dnode = yang_dnode_new(ly_native_ctx, false);

	if (nb_oper_data_iterate(path.c_str(), NULL, 0, get_oper_data_cb, dnode)
		yang_dnode_free(dnode);
/*
 * Fill `dt` with config and/or state data under `path` according to the
 * requested GetRequest data `type`, serialized as `lyd_format`.
 * NOTE(review): several `if`/`switch`/`break` framing lines are not
 * visible in this extract.
 */
static grpc::Status get_path(frr::DataTree *dt, const std::string &path,
			     int type, LYD_FORMAT lyd_format,
	struct lyd_node *dnode_config = NULL;
	struct lyd_node *dnode_state = NULL;
	struct lyd_node *dnode_final;

	// Configuration data.
	if (type == frr::GetRequest_DataType_ALL
	    || type == frr::GetRequest_DataType_CONFIG) {
		dnode_config = get_dnode_config(path);
			return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
					    "Data path not found");

	// Operational (state) data.
	if (type == frr::GetRequest_DataType_ALL
	    || type == frr::GetRequest_DataType_STATE) {
		dnode_state = get_dnode_state(path);
			yang_dnode_free(dnode_config);
			return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
					    "Failed to fetch operational data");

	case frr::GetRequest_DataType_ALL:
		// Combine configuration and state data into a single
		// tree; both inputs are freed on merge failure.
		if (lyd_merge_siblings(&dnode_state, dnode_config,
			yang_dnode_free(dnode_state);
			yang_dnode_free(dnode_config);
				grpc::StatusCode::INTERNAL,
				"Failed to merge configuration and state data",
				ly_errmsg(ly_native_ctx));
		dnode_final = dnode_state;
	case frr::GetRequest_DataType_CONFIG:
		dnode_final = dnode_config;
	case frr::GetRequest_DataType_STATE:
		dnode_final = dnode_state;

	// Validate data to create implicit default nodes if necessary.
	int validate_opts = 0;
	if (type == frr::GetRequest_DataType_CONFIG)
		validate_opts = LYD_VALIDATE_NO_STATE;

	LY_ERR err = lyd_validate_all(&dnode_final, ly_native_ctx,
				      validate_opts, NULL);
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_validate_all() failed: %s",
			  __func__, ly_errmsg(ly_native_ctx));
	// Dump data using the requested format.
	err = data_tree_from_dnode(dt, dnode_final, lyd_format,
	yang_dnode_free(dnode_final);
		return grpc::Status(grpc::StatusCode::INTERNAL,
				    "Failed to dump data");
	return grpc::Status::OK;
534 // ------------------------------------------------------
535 // RPC Callback Functions: run on main thread
536 // ------------------------------------------------------
/*
 * GetCapabilities RPC: report FRR version, rollback support, the loaded
 * YANG modules, and the supported encodings.
 * NOTE(review): braces and the #else/#endif of HAVE_CONFIG_ROLLBACKS are
 * not visible in this extract.
 */
grpc::Status HandleUnaryGetCapabilities(
	UnaryRpcState<frr::GetCapabilitiesRequest, frr::GetCapabilitiesResponse>
	grpc_debug("%s: entered", __func__);

	// Response: string frr_version = 1;
	tag->response.set_frr_version(FRR_VERSION);

	// Response: bool rollback_support = 2;
#ifdef HAVE_CONFIG_ROLLBACKS
	tag->response.set_rollback_support(true);
	tag->response.set_rollback_support(false);

	// Response: repeated ModuleData supported_modules = 3;
	struct yang_module *module;
	RB_FOREACH (module, yang_modules, &yang_modules) {
		auto m = tag->response.add_supported_modules();

		m->set_name(module->name);
		if (module->info->revision)
			m->set_revision(module->info->revision);
		m->set_organization(module->info->org);

	// Response: repeated Encoding supported_encodings = 4;
	tag->response.add_supported_encodings(frr::JSON);
	tag->response.add_supported_encodings(frr::XML);

	return grpc::Status::OK;

// Define the context variable type for this streaming handler
typedef std::list<std::string> GetContextType;

/*
 * Get RPC (server-streaming): stream one GetResponse per requested path,
 * popping one path from the per-RPC context each invocation.
 */
bool HandleStreamingGet(
	StreamRpcState<frr::GetRequest, frr::GetResponse, GetContextType> *tag)
	grpc_debug("%s: entered", __func__);

	auto mypathps = &tag->context;
	if (tag->is_initial_process()) {
		// Fill our context container first time through
		grpc_debug("%s: initialize streaming state", __func__);
		auto paths = tag->request.path();
		for (const std::string &path : paths) {
			mypathps->push_back(std::string(path));

	// Request: DataType type = 1;
	int type = tag->request.type();
	// Request: Encoding encoding = 2;
	frr::Encoding encoding = tag->request.encoding();
	// Request: bool with_defaults = 3;
	bool with_defaults = tag->request.with_defaults();

	if (mypathps->empty()) {
		tag->async_responder.Finish(grpc::Status::OK, tag);

	frr::GetResponse response;

	// Response: int64 timestamp = 1;
	response.set_timestamp(time(NULL));

	// Response: DataTree data = 2;
	auto *data = response.mutable_data();
	data->set_encoding(tag->request.encoding());
	status = get_path(data, mypathps->back().c_str(), type,
			  encoding2lyd_format(encoding), with_defaults);

	/* On get_path() failure, send the error status and finish. */
		tag->async_responder.WriteAndFinish(
			response, grpc::WriteOptions(), status, tag);

	mypathps->pop_back();
	if (mypathps->empty()) {
		tag->async_responder.WriteAndFinish(
			response, grpc::WriteOptions(), grpc::Status::OK, tag);

	tag->async_responder.Write(response, tag);
/*
 * CreateCandidate RPC: allocate a new candidate configuration.
 * NOTE(review): braces and some NULL-check lines are not visible in this
 * extract for the four handlers below.
 */
grpc::Status HandleUnaryCreateCandidate(
	UnaryRpcState<frr::CreateCandidateRequest, frr::CreateCandidateResponse>
	grpc_debug("%s: entered", __func__);

	struct candidate *candidate = tag->cdb->create_candidate();
		return grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED,
				    "Can't create candidate configuration");
	tag->response.set_candidate_id(candidate->id);
	return grpc::Status::OK;

/* DeleteCandidate RPC: drop an existing candidate configuration. */
grpc::Status HandleUnaryDeleteCandidate(
	UnaryRpcState<frr::DeleteCandidateRequest, frr::DeleteCandidateResponse>
	grpc_debug("%s: entered", __func__);

	uint32_t candidate_id = tag->request.candidate_id();

	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);

	if (!tag->cdb->contains(candidate_id))
		return grpc::Status(grpc::StatusCode::NOT_FOUND,
				    "candidate configuration not found");
	tag->cdb->delete_candidate(candidate_id);
	return grpc::Status::OK;

/* UpdateCandidate RPC: rebase a candidate onto the running config. */
grpc::Status HandleUnaryUpdateCandidate(
	UnaryRpcState<frr::UpdateCandidateRequest, frr::UpdateCandidateResponse>
	grpc_debug("%s: entered", __func__);

	uint32_t candidate_id = tag->request.candidate_id();

	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);

	struct candidate *candidate = tag->cdb->get_candidate(candidate_id);

		return grpc::Status(grpc::StatusCode::NOT_FOUND,
				    "candidate configuration not found");
	if (candidate->transaction)
			grpc::StatusCode::FAILED_PRECONDITION,
			"candidate is in the middle of a transaction");
	if (nb_candidate_update(candidate->config) != NB_OK)
		return grpc::Status(grpc::StatusCode::INTERNAL,
				    "failed to update candidate configuration");

	return grpc::Status::OK;

/*
 * EditCandidate RPC: apply path/value updates and deletes to a scratch
 * copy first; only replace the candidate's config when every edit
 * succeeded.
 */
grpc::Status HandleUnaryEditCandidate(
	UnaryRpcState<frr::EditCandidateRequest, frr::EditCandidateResponse>
	grpc_debug("%s: entered", __func__);

	uint32_t candidate_id = tag->request.candidate_id();

	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);

	struct candidate *candidate = tag->cdb->get_candidate(candidate_id);

		return grpc::Status(grpc::StatusCode::NOT_FOUND,
				    "candidate configuration not found");

	struct nb_config *candidate_tmp = nb_config_dup(candidate->config);

	auto pvs = tag->request.update();
	for (const frr::PathValue &pv : pvs) {
		if (yang_dnode_edit(candidate_tmp->dnode, pv.path(),
				    pv.value().c_str()) != 0) {
			nb_config_free(candidate_tmp);

			return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
					    "Failed to update \"" + pv.path() +

	pvs = tag->request.delete_();
	for (const frr::PathValue &pv : pvs) {
		if (yang_dnode_delete(candidate_tmp->dnode, pv.path()) != 0) {
			nb_config_free(candidate_tmp);
			return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
					    "Failed to remove \"" + pv.path() +

	// No errors, accept all changes.
	nb_config_replace(candidate->config, candidate_tmp, false);
	return grpc::Status::OK;
/*
 * LoadToCandidate RPC: parse a DataTree and merge it into (or replace)
 * a candidate configuration.
 * NOTE(review): braces, some NULL-checks and `break`s are not visible in
 * this extract for the two handlers below.
 */
grpc::Status HandleUnaryLoadToCandidate(
	UnaryRpcState<frr::LoadToCandidateRequest, frr::LoadToCandidateResponse>
	grpc_debug("%s: entered", __func__);

	uint32_t candidate_id = tag->request.candidate_id();

	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);

	// Request: LoadType type = 2;
	int load_type = tag->request.type();
	// Request: DataTree config = 3;
	auto config = tag->request.config();

	struct candidate *candidate = tag->cdb->get_candidate(candidate_id);

		return grpc::Status(grpc::StatusCode::NOT_FOUND,
				    "candidate configuration not found");

	struct lyd_node *dnode = dnode_from_data_tree(&config, true);

		return grpc::Status(grpc::StatusCode::INTERNAL,
				    "Failed to parse the configuration");

	struct nb_config *loaded_config = nb_config_new(dnode);

	if (load_type == frr::LoadToCandidateRequest::REPLACE)
		nb_config_replace(candidate->config, loaded_config, false);
	else if (nb_config_merge(candidate->config, loaded_config, false) !=
		return grpc::Status(grpc::StatusCode::INTERNAL,
				    "Failed to merge the loaded configuration");

	return grpc::Status::OK;

/*
 * Commit RPC: drive the (possibly two-phase) commit protocol for a
 * candidate — VALIDATE, PREPARE, ABORT, APPLY, or ALL in one shot —
 * then translate the northbound result into a gRPC status.
 */
HandleUnaryCommit(UnaryRpcState<frr::CommitRequest, frr::CommitResponse> *tag)
	grpc_debug("%s: entered", __func__);

	// Request: uint32 candidate_id = 1;
	uint32_t candidate_id = tag->request.candidate_id();

	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);

	// Request: Phase phase = 2;
	int phase = tag->request.phase();
	// Request: string comment = 3;
	const std::string comment = tag->request.comment();

	// Find candidate configuration.
	struct candidate *candidate = tag->cdb->get_candidate(candidate_id);

		return grpc::Status(grpc::StatusCode::NOT_FOUND,
				    "candidate configuration not found");

	uint32_t transaction_id = 0;

	// Check for misuse of the two-phase commit protocol.
	case frr::CommitRequest::PREPARE:
	case frr::CommitRequest::ALL:
		if (candidate->transaction)
				grpc::StatusCode::FAILED_PRECONDITION,
				"candidate is in the middle of a transaction");
	case frr::CommitRequest::ABORT:
	case frr::CommitRequest::APPLY:
		if (!candidate->transaction)
				grpc::StatusCode::FAILED_PRECONDITION,
				"no transaction in progress");

	// Execute the user request.
	struct nb_context context = {};
	context.client = NB_CLIENT_GRPC;
	char errmsg[BUFSIZ] = {0};

	case frr::CommitRequest::VALIDATE:
		grpc_debug("`-> Performing VALIDATE");
		ret = nb_candidate_validate(&context, candidate->config, errmsg,
	case frr::CommitRequest::PREPARE:
		grpc_debug("`-> Performing PREPARE");
		ret = nb_candidate_commit_prepare(
			context, candidate->config, comment.c_str(),
			&candidate->transaction, false, false, errmsg,
	case frr::CommitRequest::ABORT:
		grpc_debug("`-> Performing ABORT");
		nb_candidate_commit_abort(candidate->transaction, errmsg,
	case frr::CommitRequest::APPLY:
		grpc_debug("`-> Performing APPLY");
		nb_candidate_commit_apply(candidate->transaction, true,
					  &transaction_id, errmsg,
	case frr::CommitRequest::ALL:
		grpc_debug("`-> Performing ALL");
		ret = nb_candidate_commit(context, candidate->config, true,
					  comment.c_str(), &transaction_id,
					  errmsg, sizeof(errmsg));

	// Map northbound error codes to gRPC status codes.
		status = grpc::Status::OK;
	case NB_ERR_NO_CHANGES:
		status = grpc::Status(grpc::StatusCode::ABORTED, errmsg);
		status = grpc::Status(grpc::StatusCode::UNAVAILABLE, errmsg);
	case NB_ERR_VALIDATION:
		status = grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
	case NB_ERR_RESOURCE:
		status = grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED,
		status = grpc::Status(grpc::StatusCode::INTERNAL, errmsg);

	grpc_debug("`-> Result: %s (message: '%s')",
		   nb_err_name((enum nb_error)ret), errmsg);

	// Response: uint32 transaction_id = 1;
		tag->response.set_transaction_id(transaction_id);

	if (strlen(errmsg) > 0)
		tag->response.set_error_message(errmsg);
889 grpc::Status
HandleUnaryLockConfig(
890 UnaryRpcState
<frr::LockConfigRequest
, frr::LockConfigResponse
> *tag
)
892 grpc_debug("%s: entered", __func__
);
894 if (nb_running_lock(NB_CLIENT_GRPC
, NULL
))
895 return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION
,
896 "running configuration is locked already");
897 return grpc::Status::OK
;
/*
 * UnlockConfig RPC: release the running-configuration lock.
 * NOTE(review): braces and the opening of the failure `return` are not
 * visible in this extract.
 */
grpc::Status HandleUnaryUnlockConfig(
	UnaryRpcState<frr::UnlockConfigRequest, frr::UnlockConfigResponse> *tag)
	grpc_debug("%s: entered", __func__);

	if (nb_running_unlock(NB_CLIENT_GRPC, NULL))
			grpc::StatusCode::FAILED_PRECONDITION,
			"failed to unlock the running configuration");
	return grpc::Status::OK;

/*
 * Transactions-DB iteration callback: append one (id, client, date,
 * comment) tuple to the list passed via `arg`.
 */
static void list_transactions_cb(void *arg, int transaction_id,
				 const char *client_name, const char *date,
	auto list = static_cast<std::list<
		std::tuple<int, std::string, std::string, std::string>> *>(arg);
		std::make_tuple(transaction_id, std::string(client_name),
				std::string(date), std::string(comment)));

// Define the context variable type for this streaming handler
typedef std::list<std::tuple<int, std::string, std::string, std::string>>
	ListTransactionsContextType;

/*
 * ListTransactions RPC (server-streaming): collect all transactions on the
 * first pass, then stream one response per stored tuple.
 */
bool HandleStreamingListTransactions(
	StreamRpcState<frr::ListTransactionsRequest,
		       frr::ListTransactionsResponse,
		       ListTransactionsContextType> *tag)
	grpc_debug("%s: entered", __func__);

	auto list = &tag->context;
	if (tag->is_initial_process()) {
		grpc_debug("%s: initialize streaming state", __func__);
		// Fill our context container first time through
		nb_db_transactions_iterate(list_transactions_cb, list);
		list->push_back(std::make_tuple(
			0xFFFF, std::string("fake client"),
			std::string("fake date"), std::string("fake comment")));
		list->push_back(std::make_tuple(0xFFFE,
						std::string("fake client2"),
						std::string("fake date"),
						std::string("fake comment2")));

	/* Nothing (left) to stream: finish the RPC. */
		tag->async_responder.Finish(grpc::Status::OK, tag);

	auto item = list->back();

	frr::ListTransactionsResponse response;

	// Response: uint32 id = 1;
	response.set_id(std::get<0>(item));

	// Response: string client = 2;
	response.set_client(std::get<1>(item).c_str());

	// Response: string date = 3;
	response.set_date(std::get<2>(item).c_str());

	// Response: string comment = 4;
	response.set_comment(std::get<3>(item).c_str());

	/* Last item: write it and finish; otherwise keep streaming. */
		tag->async_responder.WriteAndFinish(
			response, grpc::WriteOptions(), grpc::Status::OK, tag);

	tag->async_responder.Write(response, tag);
/*
 * GetTransaction RPC: load a past transaction's configuration from the
 * transactions database and return it in the requested encoding.
 * NOTE(review): braces and some NULL-check lines are not visible in this
 * extract for the two handlers below.
 */
grpc::Status HandleUnaryGetTransaction(
	UnaryRpcState<frr::GetTransactionRequest, frr::GetTransactionResponse>
	grpc_debug("%s: entered", __func__);

	// Request: uint32 transaction_id = 1;
	uint32_t transaction_id = tag->request.transaction_id();
	// Request: Encoding encoding = 2;
	frr::Encoding encoding = tag->request.encoding();
	// Request: bool with_defaults = 3;
	bool with_defaults = tag->request.with_defaults();

	grpc_debug("%s(transaction_id: %u, encoding: %u)", __func__,
		   transaction_id, encoding);

	struct nb_config *nb_config;

	// Load configuration from the transactions database.
	nb_config = nb_db_transaction_load(transaction_id);
		return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
				    "Transaction not found");

	// Response: DataTree config = 1;
	auto config = tag->response.mutable_config();
	config->set_encoding(encoding);

	// Dump data using the requested format.
	if (data_tree_from_dnode(config, nb_config->dnode,
				 encoding2lyd_format(encoding), with_defaults)
		nb_config_free(nb_config);
		return grpc::Status(grpc::StatusCode::INTERNAL,
				    "Failed to dump data");

	nb_config_free(nb_config);

	return grpc::Status::OK;

/*
 * Execute RPC: run the YANG RPC/action registered at the requested XPath,
 * passing the supplied input parameters and returning its outputs.
 */
grpc::Status HandleUnaryExecute(
	UnaryRpcState<frr::ExecuteRequest, frr::ExecuteResponse> *tag)
	grpc_debug("%s: entered", __func__);

	struct nb_node *nb_node;
	struct list *input_list;
	struct list *output_list;
	struct listnode *node;
	struct yang_data *data;

	char errmsg[BUFSIZ] = {0};

	// Request: string path = 1;
	xpath = tag->request.path().c_str();

	grpc_debug("%s(path: \"%s\")", __func__, xpath);

	if (tag->request.path().empty())
		return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
				    "Data path is empty");

	nb_node = nb_node_find(xpath);
		return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
				    "Unknown data path");

	input_list = yang_data_list_new();
	output_list = yang_data_list_new();

	// Read input parameters.
	auto input = tag->request.input();
	for (const frr::PathValue &pv : input) {
		// Request: repeated PathValue input = 2;
		data = yang_data_new(pv.path().c_str(), pv.value().c_str());
		listnode_add(input_list, data);

	// Execute callback registered for this XPath.
	if (nb_callback_rpc(nb_node, xpath, input_list, output_list, errmsg,
		flog_warn(EC_LIB_NB_CB_RPC, "%s: rpc callback failed: %s",
		list_delete(&input_list);
		list_delete(&output_list);

		return grpc::Status(grpc::StatusCode::INTERNAL, "RPC failed");

	// Process output parameters.
	for (ALL_LIST_ELEMENTS_RO(output_list, node, data)) {
		// Response: repeated PathValue output = 1;
		frr::PathValue *pv = tag->response.add_output();
		pv->set_path(data->xpath);
		pv->set_value(data->value);

	list_delete(&input_list);
	list_delete(&output_list);

	return grpc::Status::OK;
1087 // ------------------------------------------------------
1088 // Thread Initialization and Run Functions
1089 // ------------------------------------------------------
/*
 * Allocate a tracking-state object for the NAME unary RPC and post the
 * initial request for it on the completion queue. Wrapped in do/while(0)
 * so each expansion is a single statement and `_rpcState` stays scoped.
 */
#define REQUEST_NEWRPC(NAME, cdb)                                              \
	do {                                                                   \
		auto _rpcState = new UnaryRpcState<frr::NAME##Request,         \
						   frr::NAME##Response>(       \
			(cdb), &frr::Northbound::AsyncService::Request##NAME,  \
			&HandleUnary##NAME, #NAME);                            \
		_rpcState->do_request(&service, cq.get(), true);               \
	} while (0)

/* Same as REQUEST_NEWRPC, but for server-streaming RPCs. */
#define REQUEST_NEWRPC_STREAMING(NAME)                                         \
	do {                                                                   \
		auto _rpcState = new StreamRpcState<frr::NAME##Request,        \
						    frr::NAME##Response,       \
						    NAME##ContextType>(        \
			&frr::Northbound::AsyncService::Request##NAME,         \
			&HandleStreaming##NAME, #NAME);                        \
		_rpcState->do_request(&service, cq.get(), true);               \
	} while (0)
/* frr_pthread attribute wrapper for the gRPC thread.
 * NOTE(review): the rest of this struct is not visible in this extract. */
struct grpc_pthread_attr {
	struct frr_pthread_attr attr;

// Capture these objects so we can try to shut down cleanly
static pthread_mutex_t s_server_lock = PTHREAD_MUTEX_INITIALIZER;
static grpc::Server *s_server;
/*
 * Entry point of the dedicated gRPC pthread: build and start the server,
 * arm all RPC handlers, then service the completion queue until shutdown.
 * NOTE(review): loop framing, some conditionals and `break`s are not
 * visible in this extract.
 */
static void *grpc_pthread_start(void *arg)
	struct frr_pthread *fpt = static_cast<frr_pthread *>(arg);
	uint port = (uint) reinterpret_cast<intptr_t>(fpt->data);

	Candidates candidates;
	grpc::ServerBuilder builder;
	std::stringstream server_address;
	frr::Northbound::AsyncService service;

	frr_pthread_set_name(fpt);

	server_address << "0.0.0.0:" << port;
	builder.AddListeningPort(server_address.str(),
				 grpc::InsecureServerCredentials());
	builder.RegisterService(&service);
	builder.AddChannelArgument(
		GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS, 5000);
	std::unique_ptr<grpc::ServerCompletionQueue> cq =
		builder.AddCompletionQueue();
	std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
	s_server = server.get();

	pthread_mutex_lock(&s_server_lock); // Make coverity happy
	grpc_running = true;
	pthread_mutex_unlock(&s_server_lock); // Make coverity happy

	/* Schedule unary RPC handlers */
	REQUEST_NEWRPC(GetCapabilities, NULL);
	REQUEST_NEWRPC(CreateCandidate, &candidates);
	REQUEST_NEWRPC(DeleteCandidate, &candidates);
	REQUEST_NEWRPC(UpdateCandidate, &candidates);
	REQUEST_NEWRPC(EditCandidate, &candidates);
	REQUEST_NEWRPC(LoadToCandidate, &candidates);
	REQUEST_NEWRPC(Commit, &candidates);
	REQUEST_NEWRPC(GetTransaction, NULL);
	REQUEST_NEWRPC(LockConfig, NULL);
	REQUEST_NEWRPC(UnlockConfig, NULL);
	REQUEST_NEWRPC(Execute, NULL);

	/* Schedule streaming RPC handlers */
	REQUEST_NEWRPC_STREAMING(Get);
	REQUEST_NEWRPC_STREAMING(ListTransactions);

	zlog_notice("gRPC server listening on %s",
		    server_address.str().c_str());

	/* Process inbound RPCs */
		if (!cq->Next(&tag, &ok)) {
			grpc_debug("%s: CQ empty exiting", __func__);

		grpc_debug("%s: got next from CQ tag: %p ok: %d", __func__, tag,

		/* A !ok completion: the state object is dead, reclaim it. */
			delete static_cast<RpcStateBase *>(tag);

		RpcStateBase *rpc = static_cast<RpcStateBase *>(tag);
		if (rpc->get_state() != FINISH)
			rpc->run(&service, cq.get());
			grpc_debug("%s RPC FINISH -> [delete]", rpc->name);

	/* This was probably done for us to get here, but let's be safe */
	pthread_mutex_lock(&s_server_lock);
	grpc_running = false;

	grpc_debug("%s: shutdown server and CQ", __func__);

	pthread_mutex_unlock(&s_server_lock);

	grpc_debug("%s: shutting down CQ", __func__);

	grpc_debug("%s: draining the CQ", __func__);

	while (cq->Next(&tag, &ok)) {
		grpc_debug("%s: drain tag %p", __func__, tag);
		delete static_cast<RpcStateBase *>(tag);

	zlog_info("%s: exiting from grpc pthread", __func__);
/*
 * Spawn the gRPC server pthread listening on `port`.
 * NOTE(review): braces and some return statements are not visible in this
 * extract for the functions below.
 */
static int frr_grpc_init(uint port)
	struct frr_pthread_attr attr = {
		.start = grpc_pthread_start,

	grpc_debug("%s: entered", __func__);

	fpt = frr_pthread_new(&attr, "frr-grpc", "frr-grpc");
	/* Pass the port to the thread via the opaque data pointer. */
	fpt->data = reinterpret_cast<void *>((intptr_t)port);

	/* Create a pthread for gRPC since it runs its own event loop. */
	if (frr_pthread_run(fpt, NULL) < 0) {
		flog_err(EC_LIB_SYSTEM_CALL, "%s: error creating pthread: %s",
			 __func__, safe_strerror(errno));

/* Tear the gRPC server down and join/destroy its pthread. */
static int frr_grpc_finish(void)
	grpc_debug("%s: entered", __func__);

	/*
	 * Shut the server down here in main thread. This will cause the wait on
	 * the completion queue (cq.Next()) to exit and cleanup everything else.
	 */
	pthread_mutex_lock(&s_server_lock);
	grpc_running = false;

	grpc_debug("%s: shutdown server", __func__);
	s_server->Shutdown();

	pthread_mutex_unlock(&s_server_lock);

	grpc_debug("%s: joining and destroy grpc thread", __func__);
	pthread_join(fpt->thread, NULL);
	frr_pthread_destroy(fpt);

	// Fix protobuf 'memory leaks' during shutdown.
	// https://groups.google.com/g/protobuf/c/4y_EmQiCGgs
	google::protobuf::ShutdownProtobufLibrary();

/*
 * This is done this way because module_init and module_late_init are both
 * called during daemon pre-fork initialization. Because the GRPC library
 * spawns threads internally, we need to delay initializing it until after
 * fork. This is done by scheduling this init function as an event task, since
 * the event loop doesn't run until after fork.
 */
static void frr_grpc_module_very_late_init(struct event *thread)
	const char *args = THIS_MODULE->load_args;
	uint port = GRPC_DEFAULT_PORT;

	/* Parse the optional module argument as the listen port. */
		port = std::stoul(args);
		if (port < 1024 || port > UINT16_MAX) {
			flog_err(EC_LIB_GRPC_INIT,
				 "%s: port number must be between 1025 and %d",
				 __func__, UINT16_MAX);

	if (frr_grpc_init(port) < 0)

	flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module");

/* Register the shutdown hook and defer real init to the event loop. */
static int frr_grpc_module_late_init(struct event_loop *tm)
	hook_register(frr_fini, frr_grpc_finish);
	event_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);

/* Module entry point: chain into late init after FRR's base init. */
static int frr_grpc_module_init(void)
	hook_register(frr_late_init, frr_grpc_module_late_init);

FRR_MODULE_SETUP(.name = "frr_grpc", .version = FRR_VERSION,
		 .description = "FRR gRPC northbound module",
		 .init = frr_grpc_module_init, );