git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #11485 from AbhishekNR/ipv6_mld_todo
author     Donatas Abraitis <donatas@opensourcerouting.org>
           Thu, 18 Aug 2022 06:47:03 +0000 (09:47 +0300)
committer  GitHub <noreply@github.com>
           Thu, 18 Aug 2022 06:47:03 +0000 (09:47 +0300)
pim6d: Completing "ipv6 mld" config command.

374 files changed:
alpine/APKBUILD.in
babeld/message.c
bfdd/bfd.c
bfdd/bfd.h
bfdd/bfd_packet.c
bfdd/bfdd_cli.c
bfdd/bfdd_vty.c
bgpd/bgp_advertise.c
bgpd/bgp_advertise.h
bgpd/bgp_aspath.c
bgpd/bgp_aspath.h
bgpd/bgp_attr.c
bgpd/bgp_bmp.c
bgpd/bgp_community.c
bgpd/bgp_community_alias.c
bgpd/bgp_conditional_adv.c
bgpd/bgp_damp.c
bgpd/bgp_debug.c
bgpd/bgp_dump.c
bgpd/bgp_evpn.c
bgpd/bgp_evpn_mh.c
bgpd/bgp_evpn_vty.c
bgpd/bgp_filter.c
bgpd/bgp_fsm.c
bgpd/bgp_fsm.h
bgpd/bgp_io.c
bgpd/bgp_keepalives.c
bgpd/bgp_main.c
bgpd/bgp_mplsvpn.c
bgpd/bgp_mplsvpn.h
bgpd/bgp_network.c
bgpd/bgp_nexthop.c
bgpd/bgp_nexthop.h
bgpd/bgp_nht.c
bgpd/bgp_open.c
bgpd/bgp_packet.c
bgpd/bgp_route.c
bgpd/bgp_route.h
bgpd/bgp_routemap.c
bgpd/bgp_updgrp.c
bgpd/bgp_updgrp.h
bgpd/bgp_updgrp_adv.c
bgpd/bgp_updgrp_packet.c
bgpd/bgp_vty.c
bgpd/bgp_vty.h
bgpd/bgp_zebra.c
bgpd/bgpd.c
bgpd/bgpd.h
bgpd/rfapi/rfapi.c
bgpd/rfapi/rfapi_ap.c
bgpd/rfapi/rfapi_encap_tlv.c
bgpd/rfapi/rfapi_import.c
bgpd/rfapi/rfapi_monitor.c
bgpd/rfapi/rfapi_rib.c
bgpd/rfapi/rfapi_vty.c
bgpd/rfapi/vnc_export_bgp.c
bgpd/rfapi/vnc_export_table.c
bgpd/rfapi/vnc_zebra.c
configure.ac
debian/changelog
debian/control
debian/frr.logrotate
debian/rules
doc/developer/locking.rst
doc/developer/packaging-debian.rst
doc/developer/rcu.rst
doc/developer/topotests.rst
doc/user/basic.rst
doc/user/bfd.rst
doc/user/bgp.rst
doc/user/bmp.rst
doc/user/overview.rst
doc/user/pim.rst
doc/user/pimv6.rst
doc/user/ripd.rst
doc/user/zebra.rst
docker/alpine/Dockerfile
docker/alpine/libyang/APKBUILD
include/linux/pkt_cls.h [new file with mode: 0644]
isisd/fabricd.c
isisd/isis_adjacency.c
isisd/isis_circuit.c
isisd/isis_cli.c
isisd/isis_dr.c
isisd/isis_dynhn.c
isisd/isis_events.c
isisd/isis_lfa.c
isisd/isis_lsp.c
isisd/isis_pdu.c
isisd/isis_spf.c
isisd/isis_sr.c
isisd/isis_tlvs.c
isisd/isis_tx_queue.c
isisd/isisd.c
ldpd/accept.c
ldpd/adjacency.c
ldpd/control.c
ldpd/interface.c
ldpd/lde.c
ldpd/lde_lib.c
ldpd/ldpd.c
ldpd/ldpe.c
ldpd/neighbor.c
ldpd/packet.c
lib/agentx.c
lib/command.c
lib/command.h
lib/ferr.c
lib/filter_nb.c
lib/frr_pthread.c
lib/hash.c
lib/link_state.c
lib/link_state.h
lib/log_filter.c
lib/northbound_grpc.cpp
lib/prefix.c
lib/prefix.h
lib/privs.c
lib/resolver.c [changed mode: 0644->0755]
lib/routemap.c
lib/routemap.h
lib/sockopt.c
lib/sockunion.c
lib/stream.c
lib/subdir.am
lib/termtable.h
lib/thread.c
lib/typesafe.h
lib/yang_wrappers.c
lib/zlog_targets.c
nhrpd/nhrp_peer.c
ospf6d/ospf6_gr_helper.c
ospf6d/ospf6_interface.c
ospf6d/ospf6_intra.c
ospf6d/ospf6_message.c
ospfclient/ospf_apiclient.c
ospfd/ospf_abr.c
ospfd/ospf_apiserver.c
ospfd/ospf_bfd.c
ospfd/ospf_flood.c
ospfd/ospf_gr.c
ospfd/ospf_interface.c
ospfd/ospf_interface.h
ospfd/ospf_ism.c
ospfd/ospf_ism.h
ospfd/ospf_lsa.c
ospfd/ospf_neighbor.c
ospfd/ospf_nsm.c
ospfd/ospf_nsm.h
ospfd/ospf_opaque.c
ospfd/ospf_packet.c
ospfd/ospf_spf.c
ospfd/ospf_sr.c
ospfd/ospf_vty.c
ospfd/ospfd.c
ospfd/ospfd.h
pathd/path_pcep_pcc.c
pathd/path_zebra.c
pathd/path_zebra.h
pathd/pathd.c
pbrd/pbr_vty.c
pimd/mtracebis_netlink.c
pimd/pim6_cmd.c
pimd/pim6_cmd.h
pimd/pim6_main.c
pimd/pim6_mld.c
pimd/pim6_mld.h
pimd/pim6_stubs.c [deleted file]
pimd/pim_addr.h
pimd/pim_bsm.c
pimd/pim_bsm.h
pimd/pim_cmd.c
pimd/pim_cmd_common.c
pimd/pim_cmd_common.h
pimd/pim_iface.c
pimd/pim_ifchannel.c
pimd/pim_igmp.c
pimd/pim_instance.c
pimd/pim_instance.h
pimd/pim_join.c
pimd/pim_jp_agg.c
pimd/pim_msg.c
pimd/pim_nb_config.c
pimd/pim_neighbor.c
pimd/pim_nht.c
pimd/pim_nht.h
pimd/pim_oil.h
pimd/pim_register.c
pimd/pim_rp.c
pimd/pim_rpf.c
pimd/pim_rpf.h
pimd/pim_sock.c
pimd/pim_sock.h
pimd/pim_static.c
pimd/pim_str.h
pimd/pim_tib.c
pimd/pim_tlv.c
pimd/pim_upstream.c
pimd/pim_upstream.h
pimd/pim_vty.c
pimd/pim_vxlan.c
pimd/pim_zebra.c
pimd/pim_zebra.h
pimd/subdir.am
qpb/qpb.h
redhat/frr.logrotate
redhat/frr.spec.in
ripd/rip_interface.c
ripd/rip_nb_rpcs.c
ripd/rip_peer.c
ripd/ripd.c
ripd/ripd.h
ripngd/ripng_interface.c
ripngd/ripng_nb_rpcs.c
ripngd/ripng_peer.c
ripngd/ripngd.c
ripngd/ripngd.h
snapcraft/snapcraft.yaml.in
staticd/static_nb_config.c
tests/topotests/analyze.py
tests/topotests/bfd_profiles_topo1/r1/ospfd.conf
tests/topotests/bfd_vrf_topo1/r1/bfdd.conf
tests/topotests/bfd_vrf_topo1/r1/peers.json
tests/topotests/bfd_vrf_topo1/r2/bfdd.conf
tests/topotests/bfd_vrf_topo1/r2/peers.json
tests/topotests/bfd_vrf_topo1/r3/bfdd.conf
tests/topotests/bfd_vrflite_topo1/__init__.py [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py [new file with mode: 0644]
tests/topotests/bgp_as_override/__init__.py [new file with mode: 0644]
tests/topotests/bgp_as_override/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r3/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r3/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r4/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/r4/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_as_override/test_bgp_as_override.py [new file with mode: 0644]
tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py
tests/topotests/bgp_default_originate/bgp_default_orginate_vrf.json [new file with mode: 0644]
tests/topotests/bgp_default_originate/bgp_default_originate_2links.json [new file with mode: 0644]
tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py [new file with mode: 0644]
tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py [new file with mode: 0644]
tests/topotests/bgp_default_originate/test_default_orginate_vrf.py [new file with mode: 0644]
tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py [new file with mode: 0644]
tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf
tests/topotests/bgp_default_route_route_map_match_set/r1/zebra.conf
tests/topotests/bgp_ipv4_class_e_peer/__init__.py [new file with mode: 0644]
tests/topotests/bgp_ipv4_class_e_peer/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_ipv4_class_e_peer/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_ipv4_class_e_peer/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_ipv4_class_e_peer/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_ipv4_class_e_peer/test_bgp_ipv4_class_e_peer.py [new file with mode: 0644]
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py
tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py
tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py
tests/topotests/bgp_l3vpn_to_bgp_direct/r1/zebra.conf
tests/topotests/bgp_l3vpn_to_bgp_direct/r2/zebra.conf
tests/topotests/bgp_l3vpn_to_bgp_direct/r3/zebra.conf
tests/topotests/bgp_l3vpn_to_bgp_direct/r4/zebra.conf
tests/topotests/bgp_orf/__init__.py [new file with mode: 0644]
tests/topotests/bgp_orf/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_orf/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_orf/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_orf/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_orf/test_bgp_orf.py [new file with mode: 0644]
tests/topotests/bgp_roles_capability/r1/bgpd.conf
tests/topotests/bgp_roles_capability/r1/zebra.conf
tests/topotests/bgp_roles_capability/r2/bgpd.conf
tests/topotests/bgp_roles_capability/r3/bgpd.conf
tests/topotests/bgp_roles_capability/r4/bgpd.conf
tests/topotests/bgp_roles_capability/r5/bgpd.conf
tests/topotests/bgp_roles_capability/r6/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_roles_capability/r6/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_roles_capability/test_bgp_roles_capability.py
tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
tests/topotests/bgp_vpnv4_noretain/__init__.py [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf [new file with mode: 0644]
tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py [new file with mode: 0644]
tests/topotests/conftest.py
tests/topotests/lib/bgp.py
tests/topotests/lib/common_config.py
tests/topotests/lib/micronet.py
tests/topotests/lib/pim.py
tests/topotests/lib/topogen.py
tests/topotests/lib/topojson.py
tests/topotests/lib/topotest.py
tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json [new file with mode: 0644]
tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py [new file with mode: 0755]
tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py [new file with mode: 0755]
tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py [new file with mode: 0755]
tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
tests/topotests/ospf_topo1/test_ospf_topo1.py
tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
tools/etc/frr/daemons
tools/etc/rsyslog.d/45-frr.conf
tools/frr-reload.py
tools/frr.in
tools/frrcommon.sh.in
tools/gcc-plugins/frr-format.c
tools/gcc-plugins/gcc-common.h
vrrpd/vrrp.c
vtysh/vtysh.c
vtysh/vtysh_config.c
vtysh/vtysh_main.c
watchfrr/watchfrr.c
zebra/debug_nl.c
zebra/dplane_fpm_nl.c
zebra/if_ioctl.c
zebra/if_netlink.c
zebra/if_socket.c
zebra/interface.c
zebra/interface.h
zebra/irdp_main.c
zebra/kernel_netlink.c
zebra/kernel_socket.c
zebra/main.c
zebra/netconf_netlink.c
zebra/netconf_netlink.h
zebra/rib.h
zebra/rt.h
zebra/rt_netlink.c
zebra/rtadv.c
zebra/subdir.am
zebra/tc_netlink.c [new file with mode: 0644]
zebra/tc_netlink.h [new file with mode: 0644]
zebra/tc_socket.c [new file with mode: 0644]
zebra/zapi_msg.c
zebra/zebra_dplane.c
zebra/zebra_dplane.h
zebra/zebra_evpn_mac.c
zebra/zebra_evpn_neigh.c
zebra/zebra_fpm.c
zebra/zebra_mpls.c
zebra/zebra_mpls.h
zebra/zebra_neigh.c
zebra/zebra_netns_notify.c
zebra/zebra_nhg.c
zebra/zebra_nhg.h
zebra/zebra_opaque.c
zebra/zebra_pbr.c
zebra/zebra_ptm.c
zebra/zebra_pw.c
zebra/zebra_rib.c
zebra/zebra_router.c
zebra/zebra_router.h
zebra/zebra_script.c
zebra/zebra_vrf.c
zebra/zebra_vty.c
zebra/zebra_vxlan.c
zebra/zserv.c

diff --git a/alpine/APKBUILD.in b/alpine/APKBUILD.in
index ccae9bfd0b50e556edf9ef705a8ac0494c5128ea..51986de2dd2af1a92c6e94c373ef43225b77994b 100644 (file)
@@ -18,7 +18,7 @@ makedepends="ncurses-dev net-snmp-dev gawk texinfo perl
     ncurses-libs ncurses-terminfo ncurses-terminfo-base patch pax-utils pcre
     perl pkgconf python3 python3-dev readline readline-dev sqlite-libs
     squashfs-tools sudo tar texinfo xorriso xz-libs py-pip rtrlib rtrlib-dev
-    py3-sphinx elfutils elfutils-dev"
+    py3-sphinx elfutils elfutils-dev libyang-dev"
 checkdepends="pytest py-setuptools"
 install="$pkgname.pre-install $pkgname.pre-deinstall $pkgname.post-deinstall"
 subpackages="$pkgname-dev $pkgname-doc $pkgname-dbg"
diff --git a/babeld/message.c b/babeld/message.c
index c2ea2a268394631728ac0e8e80b4957b0395de0a..7d45d91bf7a50f73171fc8fea8061e1890585739 100644 (file)
@@ -636,7 +636,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
                                                    len - parsed_len, channels);
            }
 
-           if (ignore_update)
+           if (!ignore_update)
                    update_route(router_id, prefix, plen, seqno, metric,
                                 interval, neigh, nh, channels,
                                 channels_len(channels));
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index d52eeeddba704b17812e9afc0efd2061ebba2f78..a1fb67d3577477ac0687abf1d09865291d13ad5e 100644 (file)
@@ -381,6 +381,9 @@ int bfd_session_enable(struct bfd_session *bs)
                ptm_bfd_start_xmt_timer(bs, false);
        }
 
+       /* initialize RTT */
+       bfd_rtt_init(bs);
+
        return 0;
 }
 
@@ -454,7 +457,17 @@ void ptm_bfd_start_xmt_timer(struct bfd_session *bfd, bool is_echo)
 static void ptm_bfd_echo_xmt_TO(struct bfd_session *bfd)
 {
        /* Send the scheduled echo  packet */
-       ptm_bfd_echo_snd(bfd);
+       /* if ipv4 use the new echo implementation that causes
+        * the packet to be looped in forwarding plane of peer
+        */
+       if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6) == 0)
+#ifdef BFD_LINUX
+               ptm_bfd_echo_fp_snd(bfd);
+#else
+               ptm_bfd_echo_snd(bfd);
+#endif
+       else
+               ptm_bfd_echo_snd(bfd);
 
        /* Restart the timer for next time */
        ptm_bfd_start_xmt_timer(bfd, true);
@@ -558,6 +571,15 @@ void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag)
                                   state_list[bfd->ses_state].str,
                                   get_diag_str(bfd->local_diag));
        }
+
+       /* clear peer's mac address */
+       UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET);
+       memset(bfd->peer_hw_addr, 0, sizeof(bfd->peer_hw_addr));
+       /* reset local address, it might have been changed after bfd is up */
+       memset(&bfd->local_address, 0, sizeof(bfd->local_address));
+
+       /* reset RTT */
+       bfd_rtt_init(bfd);
 }
 
 static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
@@ -1934,40 +1956,38 @@ static int bfd_vrf_enable(struct vrf *vrf)
        if (bglobal.debug_zebra)
                zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id);
 
-       if (vrf->vrf_id == VRF_DEFAULT ||
-           vrf_get_backend() == VRF_BACKEND_NETNS) {
-               if (!bvrf->bg_shop)
-                       bvrf->bg_shop = bp_udp_shop(vrf);
-               if (!bvrf->bg_mhop)
-                       bvrf->bg_mhop = bp_udp_mhop(vrf);
-               if (!bvrf->bg_shop6)
-                       bvrf->bg_shop6 = bp_udp6_shop(vrf);
-               if (!bvrf->bg_mhop6)
-                       bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
-               if (!bvrf->bg_echo)
-                       bvrf->bg_echo = bp_echo_socket(vrf);
-               if (!bvrf->bg_echov6)
-                       bvrf->bg_echov6 = bp_echov6_socket(vrf);
-
-               if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_shop, &bvrf->bg_ev[0]);
-               if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_mhop, &bvrf->bg_ev[1]);
-               if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_shop6, &bvrf->bg_ev[2]);
-               if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_mhop6, &bvrf->bg_ev[3]);
-               if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_echo, &bvrf->bg_ev[4]);
-               if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
-                       thread_add_read(master, bfd_recv_cb, bvrf,
-                                       bvrf->bg_echov6, &bvrf->bg_ev[5]);
-       }
+       if (!bvrf->bg_shop)
+               bvrf->bg_shop = bp_udp_shop(vrf);
+       if (!bvrf->bg_mhop)
+               bvrf->bg_mhop = bp_udp_mhop(vrf);
+       if (!bvrf->bg_shop6)
+               bvrf->bg_shop6 = bp_udp6_shop(vrf);
+       if (!bvrf->bg_mhop6)
+               bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
+       if (!bvrf->bg_echo)
+               bvrf->bg_echo = bp_echo_socket(vrf);
+       if (!bvrf->bg_echov6)
+               bvrf->bg_echov6 = bp_echov6_socket(vrf);
+
+       if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+                               &bvrf->bg_ev[0]);
+       if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+                               &bvrf->bg_ev[1]);
+       if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+                               &bvrf->bg_ev[2]);
+       if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+                               &bvrf->bg_ev[3]);
+       if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+                               &bvrf->bg_ev[4]);
+       if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
+               thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+                               &bvrf->bg_ev[5]);
+
        if (vrf->vrf_id != VRF_DEFAULT) {
                bfdd_zclient_register(vrf->vrf_id);
                bfdd_sessions_enable_vrf(vrf);
@@ -2049,3 +2069,14 @@ unsigned long bfd_get_session_count(void)
 {
        return bfd_key_hash->count;
 }
+
+void bfd_rtt_init(struct bfd_session *bfd)
+{
+       uint8_t i;
+
+       /* initialize RTT */
+       bfd->rtt_valid = 0;
+       bfd->rtt_index = 0;
+       for (i = 0; i < BFD_RTT_SAMPLE; i++)
+               bfd->rtt[i] = 0;
+}
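The new bfd_rtt_init() above, together with the rtt[]/rtt_index/rtt_valid fields this commit adds to struct bfd_session (see the bfd.h hunk below), keeps the last BFD_RTT_SAMPLE (8) echo round-trip times in a small circular buffer. A minimal standalone sketch of the same pattern, with purely illustrative names (rtt_ring etc.) rather than the FRR API:

/* Standalone sketch (not FRR code): a fixed-size circular buffer of RTT
 * samples, mirroring the rtt[] / rtt_index / rtt_valid fields added by
 * this commit. */
#include <stdint.h>
#include <stdio.h>

#define RTT_SAMPLES 8

struct rtt_ring {
	uint64_t sample[RTT_SAMPLES]; /* RTT in usec */
	uint8_t index;                /* next slot to overwrite */
	uint8_t valid;                /* number of slots filled so far */
};

static void rtt_ring_reset(struct rtt_ring *r)
{
	*r = (struct rtt_ring){0};
}

static void rtt_ring_add(struct rtt_ring *r, uint64_t usec)
{
	r->sample[r->index] = usec;
	r->index = (r->index + 1) % RTT_SAMPLES;
	if (r->valid < RTT_SAMPLES)
		r->valid++;
}

int main(void)
{
	struct rtt_ring r;

	rtt_ring_reset(&r);
	for (uint64_t i = 1; i <= 10; i++)
		rtt_ring_add(&r, i * 100); /* 100us, 200us, ...; the oldest two get overwritten */

	printf("valid=%u next index=%u\n", r.valid, r.index);
	return 0;
}
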
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index 6aa9e005863f9e75197a3b5f8585a23fa7bf4936..a5881cddb0ec5d7f641b71e2e4ccd4a111435c7e 100644 (file)
@@ -86,7 +86,8 @@ struct bfd_echo_pkt {
                };
        };
        uint32_t my_discr;
-       uint8_t pad[16];
+       uint64_t time_sent_sec;
+       uint64_t time_sent_usec;
 };
 
 
@@ -168,9 +169,10 @@ enum bfd_session_flags {
                                                 * expires
                                                 */
        BFD_SESS_FLAG_SHUTDOWN = 1 << 7,        /* disable BGP peer function */
-       BFD_SESS_FLAG_CONFIG = 1 << 8,  /* Session configured with bfd NB API */
-       BFD_SESS_FLAG_CBIT = 1 << 9,    /* CBIT is set */
+       BFD_SESS_FLAG_CONFIG = 1 << 8, /* Session configured with bfd NB API */
+       BFD_SESS_FLAG_CBIT = 1 << 9,   /* CBIT is set */
        BFD_SESS_FLAG_PASSIVE = 1 << 10, /* Passive mode */
+       BFD_SESS_FLAG_MAC_SET = 1 << 11, /* MAC of peer known */
 };
 
 /*
@@ -248,6 +250,8 @@ struct bfd_config_timers {
        uint32_t required_min_echo_rx;
 };
 
+#define BFD_RTT_SAMPLE 8
+
 /*
  * Session state information
  */
@@ -290,6 +294,8 @@ struct bfd_session {
        struct peer_label *pl;
 
        struct bfd_dplane_ctx *bdc;
+       struct sockaddr_any local_address;
+       uint8_t peer_hw_addr[ETH_ALEN];
        struct interface *ifp;
        struct vrf *vrf;
 
@@ -308,6 +314,10 @@ struct bfd_session {
        struct bfd_timers remote_timers;
 
        uint64_t refcount; /* number of pointers referencing this. */
+
+       uint8_t rtt_valid;          /* number of valid samples */
+       uint8_t rtt_index;          /* last index added */
+       uint64_t rtt[BFD_RTT_SAMPLE]; /* RTT in usec for echo to be looped */
 };
 
 struct peer_label {
@@ -554,6 +564,7 @@ int bp_echov6_socket(const struct vrf *vrf);
 
 void ptm_bfd_snd(struct bfd_session *bfd, int fbit);
 void ptm_bfd_echo_snd(struct bfd_session *bfd);
+void ptm_bfd_echo_fp_snd(struct bfd_session *bfd);
 
 void bfd_recv_cb(struct thread *t);
 
@@ -631,6 +642,7 @@ const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
                                           bool mhop);
 void bfd_sessions_remove_manual(void);
 void bfd_profiles_remove(void);
+void bfd_rtt_init(struct bfd_session *bfd);
 
 /**
  * Set the BFD session echo state.
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index c717a333a6a913a833d99c82f9e0156b66b3d175..98411a87329d6bc4bbabb2f066c717a270857f82 100644 (file)
@@ -34,6 +34,8 @@
 #include <netinet/udp.h>
 
 #include "lib/sockopt.h"
+#include "lib/checksum.h"
+#include "lib/network.h"
 
 #include "bfd.h"
 
@@ -53,8 +55,20 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
                      struct sockaddr_any *peer);
 int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen,
                struct sockaddr *to, socklen_t tolen);
-int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
-                  uint8_t *ttl, uint32_t *my_discr);
+int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd, uint8_t *ttl,
+                  uint32_t *my_discr, uint64_t *my_rtt);
+#ifdef BFD_LINUX
+ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen,
+                        uint8_t *ttl, ifindex_t *ifindex,
+                        struct sockaddr_any *local, struct sockaddr_any *peer);
+void bfd_peer_mac_set(int sd, struct bfd_session *bfd,
+                     struct sockaddr_any *peer, struct interface *ifp);
+int bp_udp_send_fp(int sd, uint8_t *data, size_t datalen,
+                  struct bfd_session *bfd);
+ssize_t bfd_recv_fp_echo(int sd, uint8_t *msgbuf, size_t msgbuflen,
+                        uint8_t *ttl, ifindex_t *ifindex,
+                        struct sockaddr_any *local, struct sockaddr_any *peer);
+#endif
 
 /* socket related prototypes */
 static void bp_set_ipopts(int sd);
@@ -126,6 +140,148 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data,
        return 0;
 }
 
+#ifdef BFD_LINUX
+/*
+ * Compute the UDP checksum.
+ *
+ * Checksum is not set in the packet, just computed.
+ *
+ * pkt
+ *    Packet, fully filled out except for checksum field.
+ *
+ * pktsize
+ *    sizeof(*pkt)
+ *
+ * ip
+ *    IP address that pkt will be transmitted from and to.
+ *
+ * Returns:
+ *    Checksum in network byte order.
+ */
+static uint16_t bfd_pkt_checksum(struct udphdr *pkt, size_t pktsize,
+                                struct in6_addr *ip, sa_family_t family)
+{
+       uint16_t chksum;
+
+       pkt->check = 0;
+
+       if (family == AF_INET6) {
+               struct ipv6_ph ph = {};
+
+               memcpy(&ph.src, ip, sizeof(ph.src));
+               memcpy(&ph.dst, ip, sizeof(ph.dst));
+               ph.ulpl = htons(pktsize);
+               ph.next_hdr = IPPROTO_UDP;
+               chksum = in_cksum_with_ph6(&ph, pkt, pktsize);
+       } else {
+               struct ipv4_ph ph = {};
+
+               memcpy(&ph.src, ip, sizeof(ph.src));
+               memcpy(&ph.dst, ip, sizeof(ph.dst));
+               ph.proto = IPPROTO_UDP;
+               ph.len = htons(pktsize);
+               chksum = in_cksum_with_ph4(&ph, pkt, pktsize);
+       }
+
+       return chksum;
+}
+
+/*
+ * This routine creates the entire ECHO packet so that it will be looped
+ * in the forwarding plane of the peer router instead of going up the
+ * stack in BFD to be looped.  If we haven't learned the peer's MAC yet,
+ * no echo is sent.
+ *
+ * echo packet with src/dst IP equal to local IP
+ * dest MAC as peer's MAC
+ *
+ * currently supports IPv4
+ */
+void ptm_bfd_echo_fp_snd(struct bfd_session *bfd)
+{
+       int sd;
+       struct bfd_vrf_global *bvrf = bfd_vrf_look_by_session(bfd);
+       int total_len = 0;
+       struct ethhdr *eth;
+       struct udphdr *uh;
+       struct iphdr *iph;
+       struct bfd_echo_pkt *beph;
+       static char sendbuff[100];
+       struct timeval time_sent;
+
+       if (!bvrf)
+               return;
+       if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET))
+               return;
+       if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE))
+               SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE);
+
+       memset(sendbuff, 0, sizeof(sendbuff));
+
+       /* add eth hdr */
+       eth = (struct ethhdr *)(sendbuff);
+       memcpy(eth->h_source, bfd->ifp->hw_addr, sizeof(eth->h_source));
+       memcpy(eth->h_dest, bfd->peer_hw_addr, sizeof(eth->h_dest));
+
+       total_len += sizeof(struct ethhdr);
+
+       sd = bvrf->bg_echo;
+       eth->h_proto = htons(ETH_P_IP);
+
+       /* add ip hdr */
+       iph = (struct iphdr *)(sendbuff + sizeof(struct ethhdr));
+
+       iph->ihl = sizeof(struct ip) >> 2;
+       iph->version = IPVERSION;
+       iph->tos = IPTOS_PREC_INTERNETCONTROL;
+       iph->id = (uint16_t)frr_weak_random();
+       iph->ttl = BFD_TTL_VAL;
+       iph->protocol = IPPROTO_UDP;
+       memcpy(&iph->saddr, &bfd->local_address.sa_sin.sin_addr,
+              sizeof(bfd->local_address.sa_sin.sin_addr));
+       memcpy(&iph->daddr, &bfd->local_address.sa_sin.sin_addr,
+              sizeof(bfd->local_address.sa_sin.sin_addr));
+       total_len += sizeof(struct iphdr);
+
+       /* add udp hdr */
+       uh = (struct udphdr *)(sendbuff + sizeof(struct iphdr) +
+                              sizeof(struct ethhdr));
+       uh->source = htons(BFD_DEF_ECHO_PORT);
+       uh->dest = htons(BFD_DEF_ECHO_PORT);
+
+       total_len += sizeof(struct udphdr);
+
+       /* add bfd echo */
+       beph = (struct bfd_echo_pkt *)(sendbuff + sizeof(struct udphdr) +
+                                      sizeof(struct iphdr) +
+                                      sizeof(struct ethhdr));
+
+       beph->ver = BFD_ECHO_VERSION;
+       beph->len = BFD_ECHO_PKT_LEN;
+       beph->my_discr = htonl(bfd->discrs.my_discr);
+
+       /* RTT calculation: add starting time in packet */
+       monotime(&time_sent);
+       beph->time_sent_sec = htobe64(time_sent.tv_sec);
+       beph->time_sent_usec = htobe64(time_sent.tv_usec);
+
+       total_len += sizeof(struct bfd_echo_pkt);
+       uh->len =
+               htons(total_len - sizeof(struct iphdr) - sizeof(struct ethhdr));
+       uh->check = bfd_pkt_checksum(
+               uh, (total_len - sizeof(struct iphdr) - sizeof(struct ethhdr)),
+               (struct in6_addr *)&iph->saddr, AF_INET);
+
+       iph->tot_len = htons(total_len - sizeof(struct ethhdr));
+       iph->check = in_cksum((const void *)iph, sizeof(struct iphdr));
+
+       if (bp_udp_send_fp(sd, (uint8_t *)&sendbuff, total_len, bfd) == -1)
+               return;
+
+       bfd->stats.tx_echo_pkt++;
+}
+#endif
+
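bfd_pkt_checksum() above delegates to FRR's in_cksum_with_ph4()/in_cksum_with_ph6() helpers; the underlying computation is the standard RFC 768/RFC 1071 one: a 16-bit ones'-complement sum over a pseudo-header (source/destination address, protocol, UDP length) followed by the UDP header and payload, stored in network byte order. A self-contained sketch of the IPv4 case, using only illustrative names and standard headers:

/* Sketch of the RFC 768 UDP checksum over an IPv4 pseudo-header.
 * Illustrative only; FRR's in_cksum_with_ph4() wraps the same arithmetic. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pseudo_hdr_v4 {
	uint32_t src;  /* network byte order */
	uint32_t dst;  /* network byte order */
	uint8_t zero;
	uint8_t proto; /* IPPROTO_UDP */
	uint16_t len;  /* UDP length (header + data), network byte order */
};

/* ones'-complement sum of 16-bit big-endian words, accumulating into `sum` */
static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len) /* odd trailing byte is padded with a zero byte */
		sum += (uint32_t)(p[0] << 8);
	return sum;
}

/* udp points at the UDP header with its checksum field zeroed; udplen is
 * header plus payload length.  Result is in network byte order. */
static uint16_t udp4_checksum(uint32_t src, uint32_t dst, const void *udp,
			      uint16_t udplen)
{
	struct pseudo_hdr_v4 ph = {
		.src = src, .dst = dst, .zero = 0,
		.proto = IPPROTO_UDP, .len = htons(udplen),
	};
	uint32_t sum = sum16(&ph, sizeof(ph), 0);

	sum = sum16(udp, udplen, sum);
	while (sum >> 16) /* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	/* RFC 768: a computed 0 would be transmitted as 0xffff (omitted). */
	return htons((uint16_t)~sum);
}

int main(void)
{
	/* 8-byte UDP header: src/dst port 3785 (BFD echo), length 8, cksum 0 */
	uint8_t udp[8] = {0x0e, 0xc9, 0x0e, 0xc9, 0x00, 0x08, 0x00, 0x00};
	uint16_t ck = udp4_checksum(htonl(0xc0a80001), htonl(0xc0a80001),
				    udp, sizeof(udp));

	printf("udp checksum: 0x%04x\n", ntohs(ck));
	return 0;
}
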
 void ptm_bfd_echo_snd(struct bfd_session *bfd)
 {
        struct sockaddr *sa;
@@ -188,10 +344,11 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s)
 {
        struct bfd_session *bfd;
        uint32_t my_discr = 0;
+       uint64_t my_rtt = 0;
        uint8_t ttl = 0;
 
        /* Receive and parse echo packet. */
-       if (bp_bfd_echo_in(bvrf, s, &ttl, &my_discr) == -1)
+       if (bp_bfd_echo_in(bvrf, s, &ttl, &my_discr, &my_rtt) == -1)
                return 0;
 
        /* Your discriminator not zero - use it to find session */
@@ -210,6 +367,16 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s)
                return -1;
        }
 
+       /* RTT Calculation: add current RTT to samples */
+       if (my_rtt != 0) {
+               bfd->rtt[bfd->rtt_index] = my_rtt;
+               bfd->rtt_index++;
+               if (bfd->rtt_index >= BFD_RTT_SAMPLE)
+                       bfd->rtt_index = 0;
+               if (bfd->rtt_valid < BFD_RTT_SAMPLE)
+                       bfd->rtt_valid++;
+       }
+
        bfd->stats.rx_echo_pkt++;
 
        /* Compute detect time */
@@ -275,6 +442,94 @@ void ptm_bfd_snd(struct bfd_session *bfd, int fbit)
        bfd->stats.tx_ctrl_pkt++;
 }
 
+#ifdef BFD_LINUX
+/*
+ * receive the IPv4 echo packet that was looped back in the peer's forwarding plane
+ */
+ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen,
+                        uint8_t *ttl, ifindex_t *ifindex,
+                        struct sockaddr_any *local, struct sockaddr_any *peer)
+{
+       ssize_t mlen;
+       struct sockaddr_ll msgaddr;
+       struct msghdr msghdr;
+       struct iovec iov[1];
+       uint16_t recv_checksum;
+       uint16_t checksum;
+       struct iphdr *ip;
+       struct udphdr *uh;
+
+       /* Prepare the recvmsg params. */
+       iov[0].iov_base = msgbuf;
+       iov[0].iov_len = msgbuflen;
+
+       memset(&msghdr, 0, sizeof(msghdr));
+       msghdr.msg_name = &msgaddr;
+       msghdr.msg_namelen = sizeof(msgaddr);
+       msghdr.msg_iov = iov;
+       msghdr.msg_iovlen = 1;
+
+       mlen = recvmsg(sd, &msghdr, MSG_DONTWAIT);
+       if (mlen == -1) {
+               if (errno != EAGAIN || errno != EWOULDBLOCK || errno != EINTR)
+                       zlog_err("%s: recv failed: %s", __func__,
+                                strerror(errno));
+
+               return -1;
+       }
+
+       ip = (struct iphdr *)(msgbuf + sizeof(struct ethhdr));
+
+       /* verify ip checksum */
+       recv_checksum = ip->check;
+       ip->check = 0;
+       checksum = in_cksum((const void *)ip, sizeof(struct iphdr));
+       if (recv_checksum != checksum) {
+               if (bglobal.debug_network)
+                       zlog_debug(
+                               "%s: invalid iphdr checksum expected 0x%x rcvd 0x%x",
+                               __func__, checksum, recv_checksum);
+               return -1;
+       }
+
+       *ttl = ip->ttl;
+       if (*ttl != 254) {
+               /* Echo should be looped in peer's forwarding plane, but it also
+                * comes up to BFD so silently drop it
+                */
+               if (ip->daddr == ip->saddr)
+                       return -1;
+
+               if (bglobal.debug_network)
+                       zlog_debug("%s: invalid TTL: %u", __func__, *ttl);
+               return -1;
+       }
+
+       local->sa_sin.sin_family = AF_INET;
+       memcpy(&local->sa_sin.sin_addr, &ip->saddr, sizeof(ip->saddr));
+       peer->sa_sin.sin_family = AF_INET;
+       memcpy(&peer->sa_sin.sin_addr, &ip->daddr, sizeof(ip->daddr));
+
+       *ifindex = msgaddr.sll_ifindex;
+
+       /* verify udp checksum */
+       uh = (struct udphdr *)(msgbuf + sizeof(struct iphdr) +
+                              sizeof(struct ethhdr));
+       recv_checksum = uh->check;
+       uh->check = 0;
+       checksum = bfd_pkt_checksum(uh, ntohs(uh->len),
+                                   (struct in6_addr *)&ip->saddr, AF_INET);
+       if (recv_checksum != checksum) {
+               if (bglobal.debug_network)
+                       zlog_debug(
+                               "%s: invalid udphdr checksum expected 0x%x rcvd 0x%x",
+                               __func__, checksum, recv_checksum);
+               return -1;
+       }
+       return mlen;
+}
+#endif
+
 ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
                      ifindex_t *ifindex, struct sockaddr_any *local,
                      struct sockaddr_any *peer)
@@ -638,6 +893,14 @@ void bfd_recv_cb(struct thread *t)
                         "no session found");
                return;
        }
+       /*
+        * We may have a situation where received packet is on wrong vrf
+        */
+       if (bfd && bfd->vrf && bfd->vrf != bvrf->vrf) {
+               cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
+                        "wrong vrfid.");
+               return;
+       }
 
        /* Ensure that existing good sessions are not overridden. */
        if (!cp->discrs.remote_discr && bfd->ses_state != PTM_BFD_DOWN &&
@@ -649,6 +912,8 @@ void bfd_recv_cb(struct thread *t)
 
        /*
         * Multi hop: validate packet TTL.
+        * Single hop: set local address that received the packet.
+        *             set peer's MAC address for echo packets
         */
        if (is_mhop) {
                if (ttl < bfd->mh_ttl) {
@@ -657,6 +922,14 @@ void bfd_recv_cb(struct thread *t)
                                 bfd->mh_ttl, ttl);
                        return;
                }
+       } else {
+
+               if (bfd->local_address.sa_sin.sin_family == AF_UNSPEC)
+                       bfd->local_address = local;
+#ifdef BFD_LINUX
+               if (ifp)
+                       bfd_peer_mac_set(sd, bfd, &peer, ifp);
+#endif
        }
 
        bfd->stats.rx_ctrl_pkt++;
@@ -747,8 +1020,8 @@ void bfd_recv_cb(struct thread *t)
  *
  * Returns -1 on error or loopback or 0 on success.
  */
-int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
-                  uint8_t *ttl, uint32_t *my_discr)
+int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd, uint8_t *ttl,
+                  uint32_t *my_discr, uint64_t *my_rtt)
 {
        struct bfd_echo_pkt *bep;
        ssize_t rlen;
@@ -756,13 +1029,30 @@ int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
        ifindex_t ifindex = IFINDEX_INTERNAL;
        vrf_id_t vrfid = VRF_DEFAULT;
        uint8_t msgbuf[1516];
+       size_t bfd_offset = 0;
 
-       if (sd == bvrf->bg_echo)
+       if (sd == bvrf->bg_echo) {
+#ifdef BFD_LINUX
+               rlen = bfd_recv_ipv4_fp(sd, msgbuf, sizeof(msgbuf), ttl,
+                                       &ifindex, &local, &peer);
+
+               /* silently drop echo packet that is looped in fastpath but
+                * still comes up to BFD
+                */
+               if (rlen == -1)
+                       return -1;
+               bfd_offset = sizeof(struct udphdr) + sizeof(struct iphdr) +
+                            sizeof(struct ethhdr);
+#else
                rlen = bfd_recv_ipv4(sd, msgbuf, sizeof(msgbuf), ttl, &ifindex,
                                     &local, &peer);
-       else
+               bfd_offset = 0;
+#endif
+       } else {
                rlen = bfd_recv_ipv6(sd, msgbuf, sizeof(msgbuf), ttl, &ifindex,
                                     &local, &peer);
+               bfd_offset = 0;
+       }
 
        /* Short packet, better not risk reading it. */
        if (rlen < (ssize_t)sizeof(*bep)) {
@@ -771,8 +1061,8 @@ int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
                return -1;
        }
 
-       /* Test for loopback. */
-       if (*ttl == BFD_TTL_VAL) {
+       /* Test for loopback for ipv6, ipv4 is looped in forwarding plane */
+       if ((*ttl == BFD_TTL_VAL) && (sd == bvrf->bg_echov6)) {
                bp_udp_send(sd, *ttl - 1, msgbuf, rlen,
                            (struct sockaddr *)&peer,
                            (sd == bvrf->bg_echo) ? sizeof(peer.sa_sin)
@@ -781,7 +1071,7 @@ int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
        }
 
        /* Read my discriminator from BFD Echo packet. */
-       bep = (struct bfd_echo_pkt *)msgbuf;
+       bep = (struct bfd_echo_pkt *)(msgbuf + bfd_offset);
        *my_discr = ntohl(bep->my_discr);
        if (*my_discr == 0) {
                cp_debug(false, &peer, &local, ifindex, vrfid,
@@ -789,8 +1079,67 @@ int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd,
                return -1;
        }
 
+#ifdef BFD_LINUX
+       /* RTT Calculation: determine RTT time of IPv4 echo pkt */
+       if (sd == bvrf->bg_echo) {
+               struct timeval time_sent = {0, 0};
+
+               time_sent.tv_sec = be64toh(bep->time_sent_sec);
+               time_sent.tv_usec = be64toh(bep->time_sent_usec);
+               *my_rtt = monotime_since(&time_sent, NULL);
+       }
+#endif
+
+       return 0;
+}
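For the RTT measurement, ptm_bfd_echo_fp_snd() stamps each IPv4 echo packet with a monotonic timestamp converted to big-endian (htobe64()), and bp_bfd_echo_in() above recovers it with be64toh() and takes the difference. A standalone sketch of that embed/recover round trip, assuming Linux/glibc <endian.h> for the byte-order conversions; all names here are illustrative:

/* Sketch: stamp a packet with a big-endian monotonic timestamp and measure
 * the elapsed microseconds when it comes back. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct echo_stamp {
	uint64_t sec_be;  /* seconds, big-endian on the wire */
	uint64_t usec_be; /* microseconds, big-endian on the wire */
};

static void stamp_now(struct echo_stamp *s)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	s->sec_be = htobe64((uint64_t)ts.tv_sec);
	s->usec_be = htobe64((uint64_t)(ts.tv_nsec / 1000));
}

static uint64_t stamp_elapsed_usec(const struct echo_stamp *s)
{
	struct timespec now;
	uint64_t then_usec, now_usec;

	clock_gettime(CLOCK_MONOTONIC, &now);
	then_usec = be64toh(s->sec_be) * 1000000ULL + be64toh(s->usec_be);
	now_usec = (uint64_t)now.tv_sec * 1000000ULL +
		   (uint64_t)(now.tv_nsec / 1000);
	return now_usec - then_usec;
}

int main(void)
{
	struct echo_stamp s;
	struct timespec delay = {0, 2 * 1000 * 1000}; /* ~2 ms stand-in for the RTT */

	stamp_now(&s); /* would be copied into the echo payload */
	nanosleep(&delay, NULL);
	printf("rtt ~ %llu usec\n",
	       (unsigned long long)stamp_elapsed_usec(&s));
	return 0;
}
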
+
+#ifdef BFD_LINUX
+/*
+ * send a bfd packet with src/dst same IP so that the peer will receive
+ * the packet and forward it back to sender in the forwarding plane
+ */
+int bp_udp_send_fp(int sd, uint8_t *data, size_t datalen,
+                  struct bfd_session *bfd)
+{
+       ssize_t wlen;
+       struct msghdr msg = {0};
+       struct iovec iov[1];
+       uint8_t msgctl[255];
+       struct sockaddr_ll sadr_ll = {0};
+
+       sadr_ll.sll_ifindex = bfd->ifp->ifindex;
+       sadr_ll.sll_halen = ETH_ALEN;
+       memcpy(sadr_ll.sll_addr, bfd->peer_hw_addr, sizeof(bfd->peer_hw_addr));
+       sadr_ll.sll_protocol = htons(ETH_P_IP);
+
+       /* Prepare message data. */
+       iov[0].iov_base = data;
+       iov[0].iov_len = datalen;
+
+       memset(msgctl, 0, sizeof(msgctl));
+       msg.msg_name = &sadr_ll;
+       msg.msg_namelen = sizeof(sadr_ll);
+       msg.msg_iov = iov;
+       msg.msg_iovlen = 1;
+
+       /* Send echo to peer */
+       wlen = sendmsg(sd, &msg, 0);
+
+       if (wlen <= 0) {
+               if (bglobal.debug_network)
+                       zlog_debug("udp-send: loopback failure: (%d) %s", errno,
+                                  strerror(errno));
+               return -1;
+       } else if (wlen < (ssize_t)datalen) {
+               if (bglobal.debug_network)
+                       zlog_debug("udp-send: partial send: %zd expected %zu",
+                                  wlen, datalen);
+               return -1;
+       }
+
        return 0;
 }
+#endif
 
 int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen,
                struct sockaddr *to, socklen_t tolen)
@@ -893,10 +1242,41 @@ int bp_set_tos(int sd, uint8_t value)
        return 0;
 }
 
+static bool bp_set_reuse_addr(int sd)
+{
+       int one = 1;
+
+       if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
+               zlog_warn("set-reuse-addr: setsockopt(SO_REUSEADDR, %d): %s",
+                         one, strerror(errno));
+               return false;
+       }
+       return true;
+}
+
+static bool bp_set_reuse_port(int sd)
+{
+       int one = 1;
+
+       if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) == -1) {
+               zlog_warn("set-reuse-port: setsockopt(SO_REUSEPORT, %d): %s",
+                         one, strerror(errno));
+               return false;
+       }
+       return true;
+}
+
+
 static void bp_set_ipopts(int sd)
 {
        int rcvttl = BFD_RCV_TTL_VAL;
 
+       if (!bp_set_reuse_addr(sd))
+               zlog_fatal("set-reuse-addr: failed");
+
+       if (!bp_set_reuse_port(sd))
+               zlog_fatal("set-reuse-port: failed");
+
        if (bp_set_ttl(sd, BFD_TTL_VAL) != 0)
                zlog_fatal("set-ipopts: TTL configuration failed");
 
@@ -1138,6 +1518,12 @@ static void bp_set_ipv6opts(int sd)
        int ipv6_pktinfo = BFD_IPV6_PKT_INFO_VAL;
        int ipv6_only = BFD_IPV6_ONLY_VAL;
 
+       if (!bp_set_reuse_addr(sd))
+               zlog_fatal("set-reuse-addr: failed");
+
+       if (!bp_set_reuse_port(sd))
+               zlog_fatal("set-reuse-port: failed");
+
        if (bp_set_ttlv6(sd, BFD_TTL_VAL) == -1)
                zlog_fatal(
                        "set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s",
@@ -1219,6 +1605,59 @@ int bp_udp6_mhop(const struct vrf *vrf)
        return sd;
 }
 
+#ifdef BFD_LINUX
+/* tcpdump -dd udp dst port 3785 */
+struct sock_filter my_filterudp[] = {
+       {0x28, 0, 0, 0x0000000c}, {0x15, 0, 8, 0x00000800},
+       {0x30, 0, 0, 0x00000017}, {0x15, 0, 6, 0x00000011},
+       {0x28, 0, 0, 0x00000014}, {0x45, 4, 0, 0x00001fff},
+       {0xb1, 0, 0, 0x0000000e}, {0x48, 0, 0, 0x00000010},
+       {0x15, 0, 1, 0x00000ec9}, {0x6, 0, 0, 0x00040000},
+       {0x6, 0, 0, 0x00000000},
+};
+
+#define MY_FILTER_LENGTH 11
+
+int bp_echo_socket(const struct vrf *vrf)
+{
+       int s;
+
+       frr_with_privs (&bglobal.bfdd_privs) {
+               s = vrf_socket(AF_PACKET, SOCK_RAW, ETH_P_IP, vrf->vrf_id,
+                              vrf->name);
+       }
+
+       if (s == -1)
+               zlog_fatal("echo-socket: socket: %s", strerror(errno));
+
+       struct sock_fprog pf;
+       struct sockaddr_ll sll = {0};
+
+       /* adjust filter for socket to only receive ECHO packets */
+       pf.filter = my_filterudp;
+       pf.len = MY_FILTER_LENGTH;
+       if (setsockopt(s, SOL_SOCKET, SO_ATTACH_FILTER, &pf, sizeof(pf)) ==
+           -1) {
+               zlog_warn("%s: setsockopt(SO_ATTACH_FILTER): %s", __func__,
+                         strerror(errno));
+               close(s);
+               return -1;
+       }
+
+       memset(&sll, 0, sizeof(sll));
+       sll.sll_family = AF_PACKET;
+       sll.sll_protocol = htons(ETH_P_IP);
+       sll.sll_ifindex = 0;
+       if (bind(s, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
+               zlog_warn("Failed to bind echo socket: %s",
+                         safe_strerror(errno));
+               close(s);
+               return -1;
+       }
+
+       return s;
+}
+#else
 int bp_echo_socket(const struct vrf *vrf)
 {
        int s;
@@ -1234,6 +1673,7 @@ int bp_echo_socket(const struct vrf *vrf)
 
        return s;
 }
+#endif
 
 int bp_echov6_socket(const struct vrf *vrf)
 {
@@ -1257,3 +1697,44 @@ int bp_echov6_socket(const struct vrf *vrf)
 
        return s;
 }
+
+#ifdef BFD_LINUX
+/* get peer's mac address to be used with Echo packets when they are looped in
+ * peers forwarding plane
+ */
+void bfd_peer_mac_set(int sd, struct bfd_session *bfd,
+                     struct sockaddr_any *peer, struct interface *ifp)
+{
+       struct arpreq arpreq_;
+
+       if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET))
+               return;
+       if (ifp->flags & IFF_NOARP)
+               return;
+
+       if (peer->sa_sin.sin_family == AF_INET) {
+               /* IPV4 */
+               struct sockaddr_in *addr =
+                       (struct sockaddr_in *)&arpreq_.arp_pa;
+
+               memset(&arpreq_, 0, sizeof(struct arpreq));
+               addr->sin_family = AF_INET;
+               memcpy(&addr->sin_addr.s_addr, &peer->sa_sin.sin_addr,
+                      sizeof(addr->sin_addr));
+               strlcpy(arpreq_.arp_dev, ifp->name, sizeof(arpreq_.arp_dev));
+
+               if (ioctl(sd, SIOCGARP, &arpreq_) < 0) {
+                       zlog_warn(
+                               "BFD: getting peer's mac on %s failed error %s",
+                               ifp->name, strerror(errno));
+                       UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET);
+                       memset(bfd->peer_hw_addr, 0, sizeof(bfd->peer_hw_addr));
+
+               } else {
+                       memcpy(bfd->peer_hw_addr, arpreq_.arp_ha.sa_data,
+                              sizeof(bfd->peer_hw_addr));
+                       SET_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET);
+               }
+       }
+}
+#endif
diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c
index d4e12e4f1a10944ae118fbc9396079742a4dbad7..69424c45d9a8d5220c830271d83169c73aeef924 100644 (file)
@@ -423,12 +423,19 @@ DEFPY_YANG(
        "Configure echo mode\n")
 {
        if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty)) {
-               vty_out(vty, "%% Echo mode is only available for single hop sessions.\n");
+               vty_out(vty,
+                       "%% Echo mode is only available for single hop sessions.\n");
                return CMD_WARNING_CONFIG_FAILED;
        }
 
        if (!no && !bglobal.bg_use_dplane) {
-               vty_out(vty, "%% Current implementation of echo mode works only when the peer is also FRR.\n");
+#ifdef BFD_LINUX
+               vty_out(vty,
+                       "%% Echo mode works correctly for IPv4, but only works when the peer is also FRR for IPv6.\n");
+#else
+               vty_out(vty,
+                       "%% Current implementation of echo mode works only when the peer is also FRR.\n");
+#endif /* BFD_LINUX */
        }
 
        nb_cli_enqueue_change(vty, "./echo-mode", NB_OP_MODIFY,
diff --git a/bfdd/bfdd_vty.c b/bfdd/bfdd_vty.c
index a9fc716177d6b78d51e12cbd5baf56822ee3522b..21429f06cfbf269c4630bd38d4c527a80089d794 100644 (file)
@@ -66,6 +66,9 @@ static void _display_peer_counters_json(struct vty *vty, struct bfd_session *bs)
 static void _display_peer_counter_iter(struct hash_bucket *hb, void *arg);
 static void _display_peer_counter_json_iter(struct hash_bucket *hb, void *arg);
 static void _display_peers_counter(struct vty *vty, char *vrfname, bool use_json);
+static void _display_rtt(uint32_t *min, uint32_t *avg, uint32_t *max,
+                        struct bfd_session *bs);
+
 static struct bfd_session *
 _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
                    const char *label, const char *peer_str,
@@ -106,6 +109,9 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs)
 {
        char buf[256];
        time_t now;
+       uint32_t min = 0;
+       uint32_t avg = 0;
+       uint32_t max = 0;
 
        _display_peer_header(vty, bs);
 
@@ -150,6 +156,8 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs)
        vty_out(vty, "\t\tRemote diagnostics: %s\n", diag2str(bs->remote_diag));
        vty_out(vty, "\t\tPeer Type: %s\n",
                CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic");
+       _display_rtt(&min, &avg, &max, bs);
+       vty_out(vty, "\t\tRTT min/avg/max: %u/%u/%u usec\n", min, avg, max);
 
        vty_out(vty, "\t\tLocal timers:\n");
        vty_out(vty, "\t\t\tDetect-multiplier: %u\n",
@@ -217,6 +225,9 @@ static struct json_object *_peer_json_header(struct bfd_session *bs)
 static struct json_object *__display_peer_json(struct bfd_session *bs)
 {
        struct json_object *jo = _peer_json_header(bs);
+       uint32_t min = 0;
+       uint32_t avg = 0;
+       uint32_t max = 0;
 
        json_object_int_add(jo, "id", bs->discrs.my_discr);
        json_object_int_add(jo, "remote-id", bs->discrs.remote_discr);
@@ -275,6 +286,11 @@ static struct json_object *__display_peer_json(struct bfd_session *bs)
        json_object_int_add(jo, "remote-detect-multiplier",
                            bs->remote_detect_mult);
 
+       _display_rtt(&min, &avg, &max, bs);
+       json_object_int_add(jo, "rtt-min", min);
+       json_object_int_add(jo, "rtt-avg", avg);
+       json_object_int_add(jo, "rtt-max", max);
+
        return jo;
 }
 
@@ -608,6 +624,31 @@ _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
        return bs;
 }
 
+void _display_rtt(uint32_t *min, uint32_t *avg, uint32_t *max,
+                 struct bfd_session *bs)
+{
+#ifdef BFD_LINUX
+       uint8_t i;
+       uint32_t average = 0;
+
+       if (bs->rtt_valid == 0)
+               return;
+
+       *max = bs->rtt[0];
+       *min = 1000;
+       *avg = 0;
+
+       for (i = 0; i < bs->rtt_valid; i++) {
+               if (bs->rtt[i] < *min)
+                       *min = bs->rtt[i];
+               if (bs->rtt[i] > *max)
+                       *max = bs->rtt[i];
+               average += bs->rtt[i];
+       }
+       *avg = average / bs->rtt_valid;
+
+#endif
+}
 
 /*
  * Show commands.
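_display_rtt() above feeds the new "RTT min/avg/max" line in `show bfd peers` (and the rtt-min/avg/max JSON keys) from the samples collected by the echo path. A small standalone sketch of the same summary, seeding the minimum from the first sample rather than a fixed starting value; names are illustrative:

/* Sketch: min/avg/max over the first `valid` entries of an RTT sample array.
 * Mirrors what _display_rtt() computes for the CLI and JSON output. */
#include <stdint.h>
#include <stdio.h>

static void rtt_summary(const uint64_t *rtt, uint8_t valid, uint32_t *min,
			uint32_t *avg, uint32_t *max)
{
	uint64_t total = 0;

	*min = *avg = *max = 0;
	if (valid == 0)
		return;

	*min = *max = (uint32_t)rtt[0];
	for (uint8_t i = 0; i < valid; i++) {
		if (rtt[i] < *min)
			*min = (uint32_t)rtt[i];
		if (rtt[i] > *max)
			*max = (uint32_t)rtt[i];
		total += rtt[i];
	}
	*avg = (uint32_t)(total / valid);
}

int main(void)
{
	uint64_t samples[8] = {120, 95, 140, 110, 0, 0, 0, 0};
	uint32_t min, avg, max;

	rtt_summary(samples, 4, &min, &avg, &max);
	printf("RTT min/avg/max: %u/%u/%u usec\n", min, avg, max);
	return 0;
}
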
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index 34776bd6db9886b163d97fc6cded5de614a25954..cfbb29df1c070f2a0596542345a8f7932259f7a0 100644 (file)
 /* BGP advertise attribute is used for pack same attribute update into
    one packet.  To do that we maintain attribute hash in struct
    peer.  */
-struct bgp_advertise_attr *baa_new(void)
+struct bgp_advertise_attr *bgp_advertise_attr_new(void)
 {
        return XCALLOC(MTYPE_BGP_ADVERTISE_ATTR,
                       sizeof(struct bgp_advertise_attr));
 }
 
-static void baa_free(struct bgp_advertise_attr *baa)
+void bgp_advertise_attr_free(struct bgp_advertise_attr *baa)
 {
        XFREE(MTYPE_BGP_ADVERTISE_ATTR, baa);
 }
 
-static void *baa_hash_alloc(void *p)
+static void *bgp_advertise_attr_hash_alloc(void *p)
 {
        struct bgp_advertise_attr *ref = (struct bgp_advertise_attr *)p;
        struct bgp_advertise_attr *baa;
 
-       baa = baa_new();
+       baa = bgp_advertise_attr_new();
        baa->attr = ref->attr;
        return baa;
 }
 
-unsigned int baa_hash_key(const void *p)
+unsigned int bgp_advertise_attr_hash_key(const void *p)
 {
        const struct bgp_advertise_attr *baa = p;
 
        return attrhash_key_make(baa->attr);
 }
 
-bool baa_hash_cmp(const void *p1, const void *p2)
+bool bgp_advertise_attr_hash_cmp(const void *p1, const void *p2)
 {
        const struct bgp_advertise_attr *baa1 = p1;
        const struct bgp_advertise_attr *baa2 = p2;
@@ -115,20 +115,22 @@ void bgp_advertise_delete(struct bgp_advertise_attr *baa,
                baa->adv = adv->next;
 }
 
-struct bgp_advertise_attr *bgp_advertise_intern(struct hash *hash,
-                                               struct attr *attr)
+struct bgp_advertise_attr *bgp_advertise_attr_intern(struct hash *hash,
+                                                    struct attr *attr)
 {
        struct bgp_advertise_attr ref;
        struct bgp_advertise_attr *baa;
 
        ref.attr = bgp_attr_intern(attr);
-       baa = (struct bgp_advertise_attr *)hash_get(hash, &ref, baa_hash_alloc);
+       baa = (struct bgp_advertise_attr *)hash_get(
+               hash, &ref, bgp_advertise_attr_hash_alloc);
        baa->refcnt++;
 
        return baa;
 }
 
-void bgp_advertise_unintern(struct hash *hash, struct bgp_advertise_attr *baa)
+void bgp_advertise_attr_unintern(struct hash *hash,
+                                struct bgp_advertise_attr *baa)
 {
        if (baa->refcnt)
                baa->refcnt--;
@@ -140,7 +142,7 @@ void bgp_advertise_unintern(struct hash *hash, struct bgp_advertise_attr *baa)
                        hash_release(hash, baa);
                        bgp_attr_unintern(&baa->attr);
                }
-               baa_free(baa);
+               bgp_advertise_attr_free(baa);
        }
 }
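The renames above keep the existing behaviour: adverts that carry the same interned attribute share one bgp_advertise_attr, its refcnt counts how many adverts point at it, and bgp_advertise_attr_unintern() releases the hash entry and the attribute once nothing references it any more. A generic sketch of that refcounted-interning pattern (not the FRR API; the toy list below stands in for the hash table):

/* Sketch: reference-counted interning.  Equal keys map to one shared entry;
 * the entry is freed when the last user uninterns it.  Illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct interned {
	char *key;
	unsigned refcnt;
	struct interned *next;
};

static struct interned *table; /* toy stand-in for FRR's hash */

static struct interned *intern(const char *key)
{
	struct interned *e;

	for (e = table; e; e = e->next)
		if (strcmp(e->key, key) == 0) {
			e->refcnt++;
			return e; /* existing entry is shared */
		}

	e = calloc(1, sizeof(*e));
	e->key = strdup(key);
	e->refcnt = 1;
	e->next = table;
	table = e;
	return e;
}

static void unintern(struct interned *e)
{
	if (--e->refcnt)
		return; /* still referenced elsewhere */

	/* last reference: unlink and free */
	for (struct interned **p = &table; *p; p = &(*p)->next)
		if (*p == e) {
			*p = e->next;
			break;
		}
	free(e->key);
	free(e);
}

int main(void)
{
	struct interned *a = intern("attr-A");
	struct interned *b = intern("attr-A"); /* same key -> same entry */

	printf("shared=%d refcnt=%u\n", a == b, a->refcnt);
	unintern(a);
	unintern(b);
	return 0;
}
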
 
diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h
index 4b032ba9c6c0a79c858a76137935fab4ac026dbe..70294c2d89071e3ae7fa2d82e99f843ff18a4739 100644 (file)
@@ -152,18 +152,19 @@ extern void bgp_adj_in_remove(struct bgp_dest *dest, struct bgp_adj_in *bai);
 
 extern void bgp_sync_init(struct peer *peer);
 extern void bgp_sync_delete(struct peer *peer);
-extern unsigned int baa_hash_key(const void *p);
-extern bool baa_hash_cmp(const void *p1, const void *p2);
+extern unsigned int bgp_advertise_attr_hash_key(const void *p);
+extern bool bgp_advertise_attr_hash_cmp(const void *p1, const void *p2);
 extern void bgp_advertise_add(struct bgp_advertise_attr *baa,
                              struct bgp_advertise *adv);
 extern struct bgp_advertise *bgp_advertise_new(void);
 extern void bgp_advertise_free(struct bgp_advertise *adv);
-extern struct bgp_advertise_attr *bgp_advertise_intern(struct hash *hash,
-                                                      struct attr *attr);
-extern struct bgp_advertise_attr *baa_new(void);
+extern struct bgp_advertise_attr *bgp_advertise_attr_intern(struct hash *hash,
+                                                           struct attr *attr);
+extern struct bgp_advertise_attr *bgp_advertise_attr_new(void);
 extern void bgp_advertise_delete(struct bgp_advertise_attr *baa,
                                 struct bgp_advertise *adv);
-extern void bgp_advertise_unintern(struct hash *hash,
-                                  struct bgp_advertise_attr *baa);
+extern void bgp_advertise_attr_unintern(struct hash *hash,
+                                       struct bgp_advertise_attr *baa);
+extern void bgp_advertise_attr_free(struct bgp_advertise_attr *baa);
 
 #endif /* _QUAGGA_BGP_ADVERTISE_H */
diff --git a/bgpd/bgp_aspath.c b/bgpd/bgp_aspath.c
index 39886337f39e1844e38bffdf1a1c24a0c2248399..06f6073781773be2e8255bf5711cc5bada11c976 100644 (file)
@@ -1209,28 +1209,6 @@ bool aspath_private_as_check(struct aspath *aspath)
        return true;
 }
 
-/* Return True if the entire ASPATH consist of the specified ASN */
-bool aspath_single_asn_check(struct aspath *aspath, as_t asn)
-{
-       struct assegment *seg;
-
-       if (!(aspath && aspath->segments))
-               return false;
-
-       seg = aspath->segments;
-
-       while (seg) {
-               int i;
-
-               for (i = 0; i < seg->length; i++) {
-                       if (seg->as[i] != asn)
-                               return false;
-               }
-               seg = seg->next;
-       }
-       return true;
-}
-
 /* Replace all instances of the target ASN with our own ASN */
 struct aspath *aspath_replace_specific_asn(struct aspath *aspath,
                                           as_t target_asn, as_t our_asn)
diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h
index 5caab73c4d9830b316f5c16bd72ea6bf8e351a64..0b58e1adc4a77af8ae8168aae853b0d4a450255b 100644 (file)
@@ -112,7 +112,6 @@ extern unsigned int aspath_get_first_as(struct aspath *aspath);
 extern unsigned int aspath_get_last_as(struct aspath *aspath);
 extern int aspath_loop_check(struct aspath *aspath, as_t asno);
 extern bool aspath_private_as_check(struct aspath *aspath);
-extern bool aspath_single_asn_check(struct aspath *aspath, as_t asn);
 extern struct aspath *aspath_replace_specific_asn(struct aspath *aspath,
                                                  as_t target_asn,
                                                  as_t our_asn);
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index faf44aa2512a1293779f404d19ab74ac022e3424..d91c717f37cde3e993a5672956ce6d26ae954027 100644 (file)
@@ -1301,6 +1301,7 @@ bgp_attr_malformed(struct bgp_attr_parser_args *args, uint8_t subcode,
        case BGP_ATTR_LARGE_COMMUNITIES:
        case BGP_ATTR_ORIGINATOR_ID:
        case BGP_ATTR_CLUSTER_LIST:
+       case BGP_ATTR_OTC:
                return BGP_ATTR_PARSE_WITHDRAW;
        case BGP_ATTR_MP_REACH_NLRI:
        case BGP_ATTR_MP_UNREACH_NLRI:
@@ -1514,6 +1515,19 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args)
                                          0);
        }
 
+       /* Conformant BGP speakers SHOULD NOT send BGP
+        * UPDATE messages containing AS_SET or AS_CONFED_SET.  Upon receipt of
+        * such messages, conformant BGP speakers SHOULD use the "Treat-as-
+        * withdraw" error handling behavior as per [RFC7606].
+        */
+       if (peer->bgp->reject_as_sets && aspath_check_as_sets(attr->aspath)) {
+               flog_err(EC_BGP_ATTR_MAL_AS_PATH,
+                        "AS_SET and AS_CONFED_SET are deprecated from %pBP",
+                        peer);
+               return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
+                                         0);
+       }
+
        /* Set aspath attribute flag. */
        attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
 
@@ -1594,6 +1608,19 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args,
                                          0);
        }
 
+       /* Conformant BGP speakers SHOULD NOT send BGP
+        * UPDATE messages containing AS_SET or AS_CONFED_SET.  Upon receipt of
+        * such messages, conformant BGP speakers SHOULD use the "Treat-as-
+        * withdraw" error handling behavior as per [RFC7606].
+        */
+       if (peer->bgp->reject_as_sets && aspath_check_as_sets(attr->aspath)) {
+               flog_err(EC_BGP_ATTR_MAL_AS_PATH,
+                        "AS_SET and AS_CONFED_SET are deprecated from %pBP",
+                        peer);
+               return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
+                                         0);
+       }
+
        /* Set aspath attribute flag. */
        attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH);
 
@@ -1606,12 +1633,9 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args,
 enum bgp_attr_parse_ret bgp_attr_nexthop_valid(struct peer *peer,
                                               struct attr *attr)
 {
-       in_addr_t nexthop_h;
+       struct bgp *bgp = peer->bgp;
 
-       nexthop_h = ntohl(attr->nexthop.s_addr);
-       if ((IPV4_NET0(nexthop_h) || IPV4_NET127(nexthop_h) ||
-            !ipv4_unicast_valid(&attr->nexthop)) &&
-           !BGP_DEBUG(allow_martians, ALLOW_MARTIANS)) {
+       if (ipv4_martian(&attr->nexthop) && !bgp->allow_martian) {
                uint8_t data[7]; /* type(2) + length(1) + nhop(4) */
                char buf[INET_ADDRSTRLEN];
 
@@ -3045,6 +3069,11 @@ static enum bgp_attr_parse_ret bgp_attr_otc(struct bgp_attr_parser_args *args)
        }
 
        attr->otc = stream_getl(peer->curr);
+       if (!attr->otc) {
+               flog_err(EC_BGP_ATTR_MAL_AS_PATH, "OTC attribute value is 0");
+               return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
+                                         args->total);
+       }
 
        attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_OTC);
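
Note on the bgp_attr_nexthop_valid() hunk above: the open-coded range tests (IPV4_NET0, IPV4_NET127, !ipv4_unicast_valid) are replaced by a single ipv4_martian() call, now gated on the per-instance bgp->allow_martian flag rather than the removed debug knob. The sketch below is a standalone approximation of the ranges the old inline test rejected; it is illustrative only and does not claim to match FRR's lib/prefix.c helper.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: roughly the address ranges the removed inline
 * check treated as unusable next hops. */
static bool nexthop_looks_martian(struct in_addr nh)
{
	uint32_t h = ntohl(nh.s_addr);

	if ((h & 0xff000000) == 0x00000000)	/* 0.0.0.0/8 ("this network") */
		return true;
	if ((h & 0xff000000) == 0x7f000000)	/* 127.0.0.0/8 loopback */
		return true;
	if (h >= 0xe0000000)			/* class D/E: not valid unicast */
		return true;
	return false;
}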
 
index b561b50ff5121647b6f1ff7fc3c9b35b1b0e7a57..bcab4099c049442ab8d338ed423e8da9f2d41da9 100644 (file)
@@ -136,6 +136,12 @@ static int bmp_listener_cmp(const struct bmp_listener *a,
 DECLARE_SORTLIST_UNIQ(bmp_listeners, struct bmp_listener, bli,
                      bmp_listener_cmp);
 
+static void bmp_listener_put(struct bmp_listener *bl)
+{
+       bmp_listeners_del(&bl->targets->listeners, bl);
+       XFREE(MTYPE_BMP_LISTENER, bl);
+}
+
 static int bmp_targets_cmp(const struct bmp_targets *a,
                           const struct bmp_targets *b)
 {
@@ -162,6 +168,16 @@ static int bmp_qhash_cmp(const struct bmp_queue_entry *a,
        else if (b->afi == AFI_L2VPN && b->safi == SAFI_EVPN)
                return -1;
 
+       if (a->afi == b->afi && a->safi == SAFI_MPLS_VPN &&
+           b->safi == SAFI_MPLS_VPN) {
+               ret = prefix_cmp(&a->rd, &b->rd);
+               if (ret)
+                       return ret;
+       } else if (a->safi == SAFI_MPLS_VPN)
+               return 1;
+       else if (b->safi == SAFI_MPLS_VPN)
+               return -1;
+
        ret = prefix_cmp(&a->p, &b->p);
        if (ret)
                return ret;
@@ -180,7 +196,8 @@ static uint32_t bmp_qhash_hkey(const struct bmp_queue_entry *e)
                    offsetof(struct bmp_queue_entry, refcount)
                            - offsetof(struct bmp_queue_entry, peerid),
                    key);
-       if (e->afi == AFI_L2VPN && e->safi == SAFI_EVPN)
+       if ((e->afi == AFI_L2VPN && e->safi == SAFI_EVPN) ||
+           (e->safi == SAFI_MPLS_VPN))
                key = jhash(&e->rd,
                            offsetof(struct bmp_queue_entry, rd)
                                    - offsetof(struct bmp_queue_entry, refcount)
@@ -975,11 +992,12 @@ afibreak:
        }
 
        struct bgp_table *table = bmp->targets->bgp->rib[afi][safi];
-       struct bgp_dest *bn;
+       struct bgp_dest *bn = NULL;
        struct bgp_path_info *bpi = NULL, *bpiter;
        struct bgp_adj_in *adjin = NULL, *adjiter;
 
-       if (afi == AFI_L2VPN && safi == SAFI_EVPN) {
+       if ((afi == AFI_L2VPN && safi == SAFI_EVPN) ||
+           (safi == SAFI_MPLS_VPN)) {
                /* initialize syncrdpos to the first
                 * mid-layer table entry
                 */
@@ -1008,7 +1026,8 @@ afibreak:
                if (!bn) {
                        bn = bgp_table_get_next(table, &bmp->syncpos);
                        if (!bn) {
-                               if (afi == AFI_L2VPN && safi == SAFI_EVPN) {
+                               if ((afi == AFI_L2VPN && safi == SAFI_EVPN) ||
+                                   (safi == SAFI_MPLS_VPN)) {
                                        /* reset bottom-layer pointer */
                                        memset(&bmp->syncpos, 0,
                                               sizeof(bmp->syncpos));
@@ -1090,7 +1109,8 @@ afibreak:
 
        const struct prefix *bn_p = bgp_dest_get_prefix(bn);
        struct prefix_rd *prd = NULL;
-       if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+       if (((afi == AFI_L2VPN) && (safi == SAFI_EVPN)) ||
+           (safi == SAFI_MPLS_VPN))
                prd = (struct prefix_rd *)bgp_dest_get_prefix(bmp->syncrdpos);
 
        if (bpi)
@@ -1100,6 +1120,9 @@ afibreak:
                bmp_monitor(bmp, adjin->peer, 0, bn_p, prd, adjin->attr, afi,
                            safi, adjin->uptime);
 
+       if (bn)
+               bgp_dest_unlock_node(bn);
+
        return true;
 }
 
@@ -1125,7 +1148,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
 {
        struct bmp_queue_entry *bqe;
        struct peer *peer;
-       struct bgp_dest *bn;
+       struct bgp_dest *bn = NULL;
        bool written = false;
 
        bqe = bmp_pull(bmp);
@@ -1160,10 +1183,13 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
        if (!peer_established(peer))
                goto out;
 
-       bn = bgp_node_lookup(bmp->targets->bgp->rib[afi][safi], &bqe->p);
-       struct prefix_rd *prd = NULL;
-       if (bqe->afi == AFI_L2VPN && bqe->safi == SAFI_EVPN)
-               prd = &bqe->rd;
+       bool is_vpn = (bqe->afi == AFI_L2VPN && bqe->safi == SAFI_EVPN) ||
+                     (bqe->safi == SAFI_MPLS_VPN);
+
+       struct prefix_rd *prd = is_vpn ? &bqe->rd : NULL;
+       bn = bgp_afi_node_lookup(bmp->targets->bgp->rib[afi][safi], afi, safi,
+                                &bqe->p, prd);
+
 
        if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
                struct bgp_path_info *bpi;
@@ -1199,6 +1225,10 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
 out:
        if (!bqe->refcount)
                XFREE(MTYPE_BMP_QUEUE, bqe);
+
+       if (bn)
+               bgp_dest_unlock_node(bn);
+
        return written;
 }
 
@@ -1250,7 +1280,8 @@ static void bmp_process_one(struct bmp_targets *bt, struct bgp *bgp, afi_t afi,
        bqeref.afi = afi;
        bqeref.safi = safi;
 
-       if (afi == AFI_L2VPN && safi == SAFI_EVPN && bn->pdest)
+       if ((afi == AFI_L2VPN && safi == SAFI_EVPN && bn->pdest) ||
+           (safi == SAFI_MPLS_VPN))
                prefix_copy(&bqeref.rd,
                            (struct prefix_rd *)bgp_dest_get_prefix(bn->pdest));
 
@@ -1541,11 +1572,16 @@ static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp)
 static void bmp_bgp_put(struct bmp_bgp *bmpbgp)
 {
        struct bmp_targets *bt;
+       struct bmp_listener *bl;
 
        bmp_bgph_del(&bmp_bgph, bmpbgp);
 
-       frr_each_safe(bmp_targets, &bmpbgp->targets, bt)
+       frr_each_safe (bmp_targets, &bmpbgp->targets, bt) {
+               frr_each_safe (bmp_listeners, &bt->listeners, bl)
+                       bmp_listener_put(bl);
+
                bmp_targets_put(bt);
+       }
 
        bmp_mirrorq_fini(&bmpbgp->mirrorq);
        XFREE(MTYPE_BMP, bmpbgp);
@@ -1675,12 +1711,6 @@ static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt,
        return bl;
 }
 
-static void bmp_listener_put(struct bmp_listener *bl)
-{
-       bmp_listeners_del(&bl->targets->listeners, bl);
-       XFREE(MTYPE_BMP_LISTENER, bl);
-}
-
 static void bmp_listener_start(struct bmp_listener *bl)
 {
        int sock, ret;
@@ -2190,12 +2220,17 @@ DEFPY(bmp_stats_cfg,
 
 DEFPY(bmp_monitor_cfg,
       bmp_monitor_cmd,
-      "[no] bmp monitor <ipv4|ipv6|l2vpn> <unicast|multicast|evpn> <pre-policy|post-policy>$policy",
+      "[no] bmp monitor <ipv4|ipv6|l2vpn> <unicast|multicast|evpn|vpn> <pre-policy|post-policy>$policy",
       NO_STR
       BMP_STR
       "Send BMP route monitoring messages\n"
-      "Address Family\nAddress Family\nAddress Family\n"
-      "Address Family\nAddress Family\nAddress Family\n"
+      BGP_AF_STR
+      BGP_AF_STR
+      BGP_AF_STR
+      BGP_AF_STR
+      BGP_AF_STR
+      BGP_AF_STR
+      BGP_AF_STR
       "Send state before policy and filter processing\n"
       "Send state with policy and filters applied\n")
 {
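
The bgp_bmp.c changes above extend BMP route monitoring to the L3VPN SAFI: queue entries for SAFI_MPLS_VPN now carry and compare their route distinguisher, the sync walk descends through the per-RD mid-layer table, looked-up destinations are unlocked again, and the "bmp monitor" command gains a vpn keyword. Below is a minimal sketch of the comparator idea only, using hypothetical types and memcmp() as a stand-in for prefix_cmp(); it is not FRR's bmp_qhash_cmp().

#include <string.h>

/* Hypothetical queue-entry shape, not FRR's struct bmp_queue_entry. */
struct demo_entry {
	int afi, safi;
	unsigned char rd[8];	/* route distinguisher, meaningful for VPN SAFIs */
	unsigned char p[5];	/* encoded prefix */
};

#define DEMO_SAFI_MPLS_VPN 4	/* illustrative constant */

/* Compare the RD first when both entries are VPN routes, then the prefix,
 * so the same prefix under two different RDs sorts and hashes as two
 * distinct entries. */
static int demo_entry_cmp(const struct demo_entry *a, const struct demo_entry *b)
{
	int ret;

	if (a->safi == DEMO_SAFI_MPLS_VPN && b->safi == DEMO_SAFI_MPLS_VPN) {
		ret = memcmp(a->rd, b->rd, sizeof(a->rd));
		if (ret)
			return ret;
	} else if (a->safi == DEMO_SAFI_MPLS_VPN) {
		return 1;
	} else if (b->safi == DEMO_SAFI_MPLS_VPN) {
		return -1;
	}

	return memcmp(a->p, b->p, sizeof(a->p));
}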
index 78cf9ea76c8fe2a6de544fa69147d4590ddc601a..9f6f337c8808f852ddfc91a9b4b0c120e82ee415 100644 (file)
@@ -914,8 +914,16 @@ void community_init(void)
                            "BGP Community Hash");
 }
 
+static void community_hash_free(void *data)
+{
+       struct community *com = data;
+
+       community_free(&com);
+}
+
 void community_finish(void)
 {
+       hash_clean(comhash, community_hash_free);
        hash_free(comhash);
        comhash = NULL;
 }
index 431e6e00a4a04b2d01110c08f9a67271b4186aaa..3750a24680a159b56638775134974a5530f6a575 100644 (file)
@@ -81,9 +81,16 @@ void bgp_community_alias_init(void)
                            "BGP community alias (alias)");
 }
 
+static void bgp_ca_free(void *ca)
+{
+       XFREE(MTYPE_COMMUNITY_ALIAS, ca);
+}
+
 void bgp_community_alias_finish(void)
 {
+       hash_clean(bgp_ca_community_hash, bgp_ca_free);
        hash_free(bgp_ca_community_hash);
+       hash_clean(bgp_ca_alias_hash, bgp_ca_free);
        hash_free(bgp_ca_alias_hash);
 }
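
Both community_finish() and bgp_community_alias_finish() above gain a hash_clean() pass that releases every stored element before hash_free() tears down the table, closing a shutdown-time leak. The same clean-then-free ordering, reduced to a self-contained toy chained table with hypothetical names (not FRR's lib/hash.c):

#include <stdlib.h>

struct demo_bucket {
	void *data;
	struct demo_bucket *next;
};

struct demo_hash {
	struct demo_bucket *slots[16];
};

/* Analogue of hash_clean(): walk every chain and hand each stored
 * element to the caller-provided destructor. */
static void demo_hash_clean(struct demo_hash *h, void (*item_free)(void *))
{
	for (int i = 0; i < 16; i++) {
		struct demo_bucket *b = h->slots[i], *next;

		while (b) {
			next = b->next;
			item_free(b->data);
			free(b);
			b = next;
		}
		h->slots[i] = NULL;
	}
}

/* Analogue of hash_free(): release the table itself, which is only
 * safe once the elements have been cleaned out. */
static void demo_hash_free(struct demo_hash *h)
{
	free(h);
}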
 
index dd1510a6781fbc33ca1c850001ba40a6a4363ce5..fc44e86cbc479c359cf4300a648bdceabbaf9c4b 100644 (file)
@@ -100,7 +100,8 @@ static void bgp_conditional_adv_routes(struct peer *peer, afi_t afi,
 
        if (BGP_DEBUG(update, UPDATE_OUT))
                zlog_debug("%s: %s routes to/from %s for %s", __func__,
-                          update_type == ADVERTISE ? "Advertise" : "Withdraw",
+                          update_type == UPDATE_TYPE_ADVERTISE ? "Advertise"
+                                                               : "Withdraw",
                           peer->host, get_afi_safi_str(afi, safi, false));
 
        addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);
@@ -133,7 +134,7 @@ static void bgp_conditional_adv_routes(struct peer *peer, afi_t afi,
                         * on same peer, routes in advertise-map may not
                         * be advertised as expected.
                         */
-                       if (update_type == ADVERTISE &&
+                       if (update_type == UPDATE_TYPE_ADVERTISE &&
                            subgroup_announce_check(dest, pi, subgrp, dest_p,
                                                    &attr, &advmap_attr)) {
                                bgp_adj_out_set_subgroup(dest, subgrp, &attr,
@@ -248,12 +249,33 @@ static void bgp_conditional_adv_timer(struct thread *t)
                         */
                        if (filter->advmap.condition == CONDITION_EXIST)
                                filter->advmap.update_type =
-                                       (ret == RMAP_PERMITMATCH) ? ADVERTISE
-                                                                 : WITHDRAW;
+                                       (ret == RMAP_PERMITMATCH)
+                                               ? UPDATE_TYPE_ADVERTISE
+                                               : UPDATE_TYPE_WITHDRAW;
                        else
                                filter->advmap.update_type =
-                                       (ret == RMAP_PERMITMATCH) ? WITHDRAW
-                                                                 : ADVERTISE;
+                                       (ret == RMAP_PERMITMATCH)
+                                               ? UPDATE_TYPE_WITHDRAW
+                                               : UPDATE_TYPE_ADVERTISE;
+
+                       /*
+                        * Update condadv update type so
+                        * subgroup_announce_check() can properly apply
+                        * outbound policy according to advertisement state
+                        */
+                       paf = peer_af_find(peer, afi, safi);
+                       if (paf && (SUBGRP_PEER(PAF_SUBGRP(paf))
+                                           ->filter[afi][safi]
+                                           .advmap.update_type !=
+                                   filter->advmap.update_type)) {
+                               /* Handle change to peer advmap */
+                               if (BGP_DEBUG(update, UPDATE_OUT))
+                                       zlog_debug(
+                                               "%s: advmap.update_type changed for peer %s, adjusting update_group.",
+                                               __func__, peer->host);
+
+                               update_group_adjust_peer(paf);
+                       }
 
                        /* Send regular update as per the existing policy.
                         * There is a change in route-map, match-rule, ACLs,
@@ -267,11 +289,10 @@ static void bgp_conditional_adv_timer(struct thread *t)
                                                __func__, peer->host,
                                                get_afi_safi_str(afi, safi,
                                                                 false));
-
-                               paf = peer_af_find(peer, afi, safi);
                                if (paf) {
                                        update_subgroup_split_peer(paf, NULL);
                                        subgrp = paf->subgroup;
+
                                        if (subgrp && subgrp->update_group)
                                                subgroup_announce_table(
                                                        paf->subgroup, NULL);
@@ -312,8 +333,9 @@ void bgp_conditional_adv_enable(struct peer *peer, afi_t afi, safi_t safi)
        }
 
        /* Register for conditional routes polling timer */
-       thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
-                        bgp->condition_check_period, &bgp->t_condition_check);
+       if (!thread_is_scheduled(bgp->t_condition_check))
+               thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
+                                &bgp->t_condition_check);
 }
 
 void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
index 62e8e71aa03f6717f1f8aa36efa12cf6212fe14c..9acbaf773371fb97bcae176870a70071a6eb9176 100644 (file)
@@ -465,7 +465,7 @@ int bgp_damp_disable(struct bgp *bgp, afi_t afi, safi_t safi)
                return 0;
 
        /* Cancel reuse event. */
-       thread_cancel(&(bdc->t_reuse));
+       THREAD_OFF(bdc->t_reuse);
 
        /* Clean BGP dampening information.  */
        bgp_damp_info_clean(afi, safi);
index fbe967d8b2537812dbedecbccdc9fa5cb71e6c38..d0ab88ee1b5875188b51f53e0afa9cf1caeec206 100644 (file)
@@ -1810,40 +1810,6 @@ DEFUN (no_debug_bgp_zebra_prefix,
        return CMD_SUCCESS;
 }
 
-DEFUN (debug_bgp_allow_martians,
-       debug_bgp_allow_martians_cmd,
-       "debug bgp allow-martians",
-       DEBUG_STR
-       BGP_STR
-       "BGP allow martian next hops\n")
-{
-       if (vty->node == CONFIG_NODE)
-               DEBUG_ON(allow_martians, ALLOW_MARTIANS);
-       else {
-               TERM_DEBUG_ON(allow_martians, ALLOW_MARTIANS);
-               vty_out(vty, "BGP allow_martian next hop debugging is on\n");
-       }
-       return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_bgp_allow_martians,
-       no_debug_bgp_allow_martians_cmd,
-       "no debug bgp allow-martians",
-       NO_STR
-       DEBUG_STR
-       BGP_STR
-       "BGP allow martian next hops\n")
-{
-       if (vty->node == CONFIG_NODE)
-               DEBUG_OFF(allow_martians, ALLOW_MARTIANS);
-       else {
-               TERM_DEBUG_OFF(allow_martians, ALLOW_MARTIANS);
-               vty_out(vty, "BGP allow martian next hop debugging is off\n");
-       }
-       return CMD_SUCCESS;
-}
-
-
 /* debug bgp update-groups */
 DEFUN (debug_bgp_update_groups,
        debug_bgp_update_groups_cmd,
@@ -2439,8 +2405,6 @@ void bgp_debug_init(void)
        install_element(CONFIG_NODE, &debug_bgp_update_cmd);
        install_element(ENABLE_NODE, &debug_bgp_zebra_cmd);
        install_element(CONFIG_NODE, &debug_bgp_zebra_cmd);
-       install_element(ENABLE_NODE, &debug_bgp_allow_martians_cmd);
-       install_element(CONFIG_NODE, &debug_bgp_allow_martians_cmd);
        install_element(ENABLE_NODE, &debug_bgp_update_groups_cmd);
        install_element(CONFIG_NODE, &debug_bgp_update_groups_cmd);
        install_element(ENABLE_NODE, &debug_bgp_bestpath_prefix_cmd);
@@ -2504,8 +2468,6 @@ void bgp_debug_init(void)
        install_element(CONFIG_NODE, &no_debug_bgp_update_cmd);
        install_element(ENABLE_NODE, &no_debug_bgp_zebra_cmd);
        install_element(CONFIG_NODE, &no_debug_bgp_zebra_cmd);
-       install_element(ENABLE_NODE, &no_debug_bgp_allow_martians_cmd);
-       install_element(CONFIG_NODE, &no_debug_bgp_allow_martians_cmd);
        install_element(ENABLE_NODE, &no_debug_bgp_update_groups_cmd);
        install_element(CONFIG_NODE, &no_debug_bgp_update_groups_cmd);
        install_element(ENABLE_NODE, &no_debug_bgp_cmd);
index e57f449f781c7891be35d99afcec8b2413d8b316..720925b20fd7053f162e6ebf5628214860ebb0e9 100644 (file)
@@ -702,7 +702,7 @@ static int bgp_dump_unset(struct bgp_dump *bgp_dump)
        }
 
        /* Removing interval event. */
-       thread_cancel(&bgp_dump->t_interval);
+       THREAD_OFF(bgp_dump->t_interval);
 
        bgp_dump->interval = 0;
 
index 3483ece5b8aac4c2cc63cfa227f0c22119c9c28d..0642c966eb1078087f86df8f5fac64c65899bd49 100644 (file)
@@ -1753,6 +1753,16 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
                bgp_attr_set_pmsi_tnl_type(&attr, PMSI_TNLTYPE_INGR_REPL);
        }
 
+       /* router mac is only needed for type-2 routes here. */
+       if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
+               uint8_t af_flags = 0;
+
+               if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SVI_IP))
+                       SET_FLAG(af_flags, BGP_EVPN_MACIP_TYPE_SVI_IP);
+
+               bgp_evpn_get_rmac_nexthop(vpn, p, &attr, af_flags);
+       }
+
        if (bgp_debug_zebra(NULL)) {
                char buf3[ESI_STR_LEN];
 
@@ -1763,15 +1773,6 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
                        vpn->vni, p, &attr.rmac, &attr.mp_nexthop_global_in,
                        esi_to_str(esi, buf3, sizeof(buf3)));
        }
-       /* router mac is only needed for type-2 routes here. */
-       if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
-               uint8_t af_flags = 0;
-
-               if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SVI_IP))
-                       SET_FLAG(af_flags, BGP_EVPN_MACIP_TYPE_SVI_IP);
-
-               bgp_evpn_get_rmac_nexthop(vpn, p, &attr, af_flags);
-       }
 
        vni2label(vpn->vni, &(attr.label));
 
@@ -2447,8 +2448,10 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
        if (attr.evpn_overlay.type != OVERLAY_INDEX_GATEWAY_IP) {
                if (afi == AFI_IP6)
                        evpn_convert_nexthop_to_ipv6(&attr);
-               else
+               else {
+                       attr.nexthop = attr.mp_nexthop_global_in;
                        attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+               }
        } else {
 
                /*
@@ -6427,7 +6430,7 @@ static void bgp_evpn_remote_ip_process_nexthops(struct bgpevpn *vpn,
                return;
 
        tree = &vpn->bgp_vrf->nexthop_cache_table[afi];
-       bnc = bnc_find(tree, &p, 0);
+       bnc = bnc_find(tree, &p, 0, 0);
 
        if (!bnc || !bnc->is_evpn_gwip_nexthop)
                return;
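
In the install_evpn_route_entry_in_vrf() hunk above, the IPv4 branch now copies the MP_REACH next hop into the legacy next-hop field before setting the NEXT_HOP flag, mirroring what the IPv6 branch already does via evpn_convert_nexthop_to_ipv6(). The shape of the fix, with hypothetical field names only (the real struct attr lives in bgpd/bgp_attr.h):

#include <netinet/in.h>

/* Hypothetical, trimmed-down attribute struct for illustration. */
struct demo_attr {
	struct in_addr nexthop;			/* legacy NEXT_HOP (attr 3) */
	struct in_addr mp_nexthop_global_in;	/* next hop from MP_REACH_NLRI */
	unsigned int flag;
};

#define DEMO_FLAG_NEXT_HOP 0x8			/* illustrative flag bit */

/* Without the copy, the NEXT_HOP flag would advertise whatever happened
 * to sit in the legacy field (typically 0.0.0.0) for EVPN routes
 * imported into an IPv4 VRF table. */
static void demo_fixup_ipv4_nexthop(struct demo_attr *attr)
{
	attr->nexthop = attr->mp_nexthop_global_in;
	attr->flag |= DEMO_FLAG_NEXT_HOP;
}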
index b42296f4de455f92fb761cfb37d6a7e375bd3e9d..3f801f7ea0288136498581b84c81eddae4898846 100644 (file)
@@ -4952,7 +4952,7 @@ void bgp_evpn_mh_finish(void)
                bgp_evpn_es_local_info_clear(es, true);
        }
        if (bgp_mh_info->t_cons_check)
-               thread_cancel(&bgp_mh_info->t_cons_check);
+               THREAD_OFF(bgp_mh_info->t_cons_check);
        list_delete(&bgp_mh_info->local_es_list);
        list_delete(&bgp_mh_info->pend_es_list);
        list_delete(&bgp_mh_info->ead_es_export_rtl);
index a94ff5ef64f12cd3fc31b08464fa64b3cd4e7d2b..6ba516c39c5befb5d558e207d854f097b543993d 100644 (file)
@@ -4036,6 +4036,9 @@ DEFUN (no_bgp_evpn_advertise_type5,
        afi_t afi = 0;
        safi_t safi = 0;
 
+       if (!bgp_vrf)
+               return CMD_WARNING;
+
        argv_find_and_parse_afi(argv, argc, &idx_afi, &afi);
        argv_find_and_parse_safi(argv, argc, &idx_safi, &safi);
 
@@ -4344,10 +4347,16 @@ DEFUN(show_bgp_l2vpn_evpn_vni,
                                                                 : "Disabled");
                        json_object_string_add(
                                json, "flooding",
-                               bgp_evpn->vxlan_flood_ctrl
-                                               == VXLAN_FLOOD_HEAD_END_REPL
+                               bgp_evpn->vxlan_flood_ctrl ==
+                                               VXLAN_FLOOD_HEAD_END_REPL
                                        ? "Head-end replication"
                                        : "Disabled");
+                       json_object_string_add(
+                               json, "vxlanFlooding",
+                               bgp_evpn->vxlan_flood_ctrl ==
+                                               VXLAN_FLOOD_HEAD_END_REPL
+                                       ? "Enabled"
+                                       : "Disabled");
                        json_object_int_add(json, "numVnis", num_vnis);
                        json_object_int_add(json, "numL2Vnis", num_l2vnis);
                        json_object_int_add(json, "numL3Vnis", num_l3vnis);
@@ -4361,10 +4370,15 @@ DEFUN(show_bgp_l2vpn_evpn_vni,
                        vty_out(vty, "Advertise All VNI flag: %s\n",
                                is_evpn_enabled() ? "Enabled" : "Disabled");
                        vty_out(vty, "BUM flooding: %s\n",
-                               bgp_evpn->vxlan_flood_ctrl
-                                               == VXLAN_FLOOD_HEAD_END_REPL
+                               bgp_evpn->vxlan_flood_ctrl ==
+                                               VXLAN_FLOOD_HEAD_END_REPL
                                        ? "Head-end replication"
                                        : "Disabled");
+                       vty_out(vty, "VXLAN flooding: %s\n",
+                               bgp_evpn->vxlan_flood_ctrl ==
+                                               VXLAN_FLOOD_HEAD_END_REPL
+                                       ? "Enabled"
+                                       : "Disabled");
                        vty_out(vty, "Number of L2 VNIs: %u\n", num_l2vnis);
                        vty_out(vty, "Number of L3 VNIs: %u\n", num_l3vnis);
                }
@@ -4700,7 +4714,7 @@ DEFUN(show_bgp_l2vpn_evpn_route_rd,
        if (uj)
                json = json_object_new_object();
 
-       if (argv_find(argv, argc, "all", &rd_all)) {
+       if (!argv_find(argv, argc, "all", &rd_all)) {
                /* get the RD */
                if (argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
                              &idx_ext_community)) {
@@ -4772,7 +4786,7 @@ DEFUN(show_bgp_l2vpn_evpn_route_rd_macip,
                json = json_object_new_object();
 
        /* get the prd */
-       if (argv_find(argv, argc, "all", &rd_all)) {
+       if (!argv_find(argv, argc, "all", &rd_all)) {
                if (argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
                              &idx_ext_community)) {
                        ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
index fc9fc1e523ce7ef97ec39161533746ec3a44f138..89216229538c894dc689769469fe895c6b34be68 100644 (file)
@@ -189,7 +189,7 @@ static void as_list_filter_add(struct as_list *aslist,
                replace = bgp_aslist_seq_check(aslist, asfilter->seq);
                if (replace) {
                        as_filter_entry_replace(aslist, replace, asfilter);
-                       return;
+                       goto hook;
                }
 
                /* Check insert point. */
@@ -218,6 +218,7 @@ static void as_list_filter_add(struct as_list *aslist,
                aslist->tail = asfilter;
        }
 
+hook:
        /* Run hook function. */
        if (as_list_master.add_hook)
                (*as_list_master.add_hook)(aslist->name);
@@ -484,6 +485,7 @@ DEFUN(as_path, bgp_as_path_cmd,
        if (!config_bgp_aspath_validate(regstr)) {
                vty_out(vty, "Invalid character in as-path access-list %s\n",
                        regstr);
+               XFREE(MTYPE_TMP, regstr);
                return CMD_WARNING_CONFIG_FAILED;
        }
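
The bgp_filter.c hunks above do two things: as_list_filter_add() now jumps to a shared hook: label so the add-hook also runs when an existing sequence number is replaced, and the as-path access-list command frees its duplicated regex string on the validation-failure path. The leak fix in isolation, as a hedged sketch with a stand-in validator:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for config_bgp_aspath_validate(); the real check is stricter. */
static bool demo_regstr_valid(const char *s)
{
	return s[0] != '\0';
}

/* Returns the validated copy on success (caller owns it), NULL on error. */
static char *demo_dup_valid_regstr(const char *arg)
{
	char *regstr = strdup(arg);

	if (!regstr || !demo_regstr_valid(regstr)) {
		free(regstr);	/* the fix: release the copy on the error path too */
		return NULL;
	}
	return regstr;
}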
 
index b034437a18d84ef92d3054e57faa5c3e09c1893f..b570c84d8b2a440be95436c505286963e8c3eb8f 100644 (file)
@@ -177,24 +177,24 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
         */
        bgp_keepalives_off(from_peer);
 
-       BGP_TIMER_OFF(peer->t_routeadv);
-       BGP_TIMER_OFF(peer->t_connect);
-       BGP_TIMER_OFF(peer->t_delayopen);
-       BGP_TIMER_OFF(peer->t_connect_check_r);
-       BGP_TIMER_OFF(peer->t_connect_check_w);
-       BGP_TIMER_OFF(from_peer->t_routeadv);
-       BGP_TIMER_OFF(from_peer->t_connect);
-       BGP_TIMER_OFF(from_peer->t_delayopen);
-       BGP_TIMER_OFF(from_peer->t_connect_check_r);
-       BGP_TIMER_OFF(from_peer->t_connect_check_w);
-       BGP_TIMER_OFF(from_peer->t_process_packet);
+       THREAD_OFF(peer->t_routeadv);
+       THREAD_OFF(peer->t_connect);
+       THREAD_OFF(peer->t_delayopen);
+       THREAD_OFF(peer->t_connect_check_r);
+       THREAD_OFF(peer->t_connect_check_w);
+       THREAD_OFF(from_peer->t_routeadv);
+       THREAD_OFF(from_peer->t_connect);
+       THREAD_OFF(from_peer->t_delayopen);
+       THREAD_OFF(from_peer->t_connect_check_r);
+       THREAD_OFF(from_peer->t_connect_check_w);
+       THREAD_OFF(from_peer->t_process_packet);
 
        /*
         * At this point in time, it is possible that there are packets pending
         * on various buffers. Those need to be transferred or dropped,
         * otherwise we'll get spurious failures during session establishment.
         */
-       frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
+       frr_with_mutex (&peer->io_mtx, &from_peer->io_mtx) {
                fd = peer->fd;
                peer->fd = from_peer->fd;
                from_peer->fd = fd;
@@ -365,23 +365,23 @@ void bgp_timer_set(struct peer *peer)
                   inactive.  All other timer must be turned off */
                if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
                    || peer->bgp->vrf_id == VRF_UNKNOWN) {
-                       BGP_TIMER_OFF(peer->t_start);
+                       THREAD_OFF(peer->t_start);
                } else {
                        BGP_TIMER_ON(peer->t_start, bgp_start_timer,
                                     peer->v_start);
                }
-               BGP_TIMER_OFF(peer->t_connect);
-               BGP_TIMER_OFF(peer->t_holdtime);
+               THREAD_OFF(peer->t_connect);
+               THREAD_OFF(peer->t_holdtime);
                bgp_keepalives_off(peer);
-               BGP_TIMER_OFF(peer->t_routeadv);
-               BGP_TIMER_OFF(peer->t_delayopen);
+               THREAD_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_delayopen);
                break;
 
        case Connect:
                /* After start timer is expired, the peer moves to Connect
                   status.  Make sure start timer is off and connect timer is
                   on. */
-               BGP_TIMER_OFF(peer->t_start);
+               THREAD_OFF(peer->t_start);
                if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
                        BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
                                     (peer->v_delayopen + peer->v_connect));
@@ -389,19 +389,19 @@ void bgp_timer_set(struct peer *peer)
                        BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
                                     peer->v_connect);
 
-               BGP_TIMER_OFF(peer->t_holdtime);
+               THREAD_OFF(peer->t_holdtime);
                bgp_keepalives_off(peer);
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                break;
 
        case Active:
                /* Active is waiting connection from remote peer.  And if
                   connect timer is expired, change status to Connect. */
-               BGP_TIMER_OFF(peer->t_start);
+               THREAD_OFF(peer->t_start);
                /* If peer is passive mode, do not set connect timer. */
                if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)
                    || CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
-                       BGP_TIMER_OFF(peer->t_connect);
+                       THREAD_OFF(peer->t_connect);
                } else {
                        if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
                                BGP_TIMER_ON(
@@ -411,56 +411,56 @@ void bgp_timer_set(struct peer *peer)
                                BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
                                             peer->v_connect);
                }
-               BGP_TIMER_OFF(peer->t_holdtime);
+               THREAD_OFF(peer->t_holdtime);
                bgp_keepalives_off(peer);
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                break;
 
        case OpenSent:
                /* OpenSent status. */
-               BGP_TIMER_OFF(peer->t_start);
-               BGP_TIMER_OFF(peer->t_connect);
+               THREAD_OFF(peer->t_start);
+               THREAD_OFF(peer->t_connect);
                if (peer->v_holdtime != 0) {
                        BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
                                     peer->v_holdtime);
                } else {
-                       BGP_TIMER_OFF(peer->t_holdtime);
+                       THREAD_OFF(peer->t_holdtime);
                }
                bgp_keepalives_off(peer);
-               BGP_TIMER_OFF(peer->t_routeadv);
-               BGP_TIMER_OFF(peer->t_delayopen);
+               THREAD_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_delayopen);
                break;
 
        case OpenConfirm:
                /* OpenConfirm status. */
-               BGP_TIMER_OFF(peer->t_start);
-               BGP_TIMER_OFF(peer->t_connect);
+               THREAD_OFF(peer->t_start);
+               THREAD_OFF(peer->t_connect);
 
                /* If the negotiated Hold Time value is zero, then the Hold Time
                   timer and KeepAlive timers are not started. */
                if (peer->v_holdtime == 0) {
-                       BGP_TIMER_OFF(peer->t_holdtime);
+                       THREAD_OFF(peer->t_holdtime);
                        bgp_keepalives_off(peer);
                } else {
                        BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
                                     peer->v_holdtime);
                        bgp_keepalives_on(peer);
                }
-               BGP_TIMER_OFF(peer->t_routeadv);
-               BGP_TIMER_OFF(peer->t_delayopen);
+               THREAD_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_delayopen);
                break;
 
        case Established:
                /* In Established status start and connect timer is turned
                   off. */
-               BGP_TIMER_OFF(peer->t_start);
-               BGP_TIMER_OFF(peer->t_connect);
-               BGP_TIMER_OFF(peer->t_delayopen);
+               THREAD_OFF(peer->t_start);
+               THREAD_OFF(peer->t_connect);
+               THREAD_OFF(peer->t_delayopen);
 
                /* Same as OpenConfirm, if holdtime is zero then both holdtime
                   and keepalive must be turned off. */
                if (peer->v_holdtime == 0) {
-                       BGP_TIMER_OFF(peer->t_holdtime);
+                       THREAD_OFF(peer->t_holdtime);
                        bgp_keepalives_off(peer);
                } else {
                        BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
@@ -469,22 +469,22 @@ void bgp_timer_set(struct peer *peer)
                }
                break;
        case Deleted:
-               BGP_TIMER_OFF(peer->t_gr_restart);
-               BGP_TIMER_OFF(peer->t_gr_stale);
+               THREAD_OFF(peer->t_gr_restart);
+               THREAD_OFF(peer->t_gr_stale);
 
                FOREACH_AFI_SAFI (afi, safi)
-                       BGP_TIMER_OFF(peer->t_llgr_stale[afi][safi]);
+                       THREAD_OFF(peer->t_llgr_stale[afi][safi]);
 
-               BGP_TIMER_OFF(peer->t_pmax_restart);
-               BGP_TIMER_OFF(peer->t_refresh_stalepath);
+               THREAD_OFF(peer->t_pmax_restart);
+               THREAD_OFF(peer->t_refresh_stalepath);
        /* fallthru */
        case Clearing:
-               BGP_TIMER_OFF(peer->t_start);
-               BGP_TIMER_OFF(peer->t_connect);
-               BGP_TIMER_OFF(peer->t_holdtime);
+               THREAD_OFF(peer->t_start);
+               THREAD_OFF(peer->t_connect);
+               THREAD_OFF(peer->t_holdtime);
                bgp_keepalives_off(peer);
-               BGP_TIMER_OFF(peer->t_routeadv);
-               BGP_TIMER_OFF(peer->t_delayopen);
+               THREAD_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_delayopen);
                break;
        case BGP_STATUS_MAX:
                flog_err(EC_LIB_DEVELOPMENT,
@@ -516,7 +516,7 @@ static void bgp_connect_timer(struct thread *thread)
        peer = THREAD_ARG(thread);
 
        /* stop the DelayOpenTimer if it is running */
-       BGP_TIMER_OFF(peer->t_delayopen);
+       THREAD_OFF(peer->t_delayopen);
 
        assert(!peer->t_write);
        assert(!peer->t_read);
@@ -647,7 +647,7 @@ static void bgp_graceful_restart_timer_off(struct peer *peer)
                        return;
 
        UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
-       BGP_TIMER_OFF(peer->t_gr_stale);
+       THREAD_OFF(peer->t_gr_stale);
 
        if (peer_dynamic_neighbor(peer) &&
            !(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
@@ -965,7 +965,7 @@ void bgp_start_routeadv(struct bgp *bgp)
        for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
                if (!peer_established(peer))
                        continue;
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
        }
 }
@@ -985,7 +985,7 @@ void bgp_adjust_routeadv(struct peer *peer)
                 * different
                 * duration and schedule write thread immediately.
                 */
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
 
                peer->synctime = bgp_clock();
                /* If suppress fib pending is enabled, route is advertised to
@@ -1017,7 +1017,7 @@ void bgp_adjust_routeadv(struct peer *peer)
         */
        diff = difftime(nowtime, peer->last_update);
        if (diff > (double)peer->v_routeadv) {
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
                return;
        }
@@ -1044,7 +1044,7 @@ void bgp_adjust_routeadv(struct peer *peer)
                remain = peer->v_routeadv;
        diff = peer->v_routeadv - diff;
        if (diff <= (double)remain) {
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, diff);
        }
 }
@@ -1401,7 +1401,7 @@ int bgp_stop(struct peer *peer)
 
                /* graceful restart */
                if (peer->t_gr_stale) {
-                       BGP_TIMER_OFF(peer->t_gr_stale);
+                       THREAD_OFF(peer->t_gr_stale);
                        if (bgp_debug_neighbor_events(peer))
                                zlog_debug(
                                        "%pBP graceful restart stalepath timer stopped",
@@ -1431,7 +1431,7 @@ int bgp_stop(struct peer *peer)
 
                /* Stop route-refresh stalepath timer */
                if (peer->t_refresh_stalepath) {
-                       BGP_TIMER_OFF(peer->t_refresh_stalepath);
+                       THREAD_OFF(peer->t_refresh_stalepath);
 
                        if (bgp_debug_neighbor_events(peer))
                                zlog_debug(
@@ -1464,8 +1464,7 @@ int bgp_stop(struct peer *peer)
 
                                /* There is no pending EOR message */
                                if (gr_info->eor_required == 0) {
-                                       BGP_TIMER_OFF(
-                                               gr_info->t_select_deferral);
+                                       THREAD_OFF(gr_info->t_select_deferral);
                                        gr_info->eor_received = 0;
                                }
                        }
@@ -1494,14 +1493,14 @@ int bgp_stop(struct peer *peer)
        THREAD_OFF(peer->t_connect_check_w);
 
        /* Stop all timers. */
-       BGP_TIMER_OFF(peer->t_start);
-       BGP_TIMER_OFF(peer->t_connect);
-       BGP_TIMER_OFF(peer->t_holdtime);
-       BGP_TIMER_OFF(peer->t_routeadv);
-       BGP_TIMER_OFF(peer->t_delayopen);
+       THREAD_OFF(peer->t_start);
+       THREAD_OFF(peer->t_connect);
+       THREAD_OFF(peer->t_holdtime);
+       THREAD_OFF(peer->t_routeadv);
+       THREAD_OFF(peer->t_delayopen);
 
        /* Clear input and output buffer.  */
-       frr_with_mutex(&peer->io_mtx) {
+       frr_with_mutex (&peer->io_mtx) {
                if (peer->ibuf)
                        stream_fifo_clean(peer->ibuf);
                if (peer->obuf)
@@ -1993,7 +1992,7 @@ static int bgp_fsm_holdtime_expire(struct peer *peer)
 static int bgp_fsm_delayopen_timer_expire(struct peer *peer)
 {
        /* Stop the DelayOpenTimer */
-       BGP_TIMER_OFF(peer->t_delayopen);
+       THREAD_OFF(peer->t_delayopen);
 
        /* Send open message to peer */
        bgp_open_send(peer);
@@ -2203,7 +2202,7 @@ static int bgp_establish(struct peer *peer)
        else {
                UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_MODE);
                if (peer->t_gr_stale) {
-                       BGP_TIMER_OFF(peer->t_gr_stale);
+                       THREAD_OFF(peer->t_gr_stale);
                        if (bgp_debug_neighbor_events(peer))
                                zlog_debug(
                                        "%pBP graceful restart stalepath timer stopped",
@@ -2212,7 +2211,7 @@ static int bgp_establish(struct peer *peer)
        }
 
        if (peer->t_gr_restart) {
-               BGP_TIMER_OFF(peer->t_gr_restart);
+               THREAD_OFF(peer->t_gr_restart);
                if (bgp_debug_neighbor_events(peer))
                        zlog_debug("%pBP graceful restart timer stopped", peer);
        }
@@ -2228,7 +2227,7 @@ static int bgp_establish(struct peer *peer)
         */
        FOREACH_AFI_SAFI (afi, safi) {
                if (peer->t_llgr_stale[afi][safi]) {
-                       BGP_TIMER_OFF(peer->t_llgr_stale[afi][safi]);
+                       THREAD_OFF(peer->t_llgr_stale[afi][safi]);
                        if (bgp_debug_neighbor_events(peer))
                                zlog_debug(
                                        "%pBP Long-lived stale timer stopped for afi/safi: %d/%d",
@@ -2273,7 +2272,7 @@ static int bgp_establish(struct peer *peer)
         * of read-only mode.
         */
        if (!bgp_update_delay_active(peer->bgp)) {
-               BGP_TIMER_OFF(peer->t_routeadv);
+               THREAD_OFF(peer->t_routeadv);
                BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
        }
 
@@ -2309,14 +2308,14 @@ static int bgp_establish(struct peer *peer)
 /* Keepalive packet is received. */
 static int bgp_fsm_keepalive(struct peer *peer)
 {
-       BGP_TIMER_OFF(peer->t_holdtime);
+       THREAD_OFF(peer->t_holdtime);
        return 0;
 }
 
 /* Update packet is received. */
 static int bgp_fsm_update(struct peer *peer)
 {
-       BGP_TIMER_OFF(peer->t_holdtime);
+       THREAD_OFF(peer->t_holdtime);
        return 0;
 }
 
@@ -2358,13 +2357,13 @@ void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops)
                break;
        case Connect:
                if (!has_valid_nexthops) {
-                       BGP_TIMER_OFF(peer->t_connect);
+                       THREAD_OFF(peer->t_connect);
                        BGP_EVENT_ADD(peer, TCP_fatal_error);
                }
                break;
        case Active:
                if (has_valid_nexthops) {
-                       BGP_TIMER_OFF(peer->t_connect);
+                       THREAD_OFF(peer->t_connect);
                        BGP_EVENT_ADD(peer, ConnectRetry_timer_expired);
                }
                break;
index 765a5aec5ee15c2c35ba7c8658478498afb358c0..aaf6c480b2400b237b81b3825b75d1c260f84b14 100644 (file)
                        thread_add_timer(bm->master, (F), peer, (V), &(T));    \
        } while (0)
 
-#define BGP_TIMER_OFF(T)                                                       \
-       do {                                                                   \
-               THREAD_OFF((T));                                               \
-       } while (0)
-
 #define BGP_EVENT_ADD(P, E)                                                    \
        do {                                                                   \
                if ((P)->status != Deleted)                                    \
index 75d34a84e02d8bf7f2d5c263529a6f4577876479..aba28fa50449d671096823aebb29d9bc61165a71 100644 (file)
@@ -134,7 +134,7 @@ static void bgp_process_writes(struct thread *thread)
 
        struct frr_pthread *fpt = bgp_pth_io;
 
-       frr_with_mutex(&peer->io_mtx) {
+       frr_with_mutex (&peer->io_mtx) {
                status = bgp_write(peer);
                reschedule = (stream_fifo_head(peer->obuf) != NULL);
        }
@@ -188,7 +188,7 @@ static void bgp_process_reads(struct thread *thread)
 
        struct frr_pthread *fpt = bgp_pth_io;
 
-       frr_with_mutex(&peer->io_mtx) {
+       frr_with_mutex (&peer->io_mtx) {
                status = bgp_read(peer, &code);
        }
 
@@ -247,7 +247,7 @@ static void bgp_process_reads(struct thread *thread)
                        stream_set_endp(pkt, pktsize);
 
                        frrtrace(2, frr_bgp, packet_read, peer, pkt);
-                       frr_with_mutex(&peer->io_mtx) {
+                       frr_with_mutex (&peer->io_mtx) {
                                stream_fifo_push(peer->ibuf, pkt);
                        }
 
index 86202a0e3dc5f6f0f0287eade66157aaab2497ef..158f1633585639a098218d537625e2d158a160d5 100644 (file)
@@ -252,7 +252,7 @@ void bgp_keepalives_on(struct peer *peer)
         */
        assert(peerhash_mtx);
 
-       frr_with_mutex(peerhash_mtx) {
+       frr_with_mutex (peerhash_mtx) {
                holder.peer = peer;
                if (!hash_lookup(peerhash, &holder)) {
                        struct pkat *pkat = pkat_new(peer);
@@ -280,7 +280,7 @@ void bgp_keepalives_off(struct peer *peer)
         */
        assert(peerhash_mtx);
 
-       frr_with_mutex(peerhash_mtx) {
+       frr_with_mutex (peerhash_mtx) {
                holder.peer = peer;
                struct pkat *res = hash_release(peerhash, &holder);
                if (res) {
@@ -293,7 +293,7 @@ void bgp_keepalives_off(struct peer *peer)
 
 void bgp_keepalives_wake(void)
 {
-       frr_with_mutex(peerhash_mtx) {
+       frr_with_mutex (peerhash_mtx) {
                pthread_cond_signal(peerhash_cond);
        }
 }
index d9b0fab518dcfcf443cd48e497f1dfd0dc9b1647..1297eb440ea36748ae5ca2ecb1f02308bda83912 100644 (file)
@@ -454,8 +454,7 @@ int main(int argc, char **argv)
                }
                case 'l':
                        listnode_add_sort_nodup(addresses, optarg);
-               /* listenon implies -n */
-               /* fallthru */
+                       break;
                case 'n':
                        no_fib_flag = 1;
                        break;
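
The bgp_main.c hunk above removes the intentional fallthrough from the -l (listen address) case into the -n case, so specifying a listen address no longer implies suppressing FIB installation. Reduced to a hypothetical option switch:

#include <stdbool.h>

/* Hypothetical flags; real option handling lives in bgpd's main(). */
static bool listen_addr_given;
static bool no_fib_flag;

static void demo_handle_opt(int opt)
{
	switch (opt) {
	case 'l':
		listen_addr_given = true;
		break;		/* the fix: previously fell through into 'n' */
	case 'n':
		no_fib_flag = true;
		break;
	}
}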
index cc4ff57f4effe8817e6c5ac8fa244a78765323d7..7b8f0df2e27e80e29d4f8b90a101b31448b0c4a6 100644 (file)
@@ -767,23 +767,74 @@ static void unsetsids(struct bgp_path_info *bpi)
        memset(extra->sid, 0, sizeof(extra->sid));
 }
 
+static bool leak_update_nexthop_valid(struct bgp *to_bgp, struct bgp_dest *bn,
+                                     struct attr *new_attr, afi_t afi,
+                                     safi_t safi,
+                                     struct bgp_path_info *source_bpi,
+                                     struct bgp_path_info *bpi,
+                                     struct bgp *bgp_orig,
+                                     const struct prefix *p, int debug)
+{
+       struct bgp_path_info *bpi_ultimate;
+       struct bgp *bgp_nexthop;
+       bool nh_valid;
+
+       bpi_ultimate = bgp_get_imported_bpi_ultimate(source_bpi);
+
+       if (bpi->extra && bpi->extra->bgp_orig)
+               bgp_nexthop = bpi->extra->bgp_orig;
+       else
+               bgp_nexthop = bgp_orig;
+
+       /*
+        * No nexthop tracking for redistributed routes or for
+        * EVPN-imported routes that get leaked.
+        */
+       if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
+           is_pi_family_evpn(bpi_ultimate))
+               nh_valid = 1;
+       else
+               /*
+                * TBD do we need to do anything about the
+                * 'connected' parameter?
+                */
+               nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop, afi,
+                                                  safi, bpi, NULL, 0, p);
+
+       /*
+        * If SRv6 VPN is used instead of MPLS, the SID allocation needs
+        * to be checked as well. If no SID is allocated, the RIB entry
+        * will be invalid.
+        */
+       if (to_bgp->srv6_enabled &&
+           (!new_attr->srv6_l3vpn && !new_attr->srv6_vpn)) {
+               nh_valid = false;
+       }
+
+       if (debug)
+               zlog_debug("%s: %pFX nexthop is %svalid (in vrf %s)", __func__,
+                          p, (nh_valid ? "" : "not "),
+                          bgp_nexthop->name_pretty);
+
+       return nh_valid;
+}
+
 /*
  * returns pointer to new bgp_path_info upon success
  */
 static struct bgp_path_info *
-leak_update(struct bgp *bgp, /* destination bgp instance */
-           struct bgp_dest *bn, struct attr *new_attr, /* already interned */
+leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
+           struct attr *new_attr, /* already interned */
            afi_t afi, safi_t safi, struct bgp_path_info *source_bpi,
-           mpls_label_t *label, uint32_t num_labels, void *parent,
-           struct bgp *bgp_orig, struct prefix *nexthop_orig,
-           int nexthop_self_flag, int debug)
+           mpls_label_t *label, uint32_t num_labels, struct bgp *bgp_orig,
+           struct prefix *nexthop_orig, int nexthop_self_flag, int debug)
 {
        const struct prefix *p = bgp_dest_get_prefix(bn);
        struct bgp_path_info *bpi;
-       struct bgp_path_info *bpi_ultimate;
        struct bgp_path_info *new;
        struct bgp_path_info_extra *extra;
        uint32_t num_sids = 0;
+       void *parent = source_bpi;
 
        if (new_attr->srv6_l3vpn || new_attr->srv6_vpn)
                num_sids = 1;
@@ -791,7 +842,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
        if (debug)
                zlog_debug(
                        "%s: entry: leak-to=%s, p=%pBD, type=%d, sub_type=%d",
-                       __func__, bgp->name_pretty, bn, source_bpi->type,
+                       __func__, to_bgp->name_pretty, bn, source_bpi->type,
                        source_bpi->sub_type);
 
        /*
@@ -809,7 +860,6 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
         * schemes that could be implemented in the future.
         *
         */
-       bpi_ultimate = bgp_get_imported_bpi_ultimate(source_bpi);
 
        /*
         * match parent
@@ -827,7 +877,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                        if (debug) {
                                zlog_debug(
                                        "%s: ->%s(s_flags: 0x%x b_flags: 0x%x): %pFX: Found route, being removed, not leaking",
-                                       __func__, bgp->name_pretty,
+                                       __func__, to_bgp->name_pretty,
                                        source_bpi->flags, bpi->flags, p);
                        }
                        return NULL;
@@ -840,7 +890,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                        if (debug)
                                zlog_debug(
                                        "%s: ->%s: %pBD: Found route, no change",
-                                       __func__, bgp->name_pretty, bn);
+                                       __func__, to_bgp->name_pretty, bn);
                        return NULL;
                }
 
@@ -858,8 +908,9 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                        if (!ecommunity_cmp(
                                    bgp_attr_get_ecommunity(bpi->attr),
                                    bgp_attr_get_ecommunity(new_attr))) {
-                               vpn_leak_to_vrf_withdraw(bgp, bpi);
-                               bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
+                               vpn_leak_to_vrf_withdraw(to_bgp, bpi);
+                               bgp_aggregate_decrement(to_bgp, p, bpi, afi,
+                                                       safi);
                                bgp_path_info_delete(bn, bpi);
                        }
                }
@@ -871,7 +922,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED))
                        bgp_path_info_restore(bn, bpi);
                else
-                       bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
+                       bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
                bgp_attr_unintern(&bpi->attr);
                bpi->attr = new_attr;
                bpi->uptime = bgp_clock();
@@ -914,54 +965,21 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                if (nexthop_self_flag)
                        bgp_path_info_set_flag(bn, bpi, BGP_PATH_ANNC_NH_SELF);
 
-               struct bgp *bgp_nexthop = bgp;
-               int nh_valid;
-
-               if (bpi->extra && bpi->extra->bgp_orig)
-                       bgp_nexthop = bpi->extra->bgp_orig;
-
-               /*
-                * No nexthop tracking for redistributed routes or for
-                * EVPN-imported routes that get leaked.
-                */
-               if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
-                   is_pi_family_evpn(bpi_ultimate))
-                       nh_valid = 1;
+               if (leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi,
+                                             source_bpi, bpi, bgp_orig, p,
+                                             debug))
+                       bgp_path_info_set_flag(bn, bpi, BGP_PATH_VALID);
                else
-                       /*
-                        * TBD do we need to do anything about the
-                        * 'connected' parameter?
-                        */
-                       nh_valid = bgp_find_or_add_nexthop(
-                               bgp, bgp_nexthop, afi, safi, bpi, NULL, 0, p);
-
-               /*
-                * If you are using SRv6 VPN instead of MPLS, it need to check
-                * the SID allocation. If the sid is not allocated, the rib
-                * will be invalid.
-                */
-               if (bgp->srv6_enabled
-                   && (!new_attr->srv6_l3vpn && !new_attr->srv6_vpn)) {
                        bgp_path_info_unset_flag(bn, bpi, BGP_PATH_VALID);
-                       nh_valid = false;
-               }
-
-               if (debug)
-                       zlog_debug("%s: nexthop is %svalid (in vrf %s)",
-                               __func__, (nh_valid ? "" : "not "),
-                               bgp_nexthop->name_pretty);
-
-               if (nh_valid)
-                       bgp_path_info_set_flag(bn, bpi, BGP_PATH_VALID);
 
                /* Process change. */
-               bgp_aggregate_increment(bgp, p, bpi, afi, safi);
-               bgp_process(bgp, bn, afi, safi);
+               bgp_aggregate_increment(to_bgp, p, bpi, afi, safi);
+               bgp_process(to_bgp, bn, afi, safi);
                bgp_dest_unlock_node(bn);
 
                if (debug)
                        zlog_debug("%s: ->%s: %pBD Found route, changed attr",
-                                  __func__, bgp->name_pretty, bn);
+                                  __func__, to_bgp->name_pretty, bn);
 
                return bpi;
        }
@@ -970,14 +988,14 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
                if (debug) {
                        zlog_debug(
                                "%s: ->%s(s_flags: 0x%x): %pFX: New route, being removed, not leaking",
-                               __func__, bgp->name_pretty,
+                               __func__, to_bgp->name_pretty,
                                source_bpi->flags, p);
                }
                return NULL;
        }
 
        new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_IMPORTED, 0,
-               bgp->peer_self, new_attr, bn);
+                       to_bgp->peer_self, new_attr, bn);
 
        if (nexthop_self_flag)
                bgp_path_info_set_flag(bn, new, BGP_PATH_ANNC_NH_SELF);
@@ -1019,67 +1037,28 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
        if (nexthop_orig)
                new->extra->nexthop_orig = *nexthop_orig;
 
-       /*
-        * nexthop tracking for unicast routes
-        */
-       struct bgp *bgp_nexthop = bgp;
-       int nh_valid;
-
-       if (new->extra->bgp_orig)
-               bgp_nexthop = new->extra->bgp_orig;
-
-       /*
-        * No nexthop tracking for redistributed routes because
-        * their originating protocols will do the tracking and
-        * withdraw those routes if the nexthops become unreachable
-        * This also holds good for EVPN-imported routes that get
-        * leaked.
-        */
-       if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
-           is_pi_family_evpn(bpi_ultimate))
-               nh_valid = 1;
+       if (leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi,
+                                     source_bpi, new, bgp_orig, p, debug))
+               bgp_path_info_set_flag(bn, new, BGP_PATH_VALID);
        else
-               /*
-                * TBD do we need to do anything about the
-                * 'connected' parameter?
-                */
-               nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi,
-                                                  new, NULL, 0, p);
-
-       /*
-        * If you are using SRv6 VPN instead of MPLS, it need to check
-        * the SID allocation. If the sid is not allocated, the rib
-        * will be invalid.
-        */
-       if (bgp->srv6_enabled
-           && (!new->attr->srv6_l3vpn && !new->attr->srv6_vpn)) {
                bgp_path_info_unset_flag(bn, new, BGP_PATH_VALID);
-               nh_valid = false;
-       }
 
-       if (debug)
-               zlog_debug("%s: nexthop is %svalid (in vrf %s)",
-                       __func__, (nh_valid ? "" : "not "),
-                       bgp_nexthop->name_pretty);
-       if (nh_valid)
-               bgp_path_info_set_flag(bn, new, BGP_PATH_VALID);
-
-       bgp_aggregate_increment(bgp, p, new, afi, safi);
+       bgp_aggregate_increment(to_bgp, p, new, afi, safi);
        bgp_path_info_add(bn, new);
 
        bgp_dest_unlock_node(bn);
-       bgp_process(bgp, bn, afi, safi);
+       bgp_process(to_bgp, bn, afi, safi);
 
        if (debug)
                zlog_debug("%s: ->%s: %pBD: Added new route", __func__,
-                          bgp->name_pretty, bn);
+                          to_bgp->name_pretty, bn);
 
        return new;
 }
 
 /* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
-void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,         /* to */
-                             struct bgp *bgp_vrf,          /* from */
+void vpn_leak_from_vrf_update(struct bgp *to_bgp,           /* to */
+                             struct bgp *from_bgp,        /* from */
                              struct bgp_path_info *path_vrf) /* route */
 {
        int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1095,7 +1074,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,            /* to */
        int nexthop_self_flag = 0;
 
        if (debug)
-               zlog_debug("%s: from vrf %s", __func__, bgp_vrf->name_pretty);
+               zlog_debug("%s: from vrf %s", __func__, from_bgp->name_pretty);
 
        if (debug && bgp_attr_get_ecommunity(path_vrf->attr)) {
                char *s = ecommunity_ecom2str(
@@ -1103,11 +1082,11 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
                        ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
 
                zlog_debug("%s: %s path_vrf->type=%d, EC{%s}", __func__,
-                          bgp_vrf->name, path_vrf->type, s);
+                          from_bgp->name, path_vrf->type, s);
                XFREE(MTYPE_ECOMMUNITY_STR, s);
        }
 
-       if (!bgp_vpn)
+       if (!to_bgp)
                return;
 
        if (!afi) {
@@ -1120,10 +1099,10 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
        if (!is_route_injectable_into_vpn(path_vrf))
                return;
 
-       if (!vpn_leak_to_vpn_active(bgp_vrf, afi, &debugmsg)) {
+       if (!vpn_leak_to_vpn_active(from_bgp, afi, &debugmsg)) {
                if (debug)
                        zlog_debug("%s: %s skipping: %s", __func__,
-                                  bgp_vrf->name, debugmsg);
+                                  from_bgp->name, debugmsg);
                return;
        }
 
@@ -1133,23 +1112,23 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
        /*
         * route map handling
         */
-       if (bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN]) {
+       if (from_bgp->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN]) {
                struct bgp_path_info info;
                route_map_result_t ret;
 
                memset(&info, 0, sizeof(info));
-               info.peer = bgp_vpn->peer_self;
+               info.peer = to_bgp->peer_self;
                info.attr = &static_attr;
-               ret = route_map_apply(
-                       bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN],
-                       p, &info);
+               ret = route_map_apply(from_bgp->vpn_policy[afi]
+                                             .rmap[BGP_VPN_POLICY_DIR_TOVPN],
+                                     p, &info);
                if (RMAP_DENYMATCH == ret) {
                        bgp_attr_flush(&static_attr); /* free any added parts */
                        if (debug)
                                zlog_debug(
                                        "%s: vrf %s route map \"%s\" says DENY, returning",
-                                       __func__, bgp_vrf->name_pretty,
-                                       bgp_vrf->vpn_policy[afi]
+                                       __func__, from_bgp->name_pretty,
+                                       from_bgp->vpn_policy[afi]
                                                .rmap[BGP_VPN_POLICY_DIR_TOVPN]
                                                ->name);
                        return;
@@ -1177,17 +1156,17 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
        old_ecom = bgp_attr_get_ecommunity(&static_attr);
        if (old_ecom) {
                new_ecom = ecommunity_dup(old_ecom);
-               if (CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
-                               BGP_CONFIG_VRF_TO_VRF_EXPORT))
+               if (CHECK_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
+                              BGP_CONFIG_VRF_TO_VRF_EXPORT))
                        ecommunity_strip_rts(new_ecom);
-               new_ecom = ecommunity_merge(new_ecom,
-                       bgp_vrf->vpn_policy[afi]
-                               .rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
+               new_ecom = ecommunity_merge(
+                       new_ecom, from_bgp->vpn_policy[afi]
+                                         .rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
                if (!old_ecom->refcnt)
                        ecommunity_free(&old_ecom);
        } else {
                new_ecom = ecommunity_dup(
-                       bgp_vrf->vpn_policy[afi]
+                       from_bgp->vpn_policy[afi]
                                .rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
        }
        bgp_attr_set_ecommunity(&static_attr, new_ecom);
@@ -1204,10 +1183,10 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
 
        /* Nexthop */
        /* if policy nexthop not set, use 0 */
-       if (CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+       if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
                       BGP_VPN_POLICY_TOVPN_NEXTHOP_SET)) {
                struct prefix *nexthop =
-                       &bgp_vrf->vpn_policy[afi].tovpn_nexthop;
+                       &from_bgp->vpn_policy[afi].tovpn_nexthop;
 
                switch (nexthop->family) {
                case AF_INET:
@@ -1228,7 +1207,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,            /* to */
                        assert(0);
                }
        } else {
-               if (!CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
+               if (!CHECK_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
                                BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
                        if (afi == AFI_IP) {
                                /*
@@ -1266,7 +1245,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,            /* to */
                nexthop_self_flag = 1;
        }
 
-       label_val = bgp_vrf->vpn_policy[afi].tovpn_label;
+       label_val = from_bgp->vpn_policy[afi].tovpn_label;
        if (label_val == MPLS_LABEL_NONE) {
                encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
        } else {
@@ -1275,12 +1254,13 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
 
        /* Set originator ID to "me" */
        SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
-       static_attr.originator_id = bgp_vpn->router_id;
+       static_attr.originator_id = to_bgp->router_id;
 
        /* Set SID for SRv6 VPN */
-       if (bgp_vrf->vpn_policy[afi].tovpn_sid_locator) {
-               encode_label(bgp_vrf->vpn_policy[afi].tovpn_sid_transpose_label,
-                            &label);
+       if (from_bgp->vpn_policy[afi].tovpn_sid_locator) {
+               encode_label(
+                       from_bgp->vpn_policy[afi].tovpn_sid_transpose_label,
+                       &label);
                static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
                                sizeof(struct bgp_attr_srv6_l3vpn));
                static_attr.srv6_l3vpn->sid_flags = 0x00;
@@ -1298,7 +1278,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,            /* to */
                static_attr.srv6_l3vpn->transposition_offset =
                        BGP_PREFIX_SID_SRV6_TRANSPOSITION_OFFSET;
                memcpy(&static_attr.srv6_l3vpn->sid,
-                      bgp_vrf->vpn_policy[afi].tovpn_sid_locator,
+                      from_bgp->vpn_policy[afi].tovpn_sid_locator,
                       sizeof(struct in6_addr));
        }
 
@@ -1317,14 +1297,14 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
 
        /* Now new_attr is an allocated interned attr */
 
-       bn = bgp_afi_node_get(bgp_vpn->rib[afi][safi], afi, safi, p,
-                             &(bgp_vrf->vpn_policy[afi].tovpn_rd));
+       bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p,
+                             &(from_bgp->vpn_policy[afi].tovpn_rd));
 
        struct bgp_path_info *new_info;
 
-       new_info = leak_update(bgp_vpn, bn, new_attr, afi, safi, path_vrf,
-                              &label, 1, path_vrf, bgp_vrf, NULL,
-                              nexthop_self_flag, debug);
+       new_info =
+               leak_update(to_bgp, bn, new_attr, afi, safi, path_vrf, &label,
+                           1, from_bgp, NULL, nexthop_self_flag, debug);
 
        /*
         * Routes actually installed in the vpn RIB must also be
@@ -1336,11 +1316,11 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,          /* to */
         * because of loop checking.
         */
        if (new_info)
-               vpn_leak_to_vrf_update(bgp_vrf, new_info);
+               vpn_leak_to_vrf_update(from_bgp, new_info);
 }
 
-void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn,           /* to */
-                               struct bgp *bgp_vrf,            /* from */
+void vpn_leak_from_vrf_withdraw(struct bgp *to_bgp,            /* to */
+                               struct bgp *from_bgp,           /* from */
                                struct bgp_path_info *path_vrf) /* route */
 {
        int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1354,11 +1334,11 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn,            /* to */
        if (debug) {
                zlog_debug(
                        "%s: entry: leak-from=%s, p=%pBD, type=%d, sub_type=%d",
-                       __func__, bgp_vrf->name_pretty, path_vrf->net,
+                       __func__, from_bgp->name_pretty, path_vrf->net,
                        path_vrf->type, path_vrf->sub_type);
        }
 
-       if (!bgp_vpn)
+       if (!to_bgp)
                return;
 
        if (!afi) {
@@ -1371,7 +1351,7 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn,              /* to */
        if (!is_route_injectable_into_vpn(path_vrf))
                return;
 
-       if (!vpn_leak_to_vpn_active(bgp_vrf, afi, &debugmsg)) {
+       if (!vpn_leak_to_vpn_active(from_bgp, afi, &debugmsg)) {
                if (debug)
                        zlog_debug("%s: skipping: %s", __func__, debugmsg);
                return;
@@ -1380,8 +1360,8 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn,              /* to */
        if (debug)
                zlog_debug("%s: withdrawing (path_vrf=%p)", __func__, path_vrf);
 
-       bn = bgp_afi_node_get(bgp_vpn->rib[afi][safi], afi, safi, p,
-                             &(bgp_vrf->vpn_policy[afi].tovpn_rd));
+       bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p,
+                             &(from_bgp->vpn_policy[afi].tovpn_rd));
 
        if (!bn)
                return;
@@ -1397,17 +1377,16 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn,            /* to */
 
        if (bpi) {
                /* withdraw from looped vrfs as well */
-               vpn_leak_to_vrf_withdraw(bgp_vpn, bpi);
+               vpn_leak_to_vrf_withdraw(to_bgp, bpi);
 
-               bgp_aggregate_decrement(bgp_vpn, p, bpi, afi, safi);
+               bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
                bgp_path_info_delete(bn, bpi);
-               bgp_process(bgp_vpn, bn, afi, safi);
+               bgp_process(to_bgp, bn, afi, safi);
        }
        bgp_dest_unlock_node(bn);
 }
 
-void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
-                                   struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp, struct bgp *from_bgp,
                                    afi_t afi)
 {
        int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1415,9 +1394,9 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
        safi_t safi = SAFI_MPLS_VPN;
 
        /*
-        * Walk vpn table, delete bpi with bgp_orig == bgp_vrf
+        * Walk vpn table, delete bpi with bgp_orig == from_bgp
         */
-       for (pdest = bgp_table_top(bgp_vpn->rib[afi][safi]); pdest;
+       for (pdest = bgp_table_top(to_bgp->rib[afi][safi]); pdest;
             pdest = bgp_route_next(pdest)) {
 
                struct bgp_table *table;
@@ -1446,28 +1425,26 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
                                        continue;
                                if (!bpi->extra)
                                        continue;
-                               if ((struct bgp *)bpi->extra->bgp_orig
-                                   == bgp_vrf) {
+                               if ((struct bgp *)bpi->extra->bgp_orig ==
+                                   from_bgp) {
                                        /* delete route */
                                        if (debug)
                                                zlog_debug("%s: deleting it",
                                                           __func__);
                                        /* withdraw from leak-to vrfs as well */
-                                       vpn_leak_to_vrf_withdraw(bgp_vpn, bpi);
+                                       vpn_leak_to_vrf_withdraw(to_bgp, bpi);
                                        bgp_aggregate_decrement(
-                                               bgp_vpn,
-                                               bgp_dest_get_prefix(bn), bpi,
-                                               afi, safi);
+                                               to_bgp, bgp_dest_get_prefix(bn),
+                                               bpi, afi, safi);
                                        bgp_path_info_delete(bn, bpi);
-                                       bgp_process(bgp_vpn, bn, afi, safi);
+                                       bgp_process(to_bgp, bn, afi, safi);
                                }
                        }
                }
        }
 }
 
-void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
-                                 struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
                                  afi_t afi)
 {
        struct bgp_dest *bn;
@@ -1476,9 +1453,9 @@ void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
 
        if (debug)
                zlog_debug("%s: entry, afi=%d, vrf=%s", __func__, afi,
-                          bgp_vrf->name_pretty);
+                          from_bgp->name_pretty);
 
-       for (bn = bgp_table_top(bgp_vrf->rib[afi][SAFI_UNICAST]); bn;
+       for (bn = bgp_table_top(from_bgp->rib[afi][SAFI_UNICAST]); bn;
             bn = bgp_route_next(bn)) {
 
                if (debug)
@@ -1490,14 +1467,14 @@ void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
                                zlog_debug(
                                        "%s: calling vpn_leak_from_vrf_update",
                                        __func__);
-                       vpn_leak_from_vrf_update(bgp_vpn, bgp_vrf, bpi);
+                       vpn_leak_from_vrf_update(to_bgp, from_bgp, bpi);
                }
        }
 }
 
-static void
-vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,         /* to */
-                             struct bgp *bgp_vpn,          /* from */
+static bool
+vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp,           /* to */
+                             struct bgp *from_bgp,        /* from */
                              struct bgp_path_info *path_vpn) /* route */
 {
        const struct prefix *p = bgp_dest_get_prefix(path_vpn->net);
@@ -1518,26 +1495,26 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,          /* to */
 
        int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
 
-       if (!vpn_leak_from_vpn_active(bgp_vrf, afi, &debugmsg)) {
+       if (!vpn_leak_from_vpn_active(to_bgp, afi, &debugmsg)) {
                if (debug)
                        zlog_debug("%s: skipping: %s", __func__, debugmsg);
-               return;
+               return false;
        }
 
        /* Check for intersection of route targets */
        if (!ecom_intersect(
-                   bgp_vrf->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
+                   to_bgp->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
                    bgp_attr_get_ecommunity(path_vpn->attr))) {
                if (debug)
                        zlog_debug(
                                "from vpn (%s) to vrf (%s), skipping after no intersection of route targets",
-                               bgp_vpn->name_pretty, bgp_vrf->name_pretty);
-               return;
+                               from_bgp->name_pretty, to_bgp->name_pretty);
+               return false;
        }
 
        if (debug)
                zlog_debug("%s: updating %pFX to vrf %s", __func__, p,
-                          bgp_vrf->name_pretty);
+                          to_bgp->name_pretty);
 
        /* shallow copy */
        static_attr = *path_vpn->attr;
@@ -1547,8 +1524,8 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,            /* to */
 
        /* If doing VRF-to-VRF leaking, strip RTs. */
        old_ecom = bgp_attr_get_ecommunity(&static_attr);
-       if (old_ecom && CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
-                               BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+       if (old_ecom && CHECK_FLAG(to_bgp->af_flags[afi][safi],
+                                  BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
                new_ecom = ecommunity_dup(old_ecom);
                ecommunity_strip_rts(new_ecom);
                bgp_attr_set_ecommunity(&static_attr, new_ecom);
@@ -1580,7 +1557,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,            /* to */
                nexthop_orig.u.prefix4 = path_vpn->attr->mp_nexthop_global_in;
                nexthop_orig.prefixlen = IPV4_MAX_BITLEN;
 
-               if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+               if (CHECK_FLAG(to_bgp->af_flags[afi][safi],
                               BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
                        static_attr.nexthop.s_addr =
                                nexthop_orig.u.prefix4.s_addr;
@@ -1597,7 +1574,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,            /* to */
                nexthop_orig.u.prefix6 = path_vpn->attr->mp_nexthop_global;
                nexthop_orig.prefixlen = IPV6_MAX_BITLEN;
 
-               if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+               if (CHECK_FLAG(to_bgp->af_flags[afi][safi],
                               BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
                        static_attr.mp_nexthop_global = nexthop_orig.u.prefix6;
                }
@@ -1607,15 +1584,15 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,          /* to */
        /*
         * route map handling
         */
-       if (bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_FROMVPN]) {
+       if (to_bgp->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_FROMVPN]) {
                struct bgp_path_info info;
                route_map_result_t ret;
 
                memset(&info, 0, sizeof(info));
-               info.peer = bgp_vrf->peer_self;
+               info.peer = to_bgp->peer_self;
                info.attr = &static_attr;
                info.extra = path_vpn->extra; /* Used for source-vrf filter */
-               ret = route_map_apply(bgp_vrf->vpn_policy[afi]
+               ret = route_map_apply(to_bgp->vpn_policy[afi]
                                              .rmap[BGP_VPN_POLICY_DIR_FROMVPN],
                                      p, &info);
                if (RMAP_DENYMATCH == ret) {
@@ -1623,11 +1600,11 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,          /* to */
                        if (debug)
                                zlog_debug(
                                        "%s: vrf %s vpn-policy route map \"%s\" says DENY, returning",
-                                       __func__, bgp_vrf->name_pretty,
-                                       bgp_vrf->vpn_policy[afi]
+                                       __func__, to_bgp->name_pretty,
+                                       to_bgp->vpn_policy[afi]
                                                .rmap[BGP_VPN_POLICY_DIR_FROMVPN]
                                                ->name);
-                       return;
+                       return false;
                }
                /*
                 * if route-map changed nexthop, don't nexthop-self on output
@@ -1640,7 +1617,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,            /* to */
        new_attr = bgp_attr_intern(&static_attr);
        bgp_attr_flush(&static_attr);
 
-       bn = bgp_afi_node_get(bgp_vrf->rib[afi][safi], afi, safi, p, NULL);
+       bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p, NULL);
 
        /*
         * ensure labels are copied
@@ -1654,7 +1631,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,            /* to */
         * labels for these routes enables the non-labeled nexthops
         * from the originating VRF to be considered valid for this route.
         */
-       if (!CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+       if (!CHECK_FLAG(to_bgp->af_flags[afi][safi],
                        BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
                /* work back to original route */
                bpi_ultimate = bgp_get_imported_bpi_ultimate(path_vpn);
@@ -1692,18 +1669,20 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,          /* to */
        if (path_vpn->extra && path_vpn->extra->bgp_orig)
                src_vrf = path_vpn->extra->bgp_orig;
        else
-               src_vrf = bgp_vpn;
+               src_vrf = from_bgp;
 
-       leak_update(bgp_vrf, bn, new_attr, afi, safi, path_vpn, pLabels,
-                   num_labels, path_vpn, /* parent */
-                   src_vrf, &nexthop_orig, nexthop_self_flag, debug);
+       leak_update(to_bgp, bn, new_attr, afi, safi, path_vpn, pLabels,
+                   num_labels, src_vrf, &nexthop_orig, nexthop_self_flag,
+                   debug);
+       return true;
 }
 
-void vpn_leak_to_vrf_update(struct bgp *bgp_vpn,           /* from */
+bool vpn_leak_to_vrf_update(struct bgp *from_bgp,         /* from */
                            struct bgp_path_info *path_vpn) /* route */
 {
        struct listnode *mnode, *mnnode;
        struct bgp *bgp;
+       bool leak_success = false;
 
        int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
 
@@ -1715,12 +1694,14 @@ void vpn_leak_to_vrf_update(struct bgp *bgp_vpn,            /* from */
 
                if (!path_vpn->extra
                    || path_vpn->extra->bgp_orig != bgp) { /* no loop */
-                       vpn_leak_to_vrf_update_onevrf(bgp, bgp_vpn, path_vpn);
+                       leak_success |= vpn_leak_to_vrf_update_onevrf(
+                               bgp, from_bgp, path_vpn);
                }
        }
+       return leak_success;
 }
 
-void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,         /* from */
+void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp,       /* from */
                              struct bgp_path_info *path_vpn) /* route */
 {
        const struct prefix *p;
@@ -1804,8 +1785,7 @@ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,            /* from */
        }
 }
 
-void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */
-                                 afi_t afi)
+void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi)
 {
        struct bgp_dest *bn;
        struct bgp_path_info *bpi;
@@ -1817,40 +1797,38 @@ void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */
        /*
         * Walk vrf table, delete bpi with bgp_orig in a different vrf
         */
-       for (bn = bgp_table_top(bgp_vrf->rib[afi][safi]); bn;
+       for (bn = bgp_table_top(to_bgp->rib[afi][safi]); bn;
             bn = bgp_route_next(bn)) {
 
                for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
                     bpi = bpi->next) {
-                       if (bpi->extra
-                           && bpi->extra->bgp_orig != bgp_vrf
-                           && bpi->extra->parent
-                           && is_pi_family_vpn(bpi->extra->parent)) {
+                       if (bpi->extra && bpi->extra->bgp_orig != to_bgp &&
+                           bpi->extra->parent &&
+                           is_pi_family_vpn(bpi->extra->parent)) {
 
                                /* delete route */
-                               bgp_aggregate_decrement(bgp_vrf,
+                               bgp_aggregate_decrement(to_bgp,
                                                        bgp_dest_get_prefix(bn),
                                                        bpi, afi, safi);
                                bgp_path_info_delete(bn, bpi);
-                               bgp_process(bgp_vrf, bn, afi, safi);
+                               bgp_process(to_bgp, bn, afi, safi);
                        }
                }
        }
 }
 
-void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */
-                               struct bgp *bgp_vpn, /* from */
+void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *vpn_from,
                                afi_t afi)
 {
        struct bgp_dest *pdest;
        safi_t safi = SAFI_MPLS_VPN;
 
-       assert(bgp_vpn);
+       assert(vpn_from);
 
        /*
         * Walk vpn table
         */
-       for (pdest = bgp_table_top(bgp_vpn->rib[afi][safi]); pdest;
+       for (pdest = bgp_table_top(vpn_from->rib[afi][safi]); pdest;
             pdest = bgp_route_next(pdest)) {
                struct bgp_table *table;
                struct bgp_dest *bn;
@@ -1867,11 +1845,11 @@ void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */
                        for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
                             bpi = bpi->next) {
 
-                               if (bpi->extra
-                                   && bpi->extra->bgp_orig == bgp_vrf)
+                               if (bpi->extra &&
+                                   bpi->extra->bgp_orig == to_bgp)
                                        continue;
 
-                               vpn_leak_to_vrf_update_onevrf(bgp_vrf, bgp_vpn,
+                               vpn_leak_to_vrf_update_onevrf(to_bgp, vpn_from,
                                                              bpi);
                        }
                }
@@ -2513,7 +2491,7 @@ DEFUN (show_ip_bgp_vpn_rd,
        IP_STR
        BGP_STR
        BGP_AFI_HELP_STR
-       "Address Family modifier\n"
+       BGP_AF_MODIFIER_STR
        "Display information for a route distinguisher\n"
        "VPN Route Distinguisher\n"
        "All VPN Route Distinguishers\n")
index 8c2eae279c0b68927eeec130620e344c5bbca6b5..c5cc7d42949f5688a8c9de0ec25424b059a570d3 100644 (file)
@@ -25,6 +25,7 @@
 #include "bgpd/bgp_route.h"
 #include "bgpd/bgp_rd.h"
 #include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_vty.h"
 
 #define MPLS_LABEL_IS_SPECIAL(label) ((label) <= MPLS_LABEL_EXTENSION)
 #define MPLS_LABEL_IS_NULL(label)                                              \
@@ -32,9 +33,7 @@
         || (label) == MPLS_LABEL_IPV6_EXPLICIT_NULL                           \
         || (label) == MPLS_LABEL_IMPLICIT_NULL)
 
-#define BGP_VPNVX_HELP_STR                                                     \
-       "Address Family\n"                                                     \
-       "Address Family\n"
+#define BGP_VPNVX_HELP_STR BGP_AF_STR BGP_AF_STR
 
 #define V4_HEADER                                                              \
        "   Network          Next Hop            Metric LocPrf Weight Path\n"
@@ -53,27 +52,27 @@ extern int bgp_show_mpls_vpn(struct vty *vty, afi_t afi, struct prefix_rd *prd,
                             enum bgp_show_type type, void *output_arg,
                             int tags, bool use_json);
 
-extern void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, struct bgp *bgp_vrf,
+extern void vpn_leak_from_vrf_update(struct bgp *to_bgp, struct bgp *from_bgp,
                                     struct bgp_path_info *path_vrf);
 
-extern void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, struct bgp *bgp_vrf,
+extern void vpn_leak_from_vrf_withdraw(struct bgp *to_bgp, struct bgp *from_bgp,
                                       struct bgp_path_info *path_vrf);
 
-extern void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn,
-                                          struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp,
+                                          struct bgp *from_bgp, afi_t afi);
 
-extern void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn,
-                                        struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_from_vrf_update_all(struct bgp *to_bgp,
+                                        struct bgp *from_bgp, afi_t afi);
 
-extern void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi);
 
-extern void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, struct bgp *bgp_vpn,
+extern void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
                                       afi_t afi);
 
-extern void vpn_leak_to_vrf_update(struct bgp *bgp_vpn,
+extern bool vpn_leak_to_vrf_update(struct bgp *from_bgp,
                                   struct bgp_path_info *path_vpn);
 
-extern void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,
+extern void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp,
                                     struct bgp_path_info *path_vpn);
 
 extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
@@ -233,8 +232,14 @@ static inline void vpn_leak_postchange(enum vpn_policy_direction direction,
        if (!bgp_vpn)
                return;
 
-       if (direction == BGP_VPN_POLICY_DIR_FROMVPN)
-               vpn_leak_to_vrf_update_all(bgp_vrf, bgp_vpn, afi);
+       if (direction == BGP_VPN_POLICY_DIR_FROMVPN) {
+               /* trigger a flush to re-sync with ADJ-RIB-in */
+               if (!CHECK_FLAG(bgp_vpn->af_flags[afi][SAFI_MPLS_VPN],
+                               BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
+                       bgp_clear_soft_in(bgp_vpn, afi, SAFI_MPLS_VPN);
+               else
+                       vpn_leak_to_vrf_update_all(bgp_vrf, bgp_vpn, afi);
+       }
        if (direction == BGP_VPN_POLICY_DIR_TOVPN) {
 
                if (bgp_vrf->vpn_policy[afi].tovpn_label !=
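
The vpn_leak_postchange() hunk above distinguishes two ways of re-importing after a FROMVPN policy change: without "retain route-target all", routes whose RTs previously matched nothing were never kept in the VPN RIB, so the only way to re-evaluate them is a soft inbound clear that re-syncs from the peers' Adj-RIB-In; with retention enabled the local VPN table is complete and can simply be re-walked. A condensed sketch of that decision (same logic as the hunk; the comments are editorial):

	if (!CHECK_FLAG(bgp_vpn->af_flags[afi][SAFI_MPLS_VPN],
			BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
		/* VPN RIB may be incomplete: ask peers to resend */
		bgp_clear_soft_in(bgp_vpn, afi, SAFI_MPLS_VPN);
	else
		/* full VPN RIB retained locally: just re-run the import */
		vpn_leak_to_vrf_update_all(bgp_vrf, bgp_vpn, afi);
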
index da4cc03b66729da68839b79eac4a2cd7e8638cc4..9ecc2ae4e42667a9c08a37bcc936ae3037f181d5 100644 (file)
@@ -429,7 +429,7 @@ static void bgp_accept(struct thread *thread)
                                sockopt_tcp_mss_set(bgp_sock, peer1->tcp_mss);
 
                        bgp_fsm_change_status(peer1, Active);
-                       BGP_TIMER_OFF(
+                       THREAD_OFF(
                                peer1->t_start); /* created in peer_create() */
 
                        if (peer_active(peer1)) {
@@ -558,7 +558,7 @@ static void bgp_accept(struct thread *thread)
        }
        bgp_peer_reg_with_nht(peer);
        bgp_fsm_change_status(peer, Active);
-       BGP_TIMER_OFF(peer->t_start); /* created in peer_create() */
+       THREAD_OFF(peer->t_start); /* created in peer_create() */
 
        SET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
        /* Make dummy peer until read Open packet. */
index c6043807dd9196f99718610ffc08fe15f4900bd1..e1fcc743eca3a158f8cd4a663bd3ecb788a017a3 100644 (file)
@@ -56,6 +56,11 @@ int bgp_nexthop_cache_compare(const struct bgp_nexthop_cache *a,
        if (a->srte_color > b->srte_color)
                return 1;
 
+       if (a->ifindex < b->ifindex)
+               return -1;
+       if (a->ifindex > b->ifindex)
+               return 1;
+
        return prefix_cmp(&a->prefix, &b->prefix);
 }
 
@@ -70,13 +75,15 @@ void bnc_nexthop_free(struct bgp_nexthop_cache *bnc)
 }
 
 struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
-                                 struct prefix *prefix, uint32_t srte_color)
+                                 struct prefix *prefix, uint32_t srte_color,
+                                 ifindex_t ifindex)
 {
        struct bgp_nexthop_cache *bnc;
 
        bnc = XCALLOC(MTYPE_BGP_NEXTHOP_CACHE,
                      sizeof(struct bgp_nexthop_cache));
        bnc->prefix = *prefix;
+       bnc->ifindex = ifindex;
        bnc->srte_color = srte_color;
        bnc->tree = tree;
        LIST_INIT(&(bnc->paths));
@@ -106,7 +113,8 @@ void bnc_free(struct bgp_nexthop_cache *bnc)
 }
 
 struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
-                                  struct prefix *prefix, uint32_t srte_color)
+                                  struct prefix *prefix, uint32_t srte_color,
+                                  ifindex_t ifindex)
 {
        struct bgp_nexthop_cache bnc = {};
 
@@ -115,6 +123,7 @@ struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
 
        bnc.prefix = *prefix;
        bnc.srte_color = srte_color;
+       bnc.ifindex = ifindex;
        return bgp_nexthop_cache_find(tree, &bnc);
 }
 
@@ -915,7 +924,7 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
                }
                tree = import_table ? &bgp->import_check_table
                                    : &bgp->nexthop_cache_table;
-               bnc = bnc_find(tree[family2afi(nhop.family)], &nhop, 0);
+               bnc = bnc_find(tree[family2afi(nhop.family)], &nhop, 0, 0);
                if (!bnc) {
                        vty_out(vty, "specified nexthop does not have entry\n");
                        return CMD_SUCCESS;
index 16c2b6c65a8ad36c5f4a5d04d1d9f5dc15f2fa64..9d653ef4dc2f497b1a6795be2c18c366086ca819 100644 (file)
@@ -152,12 +152,14 @@ extern bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type,
                             struct bgp_dest *dest);
 extern struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
                                         struct prefix *prefix,
-                                        uint32_t srte_color);
+                                        uint32_t srte_color,
+                                        ifindex_t ifindex);
 extern bool bnc_existing_for_prefix(struct bgp_nexthop_cache *bnc);
 extern void bnc_free(struct bgp_nexthop_cache *bnc);
 extern struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
                                          struct prefix *prefix,
-                                         uint32_t srte_color);
+                                         uint32_t srte_color,
+                                         ifindex_t ifindex);
 extern void bnc_nexthop_free(struct bgp_nexthop_cache *bnc);
 extern const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size);
 extern void bgp_scan_init(struct bgp *bgp);
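
With the extra ifindex argument, the nexthop cache is effectively keyed by (prefix, SR-TE color, interface), so link-local peers with the same address on different interfaces get separate entries. A usage sketch (fragment only; tree, p, afi and peer are assumed to exist as in the bgp_nht.c callers below, and passing 0 keeps the old behaviour for global addresses):

	ifindex_t ifindex = 0;

	/* link-local nexthops are only meaningful per interface */
	if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
		ifindex = peer->su.sin6.sin6_scope_id;

	bnc = bnc_find(tree, &p, 0, ifindex);
	if (!bnc)
		bnc = bnc_new(tree, &p, 0, ifindex);
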
index 3ab67a618152bf0447f00a094c6a4fe30043386f..344608fda1cc54245630cddc8b0ce7de0d558127 100644 (file)
@@ -84,9 +84,10 @@ static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
        if (LIST_EMPTY(&(bnc->paths)) && !bnc->nht_info) {
                if (BGP_DEBUG(nht, NHT)) {
                        char buf[PREFIX2STR_BUFFER];
-                       zlog_debug("%s: freeing bnc %s(%u)(%s)", __func__,
+                       zlog_debug("%s: freeing bnc %s(%d)(%u)(%s)", __func__,
                                   bnc_str(bnc, buf, PREFIX2STR_BUFFER),
-                                  bnc->srte_color, bnc->bgp->name_pretty);
+                                  bnc->ifindex, bnc->srte_color,
+                                  bnc->bgp->name_pretty);
                }
                /* only unregister if this is the last nh for this prefix*/
                if (!bnc_existing_for_prefix(bnc))
@@ -113,17 +114,32 @@ void bgp_replace_nexthop_by_peer(struct peer *from, struct peer *to)
        struct prefix pt;
        struct bgp_nexthop_cache *bncp, *bnct;
        afi_t afi;
+       ifindex_t ifindex = 0;
 
        if (!sockunion2hostprefix(&from->su, &pp))
                return;
 
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (from->conf_if && IN6_IS_ADDR_LINKLOCAL(&from->su.sin6.sin6_addr))
+               ifindex = from->su.sin6.sin6_scope_id;
+
        afi = family2afi(pp.family);
-       bncp = bnc_find(&from->bgp->nexthop_cache_table[afi], &pp, 0);
+       bncp = bnc_find(&from->bgp->nexthop_cache_table[afi], &pp, 0, ifindex);
 
        if (!sockunion2hostprefix(&to->su, &pt))
                return;
 
-       bnct = bnc_find(&to->bgp->nexthop_cache_table[afi], &pt, 0);
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       ifindex = 0;
+       if (to->conf_if && IN6_IS_ADDR_LINKLOCAL(&to->su.sin6.sin6_addr))
+               ifindex = to->su.sin6.sin6_scope_id;
+       bnct = bnc_find(&to->bgp->nexthop_cache_table[afi], &pt, 0, ifindex);
 
        if (bnct != bncp)
                return;
@@ -137,11 +153,17 @@ void bgp_unlink_nexthop_by_peer(struct peer *peer)
        struct prefix p;
        struct bgp_nexthop_cache *bnc;
        afi_t afi = family2afi(peer->su.sa.sa_family);
+       ifindex_t ifindex = 0;
 
        if (!sockunion2hostprefix(&peer->su, &p))
                return;
-
-       bnc = bnc_find(&peer->bgp->nexthop_cache_table[afi], &p, 0);
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               ifindex = peer->su.sin6.sin6_scope_id;
+       bnc = bnc_find(&peer->bgp->nexthop_cache_table[afi], &p, 0, ifindex);
        if (!bnc)
                return;
 
@@ -177,8 +199,8 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
                   to derive
                   address-family from the next-hop. */
                if (!is_bgp_static_route)
-                       afi = BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr) ? AFI_IP6
-                                                                : AFI_IP;
+                       afi = BGP_ATTR_MP_NEXTHOP_LEN_IP6(pi->attr) ? AFI_IP6
+                                                                   : AFI_IP;
 
                /* Validation for the ipv4 mapped ipv6 nexthop. */
                if (IS_MAPPED_IPV6(&pi->attr->mp_nexthop_global)) {
@@ -206,9 +228,18 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
                 * Gather the ifindex for if up/down events to be
                 * tagged into this fun
                 */
-               if (afi == AFI_IP6
-                   && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               if (afi == AFI_IP6 &&
+                   IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr)) {
                        ifindex = peer->su.sin6.sin6_scope_id;
+                       if (ifindex == 0) {
+                               if (BGP_DEBUG(nht, NHT)) {
+                                       zlog_debug(
+                                               "%s: Unable to locate ifindex, waiting till we have one",
+                                               peer->conf_if);
+                               }
+                               return 0;
+                       }
+               }
 
                if (!sockunion2hostprefix(&peer->su, &p)) {
                        if (BGP_DEBUG(nht, NHT)) {
@@ -226,28 +257,27 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
        else
                tree = &bgp_nexthop->nexthop_cache_table[afi];
 
-       bnc = bnc_find(tree, &p, srte_color);
+       bnc = bnc_find(tree, &p, srte_color, ifindex);
        if (!bnc) {
-               bnc = bnc_new(tree, &p, srte_color);
+               bnc = bnc_new(tree, &p, srte_color, ifindex);
                bnc->bgp = bgp_nexthop;
-               bnc->ifindex = ifindex;
                if (BGP_DEBUG(nht, NHT)) {
                        char buf[PREFIX2STR_BUFFER];
 
-                       zlog_debug("Allocated bnc %s(%u)(%s) peer %p",
+                       zlog_debug("Allocated bnc %s(%d)(%u)(%s) peer %p",
                                   bnc_str(bnc, buf, PREFIX2STR_BUFFER),
-                                  bnc->srte_color, bnc->bgp->name_pretty,
-                                  peer);
+                                  bnc->ifindex, bnc->srte_color,
+                                  bnc->bgp->name_pretty, peer);
                }
        } else {
                if (BGP_DEBUG(nht, NHT)) {
                        char buf[PREFIX2STR_BUFFER];
 
                        zlog_debug(
-                               "Found existing bnc %s(%s) flags 0x%x ifindex %d #paths %d peer %p",
+                               "Found existing bnc %s(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
                                bnc_str(bnc, buf, PREFIX2STR_BUFFER),
-                               bnc->bgp->name_pretty, bnc->flags, bnc->ifindex,
-                               bnc->path_count, bnc->nht_info);
+                               bnc->ifindex, bnc->bgp->name_pretty, bnc->flags,
+                               bnc->ifindex, bnc->path_count, bnc->nht_info);
                }
        }
 
@@ -351,15 +381,21 @@ void bgp_delete_connected_nexthop(afi_t afi, struct peer *peer)
 {
        struct bgp_nexthop_cache *bnc;
        struct prefix p;
+       ifindex_t ifindex = 0;
 
        if (!peer)
                return;
 
        if (!sockunion2hostprefix(&peer->su, &p))
                return;
-
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               ifindex = peer->su.sin6.sin6_scope_id;
        bnc = bnc_find(&peer->bgp->nexthop_cache_table[family2afi(p.family)],
-                      &p, 0);
+                      &p, 0, ifindex);
        if (!bnc) {
                if (BGP_DEBUG(nht, NHT))
                        zlog_debug(
@@ -408,9 +444,9 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
                char bnc_buf[BNC_FLAG_DUMP_SIZE];
 
                zlog_debug(
-                       "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags %s",
+                       "%s(%u): Rcvd NH update %pFX(%d)(%u) - metric %d/%d #nhops %d/%d flags %s",
                        bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
-                       bnc->srte_color, nhr->metric, bnc->metric,
+                       bnc->ifindex, bnc->srte_color, nhr->metric, bnc->metric,
                        nhr->nexthop_num, bnc->nexthop_num,
                        bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
                                                   sizeof(bnc_buf)));
@@ -659,15 +695,22 @@ void bgp_nht_interface_events(struct peer *peer)
        struct bgp_nexthop_cache_head *table;
        struct bgp_nexthop_cache *bnc;
        struct prefix p;
+       ifindex_t ifindex = 0;
 
        if (!IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
                return;
 
        if (!sockunion2hostprefix(&peer->su, &p))
                return;
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               ifindex = peer->su.sin6.sin6_scope_id;
 
        table = &bgp->nexthop_cache_table[AFI_IP6];
-       bnc = bnc_find(table, &p, 0);
+       bnc = bnc_find(table, &p, 0, ifindex);
        if (!bnc)
                return;
 
@@ -703,7 +746,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
        afi = family2afi(match.family);
        tree = &bgp->nexthop_cache_table[afi];
 
-       bnc_nhc = bnc_find(tree, &match, nhr.srte_color);
+       bnc_nhc = bnc_find(tree, &match, nhr.srte_color, 0);
        if (!bnc_nhc) {
                if (BGP_DEBUG(nht, NHT))
                        zlog_debug(
@@ -714,7 +757,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
 
        tree = &bgp->import_check_table[afi];
 
-       bnc_import = bnc_find(tree, &match, nhr.srte_color);
+       bnc_import = bnc_find(tree, &match, nhr.srte_color, 0);
        if (!bnc_import) {
                if (BGP_DEBUG(nht, NHT))
                        zlog_debug(
@@ -804,7 +847,11 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
                                p->u.prefix4 = ipv4;
                                p->prefixlen = IPV4_MAX_BITLEN;
                        } else {
-                               p->u.prefix4 = pi->attr->nexthop;
+                               if (p_orig->family == AF_EVPN)
+                                       p->u.prefix4 =
+                                               pi->attr->mp_nexthop_global_in;
+                               else
+                                       p->u.prefix4 = pi->attr->nexthop;
                                p->prefixlen = IPV4_MAX_BITLEN;
                        }
                }
@@ -982,8 +1029,9 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
 
                bnc_str(bnc, buf, PREFIX2STR_BUFFER);
                zlog_debug(
-                       "NH update for %s(%u)(%s) - flags %s chgflags %s- evaluate paths",
-                       buf, bnc->srte_color, bnc->bgp->name_pretty,
+                       "NH update for %s(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
+                       buf, bnc->ifindex, bnc->srte_color,
+                       bnc->bgp->name_pretty,
                        bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
                                                   sizeof(bnc_buf)),
                        bgp_nexthop_dump_bnc_change_flags(bnc, chg_buf,
@@ -1190,6 +1238,7 @@ void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
        struct nexthop *nhop;
        struct interface *ifp;
        struct prefix p;
+       ifindex_t ifindex = 0;
 
        if (peer->ifp)
                return;
@@ -1203,8 +1252,14 @@ void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
 
        if (p.family != AF_INET6)
                return;
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               ifindex = peer->su.sin6.sin6_scope_id;
 
-       bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0);
+       bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
        if (!bnc)
                return;
 
@@ -1231,6 +1286,7 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
        struct nexthop *nhop;
        struct interface *ifp;
        struct prefix p;
+       ifindex_t ifindex = 0;
 
        if (peer->ifp)
                return;
@@ -1245,8 +1301,14 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
 
        if (p.family != AF_INET6)
                return;
+       /*
+        * Gather the ifindex for if up/down events to be
+        * tagged into this fun
+        */
+       if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+               ifindex = peer->su.sin6.sin6_scope_id;
 
-       bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0);
+       bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
        if (!bnc)
                return;
 
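
The "gather the ifindex" block above is repeated in several bgp_nht.c functions; it could be factored into a small helper along these lines (bgp_nht_peer_ifindex() is a hypothetical name, not part of this commit):

static ifindex_t bgp_nht_peer_ifindex(const struct peer *peer)
{
	/* Only link-local sessions are scoped to an interface; everything
	 * else keeps ifindex 0 so the cache key is unchanged.
	 */
	if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
		return peer->su.sin6.sin6_scope_id;

	return 0;
}
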
index 28cacd6086eb379567e83bfb5ccaead449636b3f..7248f034a5a02c27a84e21f604f873fa9df2ef19 100644 (file)
@@ -1152,7 +1152,7 @@ static bool bgp_role_violation(struct peer *peer)
                return true;
        }
        if (remote_role == ROLE_UNDEFINED &&
-           CHECK_FLAG(peer->flags, PEER_FLAG_STRICT_MODE)) {
+           CHECK_FLAG(peer->flags, PEER_FLAG_ROLE_STRICT_MODE)) {
                const char *err_msg =
                        "Strict mode. Please set the role on your side.";
                bgp_notify_send_with_data(peer, BGP_NOTIFY_OPEN_ERR,
index 9def9622d9d2e1bd2407e7854b5b3fb77ec35449..fe1887565e51385755e07134b12240a42e27c614 100644 (file)
@@ -125,7 +125,7 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
        intmax_t delta;
        uint32_t holdtime;
 
-       frr_with_mutex(&peer->io_mtx) {
+       frr_with_mutex (&peer->io_mtx) {
                /* if the queue is empty, reset the "last OK" timestamp to
                 * now, otherwise if we write another packet immediately
                 * after it'll get confused
@@ -780,7 +780,7 @@ struct bgp_notify bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify)
        bn.subcode = notify->raw_data[1];
        bn.length = notify->length - 2;
 
-       bn.raw_data = XCALLOC(MTYPE_BGP_NOTIFICATION, bn.length);
+       bn.raw_data = XMALLOC(MTYPE_BGP_NOTIFICATION, bn.length);
        memcpy(bn.raw_data, notify->raw_data + 2, bn.length);
 
        return bn;
@@ -2002,15 +2002,12 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
                                                        gr_info->eor_required,
                                                        "EOR RCV",
                                                        gr_info->eor_received);
-                                       BGP_TIMER_OFF(
-                                               gr_info->t_select_deferral);
+                                       THREAD_OFF(gr_info->t_select_deferral);
                                        gr_info->eor_required = 0;
                                        gr_info->eor_received = 0;
                                        /* Best path selection */
-                                       if (bgp_best_path_select_defer(
-                                                   peer->bgp, afi, safi)
-                                           < 0)
-                                               return BGP_Stop;
+                                       bgp_best_path_select_defer(peer->bgp,
+                                                                  afi, safi);
                                }
                        }
 
@@ -2121,6 +2118,12 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
                if (outer.length) {
                        XFREE(MTYPE_BGP_NOTIFICATION, outer.data);
                        XFREE(MTYPE_BGP_NOTIFICATION, outer.raw_data);
+
+                       /* If this is a Hard Reset notification, we MUST free
+                        * the inner (encapsulated) notification too.
+                        */
+                       if (hard_reset)
+                               XFREE(MTYPE_BGP_NOTIFICATION, inner.raw_data);
                        outer.length = 0;
                }
        }
@@ -2280,17 +2283,26 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
                                                peer, orf_type, orf_len);
                                }
 
+                               /* ORF prefix-list name */
+                               snprintf(name, sizeof(name), "%s.%d.%d",
+                                        peer->host, afi, safi);
+
                                /* we're going to read at least 1 byte of common
                                 * ORF header,
                                 * and 7 bytes of ORF Address-filter entry from
                                 * the stream
                                 */
-                               if (orf_len < 7)
+                               if (*p_pnt & ORF_COMMON_PART_REMOVE_ALL) {
+                                       if (bgp_debug_neighbor_events(peer))
+                                               zlog_debug(
+                                                       "%pBP rcvd Remove-All pfxlist ORF request",
+                                                       peer);
+                                       prefix_bgp_orf_remove_all(afi, name);
                                        break;
+                               }
 
-                               /* ORF prefix-list name */
-                               snprintf(name, sizeof(name), "%s.%d.%d",
-                                        peer->host, afi, safi);
+                               if (orf_len < 7)
+                                       break;
 
                                while (p_pnt < p_end) {
                                        /* If the ORF entry is malformed, want
@@ -2303,17 +2315,6 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
                                        memset(&orfp, 0, sizeof(orfp));
                                        common = *p_pnt++;
                                        /* after ++: p_pnt <= p_end */
-                                       if (common
-                                           & ORF_COMMON_PART_REMOVE_ALL) {
-                                               if (bgp_debug_neighbor_events(
-                                                           peer))
-                                                       zlog_debug(
-                                                               "%pBP rcvd Remove-All pfxlist ORF request",
-                                                               peer);
-                                               prefix_bgp_orf_remove_all(afi,
-                                                                         name);
-                                               break;
-                                       }
                                        ok = ((uint32_t)(p_end - p_pnt)
                                              >= sizeof(uint32_t));
                                        if (ok) {
@@ -2504,7 +2505,7 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
                        return BGP_PACKET_NOOP;
                }
 
-               BGP_TIMER_OFF(peer->t_refresh_stalepath);
+               THREAD_OFF(peer->t_refresh_stalepath);
 
                SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED);
                UNSET_FLAG(peer->af_sflags[afi][safi],
@@ -2615,6 +2616,14 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt,
                                "%s CAPABILITY has action: %d, code: %u, length %u",
                                peer->host, action, hdr->code, hdr->length);
 
+               if (hdr->length < sizeof(struct capability_mp_data)) {
+                       zlog_info(
+                               "%pBP Capability structure is not properly filled out, expected at least %zu bytes but header length specified is %d",
+                               peer, sizeof(struct capability_mp_data),
+                               hdr->length);
+                       return BGP_Stop;
+               }
+
                /* Capability length check. */
                if ((pnt + hdr->length + 3) > end) {
                        zlog_info("%s Capability length error", peer->host);
@@ -2771,7 +2780,7 @@ void bgp_process_packet(struct thread *thread)
                bgp_size_t size;
                char notify_data_length[2];
 
-               frr_with_mutex(&peer->io_mtx) {
+               frr_with_mutex (&peer->io_mtx) {
                        peer->curr = stream_fifo_pop(peer->ibuf);
                }
 
@@ -2898,7 +2907,7 @@ void bgp_process_packet(struct thread *thread)
 
        if (fsm_update_result != FSM_PEER_TRANSFERRED
            && fsm_update_result != FSM_PEER_STOPPED) {
-               frr_with_mutex(&peer->io_mtx) {
+               frr_with_mutex (&peer->io_mtx) {
                        // more work to do, come back later
                        if (peer->ibuf->count > 0)
                                thread_add_event(
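
In the ORF hunk above the ordering matters: a Remove-All request carries only the one-octet common part, so it can never satisfy the 7-byte minimum that applies to Address-Prefix entries, and the old code therefore silently dropped such requests; the prefix-list name also has to be built before that check because Remove-All uses it. A condensed sketch of the corrected order (fragment of the loop body, debug logging omitted):

	/* name first: Remove-All needs it */
	snprintf(name, sizeof(name), "%s.%d.%d", peer->host, afi, safi);

	/* one-octet Remove-All request, handled before the length check */
	if (*p_pnt & ORF_COMMON_PART_REMOVE_ALL) {
		prefix_bgp_orf_remove_all(afi, name);
		break;
	}

	/* anything else must carry at least a 7-byte Address-Prefix entry */
	if (orf_len < 7)
		break;
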
index b35cbeb21f6b3eef26db041c282c60e4c1f80bf1..04f955f97a1ae9d716d732b802f7baea4ed8edf1 100644 (file)
@@ -1773,12 +1773,10 @@ static void bgp_peer_remove_private_as(struct bgp *bgp, afi_t afi, safi_t safi,
 static void bgp_peer_as_override(struct bgp *bgp, afi_t afi, safi_t safi,
                                 struct peer *peer, struct attr *attr)
 {
-       if (peer->sort == BGP_PEER_EBGP
-           && peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) {
-               if (aspath_single_asn_check(attr->aspath, peer->as))
-                       attr->aspath = aspath_replace_specific_asn(
-                               attr->aspath, peer->as, bgp->as);
-       }
+       if (peer->sort == BGP_PEER_EBGP &&
+           peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE))
+               attr->aspath = aspath_replace_specific_asn(attr->aspath,
+                                                          peer->as, bgp->as);
 }
 
 void bgp_attr_add_llgr_community(struct attr *attr)
@@ -2214,6 +2212,32 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
        bgp_peer_remove_private_as(bgp, afi, safi, peer, attr);
        bgp_peer_as_override(bgp, afi, safi, peer, attr);
 
+       if (filter->advmap.update_type == UPDATE_TYPE_WITHDRAW &&
+           filter->advmap.aname &&
+           route_map_lookup_by_name(filter->advmap.aname)) {
+               struct bgp_path_info rmap_path = {0};
+               struct bgp_path_info_extra dummy_rmap_path_extra = {0};
+               struct attr dummy_attr = *attr;
+
+               /* Fill temp path_info */
+               prep_for_rmap_apply(&rmap_path, &dummy_rmap_path_extra, dest,
+                                   pi, peer, &dummy_attr);
+
+               struct route_map *amap =
+                       route_map_lookup_by_name(filter->advmap.aname);
+
+               ret = route_map_apply(amap, p, &rmap_path);
+
+               bgp_attr_flush(&dummy_attr);
+
+               /*
+                * The conditional advertisement mode is Withdraw and this
+                * prefix is a conditional prefix. Don't advertise it.
+                */
+               if (ret == RMAP_PERMITMATCH)
+                       return false;
+       }
+
        /* Route map & unsuppress-map apply. */
        if (!post_attr &&
            (ROUTE_MAP_OUT_NAME(filter) || bgp_path_suppressed(pi))) {
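(The block added above applies the advertise-map to a scratch copy of the path — prep_for_rmap_apply() with a dummy attr — so that set clauses cannot touch the real attributes, and a permit match while the conditional mode is withdraw means the prefix must not be advertised. A standalone sketch of that evaluate-on-a-throwaway-copy pattern; the helper names below are hypothetical:)

    #include <stdbool.h>
    #include <string.h>

    /* Simplified stand-ins for a path's attributes and a policy verdict. */
    struct attrs { int med; };
    enum verdict { POLICY_DENY, POLICY_PERMIT };

    /* Hypothetical policy hook; route_map_apply() plays this role above. */
    static enum verdict apply_policy(struct attrs *a)
    {
            a->med = 50; /* set clauses may modify the copy they are given */
            return POLICY_PERMIT;
    }

    /* Withdraw mode: run the policy on a scratch copy, discard the copy,
     * and suppress the advertisement when the policy permits the prefix. */
    static bool should_advertise(const struct attrs *real)
    {
            struct attrs scratch;

            memcpy(&scratch, real, sizeof(scratch));
            enum verdict v = apply_policy(&scratch);
            /* scratch is simply dropped; bgp_attr_flush() does this for
             * interned sub-attributes in the real code. */
            return v != POLICY_PERMIT;
    }

    int main(void)
    {
            struct attrs a = { .med = 100 };
            bool adv = should_advertise(&a);

            /* The policy permitted the prefix, so adv is false and the
             * caller's attributes are untouched (med is still 100). */
            return (!adv && a.med == 100) ? 0 : 1;
    }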
@@ -3193,7 +3217,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
 }
 
 /* Process the routes with the flag BGP_NODE_SELECT_DEFER set */
-int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
+void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
 {
        struct bgp_dest *dest;
        int cnt = 0;
@@ -3204,7 +3228,7 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
 
                thread_info = THREAD_ARG(t);
                XFREE(MTYPE_TMP, thread_info);
-               BGP_TIMER_OFF(bgp->gr_info[afi][safi].t_route_select);
+               THREAD_OFF(bgp->gr_info[afi][safi].t_route_select);
        }
 
        if (BGP_DEBUG(update, UPDATE_OUT)) {
@@ -3240,7 +3264,7 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
                /* Send route processing complete message to RIB */
                bgp_zebra_update(afi, safi, bgp->vrf_id,
                                 ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE);
-               return 0;
+               return;
        }
 
        thread_info = XMALLOC(MTYPE_TMP, sizeof(struct afi_safi_info));
@@ -3255,7 +3279,6 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
        thread_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
                        BGP_ROUTE_SELECT_DELAY,
                        &bgp->gr_info[afi][safi].t_route_select);
-       return 0;
 }
 
 static wq_item_status bgp_process_wq(struct work_queue *wq, void *data)
@@ -3785,6 +3808,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
        uint8_t pi_sub_type = 0;
        bool force_evpn_import = false;
        safi_t orig_safi = safi;
+       bool leak_success = true;
 
        if (frrtrace_enabled(frr_bgp, process_update)) {
                char pfxprint[PREFIX2STR_BUFFER];
@@ -4410,7 +4434,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
                if ((SAFI_MPLS_VPN == safi)
                    && (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
 
-                       vpn_leak_to_vrf_update(bgp, pi);
+                       leak_success = vpn_leak_to_vrf_update(bgp, pi);
                }
 
 #ifdef ENABLE_BGP_VNC
@@ -4425,7 +4449,13 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
                                           type, sub_type, NULL);
                }
 #endif
-
+               if ((safi == SAFI_MPLS_VPN) &&
+                   !CHECK_FLAG(bgp->af_flags[afi][safi],
+                               BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
+                   !leak_success) {
+                       bgp_unlink_nexthop(pi);
+                       bgp_path_info_delete(dest, pi);
+               }
                return 0;
        } // End of implicit withdraw
 
@@ -4559,8 +4589,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
        }
        if ((SAFI_MPLS_VPN == safi)
            && (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
-
-               vpn_leak_to_vrf_update(bgp, new);
+               leak_success = vpn_leak_to_vrf_update(bgp, new);
        }
 #ifdef ENABLE_BGP_VNC
        if (SAFI_MPLS_VPN == safi) {
@@ -4574,6 +4603,13 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
                                   sub_type, NULL);
        }
 #endif
+       if ((safi == SAFI_MPLS_VPN) &&
+           !CHECK_FLAG(bgp->af_flags[afi][safi],
+                       BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
+           !leak_success) {
+               bgp_unlink_nexthop(new);
+               bgp_path_info_delete(dest, new);
+       }
 
        return 0;
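(In the two hunks above, bgp_update() now records whether vpn_leak_to_vrf_update() succeeded and, if the leak failed and the flag whose name suggests "retain route-target all" is not set, unlinks the nexthop and deletes the just-processed path rather than leaving it behind. A rough standalone sketch of that rollback pattern, with invented helpers:)

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal model of a RIB entry; the flag mirrors the role of
     * BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL in the hunks above. */
    struct path { bool nexthop_registered; bool deleted; };

    static bool leak_to_vrf(struct path *p)
    {
            (void)p;
            return false; /* pretend no VRF imported the route */
    }

    static void unlink_nexthop(struct path *p) { p->nexthop_registered = false; }
    static void delete_path(struct path *p) { p->deleted = true; }

    int main(void)
    {
            struct path p = { .nexthop_registered = true, .deleted = false };
            bool retain_rt_all = false;
            bool leak_success = leak_to_vrf(&p);

            /* Nothing imported the route and we are not retaining all
             * route-targets, so drop the local state instead of keeping
             * a stale VPN path around. */
            if (!leak_success && !retain_rt_all) {
                    unlink_nexthop(&p);
                    delete_path(&p);
            }
            printf("deleted=%d\n", (int)p.deleted);
            return 0;
    }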
 
@@ -4744,7 +4780,7 @@ void bgp_stop_announce_route_timer(struct peer_af *paf)
        if (!paf->t_announce_route)
                return;
 
-       thread_cancel(&paf->t_announce_route);
+       THREAD_OFF(paf->t_announce_route);
 }
 
 /*
@@ -5042,7 +5078,7 @@ void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
 
                list_delete(&ntable->soft_reconfig_peers);
                bgp_soft_reconfig_table_flag(ntable, false);
-               BGP_TIMER_OFF(ntable->soft_reconfig_thread);
+               THREAD_OFF(ntable->soft_reconfig_thread);
        }
 }
 
@@ -5871,6 +5907,9 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
        attr.med = bgp_static->igpmetric;
        attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC);
 
+       if (afi == AFI_IP)
+               attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+
        if (bgp_static->atomic)
                attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE);
 
@@ -8381,6 +8420,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
        case NEXTHOP_TYPE_IPV4:
        case NEXTHOP_TYPE_IPV4_IFINDEX:
                attr.nexthop = nexthop->ipv4;
+               attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
                break;
        case NEXTHOP_TYPE_IPV6:
        case NEXTHOP_TYPE_IPV6_IFINDEX:
@@ -8391,6 +8431,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
                switch (p->family) {
                case AF_INET:
                        attr.nexthop.s_addr = INADDR_ANY;
+                       attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
                        break;
                case AF_INET6:
                        memset(&attr.mp_nexthop_global, 0,
@@ -8941,7 +8982,8 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
                        json_nexthop_global = json_object_new_object();
 
                        json_object_string_addf(json_nexthop_global, "ip",
-                                               "%pI4", &attr->nexthop);
+                                               "%pI4",
+                                               &attr->mp_nexthop_global_in);
 
                        if (path->peer->hostname)
                                json_object_string_add(json_nexthop_global,
@@ -8954,10 +8996,12 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
                                                     "used");
                } else {
                        if (nexthop_hostname)
-                               len = vty_out(vty, "%pI4(%s)%s", &attr->nexthop,
+                               len = vty_out(vty, "%pI4(%s)%s",
+                                             &attr->mp_nexthop_global_in,
                                              nexthop_hostname, vrf_id_str);
                        else
-                               len = vty_out(vty, "%pI4%s", &attr->nexthop,
+                               len = vty_out(vty, "%pI4%s",
+                                             &attr->mp_nexthop_global_in,
                                              vrf_id_str);
 
                        len = wide ? (41 - len) : (16 - len);
@@ -9003,7 +9047,7 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
                                        vty_out(vty, "%*s", len, " ");
                        }
                }
-       } else if (p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+       } else if (p->family == AF_INET && !BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)) {
                if (json_paths) {
                        json_nexthop_global = json_object_new_object();
 
@@ -9036,7 +9080,7 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
        }
 
        /* IPv6 Next Hop */
-       else if (p->family == AF_INET6 || BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+       else if (p->family == AF_INET6 || BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)) {
                if (json_paths) {
                        json_nexthop_global = json_object_new_object();
                        json_object_string_addf(json_nexthop_global, "ip",
@@ -9311,9 +9355,9 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
        /* Print attribute */
        if (attr) {
                if (use_json) {
-                       if (p->family == AF_INET
-                           && (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP
-                               || !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+                       if (p->family == AF_INET &&
+                           (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP ||
+                            !BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                                if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP)
                                        json_object_string_addf(
                                                json_net, "nextHop", "%pI4",
@@ -9322,13 +9366,13 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
                                        json_object_string_addf(
                                                json_net, "nextHop", "%pI4",
                                                &attr->nexthop);
-                       } else if (p->family == AF_INET6
-                                  || BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+                       } else if (p->family == AF_INET6 ||
+                                  BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)) {
                                json_object_string_addf(
                                        json_net, "nextHopGlobal", "%pI6",
                                        &attr->mp_nexthop_global);
-                       } else if (p->family == AF_EVPN
-                                  && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+                       } else if (p->family == AF_EVPN &&
+                                  !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
                                json_object_string_addf(
                                        json_net, "nextHop", "%pI4",
                                        &attr->mp_nexthop_global_in);
@@ -9354,10 +9398,10 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
                        json_object_string_add(json_net, "bgpOriginCode",
                                               bgp_origin_str[attr->origin]);
                } else {
-                       if (p->family == AF_INET
-                           && (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP
-                               || safi == SAFI_EVPN
-                               || !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+                       if (p->family == AF_INET &&
+                           (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP ||
+                            safi == SAFI_EVPN ||
+                            !BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                                if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP
                                    || safi == SAFI_EVPN)
                                        vty_out(vty, "%-16pI4",
@@ -9366,8 +9410,8 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
                                        vty_out(vty, "%-41pI4", &attr->nexthop);
                                else
                                        vty_out(vty, "%-16pI4", &attr->nexthop);
-                       } else if (p->family == AF_INET6
-                                  || BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+                       } else if (p->family == AF_INET6 ||
+                                  BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)) {
                                char buf[BUFSIZ];
 
                                len = vty_out(
@@ -9446,10 +9490,10 @@ void route_vty_out_tag(struct vty *vty, const struct prefix *p,
 
        /* Print attribute */
        attr = path->attr;
-       if (((p->family == AF_INET)
-            && ((safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP)))
-           || (safi == SAFI_EVPN && !BGP_ATTR_NEXTHOP_AFI_IP6(attr))
-           || (!BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+       if (((p->family == AF_INET) &&
+            ((safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP))) ||
+           (safi == SAFI_EVPN && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) ||
+           (!BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP
                    || safi == SAFI_EVPN) {
                        if (json)
@@ -9466,10 +9510,10 @@ void route_vty_out_tag(struct vty *vty, const struct prefix *p,
                        else
                                vty_out(vty, "%-16pI4", &attr->nexthop);
                }
-       } else if (((p->family == AF_INET6)
-                   && ((safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP)))
-                  || (safi == SAFI_EVPN && BGP_ATTR_NEXTHOP_AFI_IP6(attr))
-                  || (BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+       } else if (((p->family == AF_INET6) &&
+                   ((safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP))) ||
+                  (safi == SAFI_EVPN && BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)) ||
+                  (BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                char buf_a[512];
 
                if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL) {
@@ -10092,10 +10136,10 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
        /* Display the nexthop */
        const struct prefix *bn_p = bgp_dest_get_prefix(bn);
 
-       if ((bn_p->family == AF_INET || bn_p->family == AF_ETHERNET
-            || bn_p->family == AF_EVPN)
-           && (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN
-               || !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+       if ((bn_p->family == AF_INET || bn_p->family == AF_ETHERNET ||
+            bn_p->family == AF_EVPN) &&
+           (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN ||
+            !BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP
                    || safi == SAFI_EVPN) {
                        if (json_paths) {
@@ -10200,9 +10244,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
        /* This path was originated locally */
        if (path->peer == bgp->peer_self) {
 
-               if (safi == SAFI_EVPN
-                   || (bn_p->family == AF_INET
-                       && !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
+               if (safi == SAFI_EVPN || (bn_p->family == AF_INET &&
+                                         !BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))) {
                        if (json_paths)
                                json_object_string_add(json_peer, "peerId",
                                                       "0.0.0.0");
@@ -11041,6 +11084,13 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
                                    != PREFIX_PERMIT)
                                        continue;
                        }
+                       if (type == bgp_show_type_access_list) {
+                               struct access_list *alist = output_arg;
+
+                               if (access_list_apply(alist, dest_p) !=
+                                   FILTER_PERMIT)
+                                       continue;
+                       }
                        if (type == bgp_show_type_filter_list) {
                                struct as_list *as_list = output_arg;
 
@@ -12274,6 +12324,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
           |community-list <(1-500)|COMMUNITY_LIST_NAME> [exact-match]\
           |filter-list AS_PATH_FILTER_NAME\
           |prefix-list WORD\
+          |access-list ACCESSLIST_NAME\
           |route-map RMAP_NAME\
           |rpki <invalid|valid|notfound>\
           |version (1-4294967295)\
@@ -12312,6 +12363,8 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
       "Regular expression access list name\n"
       "Display routes conforming to the prefix-list\n"
       "Prefix-list name\n"
+      "Display routes conforming to the access-list\n"
+      "Access-list name\n"
       "Display routes matching the route-map\n"
       "A route-map to match on\n"
       "RPKI route types\n"
@@ -12411,8 +12464,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
                list = community_list_lookup(bgp_clist, clist_number_or_name, 0,
                                             COMMUNITY_LIST_MASTER);
                if (list == NULL) {
-                       vty_out(vty,
-                               "%% %s is not a valid community-list name\n",
+                       vty_out(vty, "%% %s community-list not found\n",
                                clist_number_or_name);
                        return CMD_WARNING;
                }
@@ -12430,8 +12482,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
 
                as_list = as_list_lookup(filter);
                if (as_list == NULL) {
-                       vty_out(vty,
-                               "%% %s is not a valid AS-path access-list name\n",
+                       vty_out(vty, "%% %s AS-path access-list not found\n",
                                filter);
                        return CMD_WARNING;
                }
@@ -12446,7 +12497,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
 
                plist = prefix_list_lookup(afi, prefix_list_str);
                if (plist == NULL) {
-                       vty_out(vty, "%% %s is not a valid prefix-list name\n",
+                       vty_out(vty, "%% %s prefix-list not found\n",
                                prefix_list_str);
                        return CMD_WARNING;
                }
@@ -12455,14 +12506,28 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
                output_arg = plist;
        }
 
+       if (argv_find(argv, argc, "access-list", &idx)) {
+               const char *access_list_str = argv[++idx]->arg;
+               struct access_list *alist;
+
+               alist = access_list_lookup(afi, access_list_str);
+               if (!alist) {
+                       vty_out(vty, "%% %s access-list not found\n",
+                               access_list_str);
+                       return CMD_WARNING;
+               }
+
+               sh_type = bgp_show_type_access_list;
+               output_arg = alist;
+       }
+
        if (argv_find(argv, argc, "route-map", &idx)) {
                const char *rmap_str = argv[++idx]->arg;
                struct route_map *rmap;
 
                rmap = route_map_lookup_by_name(rmap_str);
                if (!rmap) {
-                       vty_out(vty, "%% %s is not a valid route-map name\n",
-                               rmap_str);
+                       vty_out(vty, "%% %s route-map not found\n", rmap_str);
                        return CMD_WARNING;
                }
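(Taken together with the bgp_show_type_access_list case added to bgp_show_table() and the new enum value in bgp_route.h, this hunk lets an operator run, for example, `show ip bgp access-list ACCESSLIST_NAME` to restrict the output to prefixes the named access-list permits; an unknown name is rejected with the "%% ... access-list not found" warning added here.)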
 
@@ -14189,9 +14254,9 @@ DEFUN (show_ip_bgp_neighbor_received_prefix_filter,
        IP_STR
        BGP_STR
        BGP_INSTANCE_HELP_STR
-       "Address Family\n"
-       "Address Family\n"
-       "Address Family modifier\n"
+       BGP_AF_STR
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR
        "Detailed information on TCP and BGP neighbor connections\n"
        "Neighbor to display information about\n"
        "Neighbor to display information about\n"
@@ -14377,7 +14442,7 @@ DEFUN (show_bgp_afi_vpn_rd_route,
        SHOW_STR
        BGP_STR
        BGP_AFI_HELP_STR
-       "Address Family modifier\n"
+       BGP_AF_MODIFIER_STR
        "Display information for a route distinguisher\n"
        "Route Distinguisher\n"
        "All Route Distinguishers\n"
index 743b369bfa444e36608df7a4d81044e87ce69ede..3e46c7043e15dd521f86742f237004f8f1c241c2 100644 (file)
@@ -37,6 +37,7 @@ enum bgp_show_type {
        bgp_show_type_normal,
        bgp_show_type_regexp,
        bgp_show_type_prefix_list,
+       bgp_show_type_access_list,
        bgp_show_type_filter_list,
        bgp_show_type_route_map,
        bgp_show_type_neighbor,
@@ -468,12 +469,16 @@ struct bgp_aggregate {
                 ? 0                                                           \
                 : ((nhlen) < IPV6_MAX_BYTELEN ? AFI_IP : AFI_IP6))
 
+#define BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr)                                      \
+       ((attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL ||               \
+        (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL ||        \
+        (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL ||              \
+        (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL)
+
 #define BGP_ATTR_NEXTHOP_AFI_IP6(attr)                                         \
-       (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP))             \
-        && ((attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL              \
-            || (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL    \
-            || (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL          \
-            || (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL))
+       (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) &&          \
+        BGP_ATTR_MP_NEXTHOP_LEN_IP6(attr))
+
 #define BGP_PATH_COUNTABLE(BI)                                                 \
        (!CHECK_FLAG((BI)->flags, BGP_PATH_HISTORY)                            \
         && !CHECK_FLAG((BI)->flags, BGP_PATH_REMOVED))
@@ -827,7 +832,7 @@ extern int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
                             struct bgp_table *table, struct prefix_rd *prd,
                             enum bgp_show_type type, void *output_arg,
                             bool use_json);
-extern int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi);
+extern void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi);
 extern bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi,
                                       uint8_t type, uint8_t stype,
                                       struct attr *attr, struct bgp_dest *dest);
index e94b633b3b8eb3b59400927337c09edead2b4a1e..33f68c9e8811107426d58ecf849d5307176e36b0 100644 (file)
@@ -3867,7 +3867,7 @@ static void bgp_route_map_update_peer_group(const char *rmap_name,
  * network statements, etc looking to see if they use this route-map.
  */
 static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
-                                        int route_update)
+                                        bool route_update)
 {
        int i;
        bool matched;
@@ -4080,7 +4080,7 @@ static void bgp_route_map_process_update_cb(char *rmap_name)
        struct bgp *bgp;
 
        for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
-               bgp_route_map_process_update(bgp, rmap_name, 1);
+               bgp_route_map_process_update(bgp, rmap_name, true);
 
 #ifdef ENABLE_BGP_VNC
                vnc_routemap_update(bgp, __func__);
@@ -4116,12 +4116,11 @@ static void bgp_route_map_mark_update(const char *rmap_name)
                /* Signal the groups that a route-map update event has
                 * started */
                for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp))
-                       update_group_policy_update(bgp,
-                                                  BGP_POLICY_ROUTE_MAP,
-                                                  rmap_name, 1, 1);
+                       update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP,
+                                                  rmap_name, true, 1);
        } else {
                for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
-                       bgp_route_map_process_update(bgp, rmap_name, 0);
+                       bgp_route_map_process_update(bgp, rmap_name, false);
 #ifdef ENABLE_BGP_VNC
                        vnc_routemap_update(bgp, __func__);
 #endif
index c2b66326432c94121d5c95a94b1834e4910ecbc5..f1173941a0fd369c37fb1ea23c08367ce67feb6e 100644 (file)
@@ -91,7 +91,8 @@ static void sync_init(struct update_subgroup *subgrp,
        bgp_adv_fifo_init(&subgrp->sync->withdraw);
        bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
        subgrp->hash =
-               hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
+               hash_create(bgp_advertise_attr_hash_key,
+                           bgp_advertise_attr_hash_cmp, "BGP SubGroup Hash");
 
        /* We use a larger buffer for subgrp->work in the event that:
         * - We RX a BGP_UPDATE where the attributes alone are just
@@ -115,8 +116,11 @@ static void sync_init(struct update_subgroup *subgrp,
 static void sync_delete(struct update_subgroup *subgrp)
 {
        XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
-       if (subgrp->hash)
+       if (subgrp->hash) {
+               hash_clean(subgrp->hash,
+                          (void (*)(void *))bgp_advertise_attr_free);
                hash_free(subgrp->hash);
+       }
        subgrp->hash = NULL;
        if (subgrp->work)
                stream_free(subgrp->work);
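(sync_delete() now drains the per-subgroup advertise-attribute hash with hash_clean(), handing it a destructor, before calling hash_free(), so interned entries are released rather than leaked along with the table. A toy clean-then-free sketch using an array-backed table instead of FRR's hash API:)

    #include <stdlib.h>

    /* Toy stand-in for a hash table that owns heap-allocated entries. */
    struct table {
            void **slots;
            size_t nslots;
    };

    /* hash_clean() analogue: run the destructor on every entry and empty
     * the table, so freeing the table afterwards leaks nothing. */
    static void table_clean(struct table *t, void (*destroy)(void *))
    {
            for (size_t i = 0; i < t->nslots; i++) {
                    if (t->slots[i]) {
                            destroy(t->slots[i]);
                            t->slots[i] = NULL;
                    }
            }
    }

    /* hash_free() analogue: release the table's own storage only. */
    static void table_free(struct table *t)
    {
            free(t->slots);
            free(t);
    }

    int main(void)
    {
            struct table *t = calloc(1, sizeof(*t));

            t->nslots = 4;
            t->slots = calloc(t->nslots, sizeof(void *));
            t->slots[1] = malloc(32);   /* an "interned" entry */

            table_clean(t, free);       /* destroy entries first... */
            table_free(t);              /* ...then the table itself */
            return 0;
    }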
@@ -214,6 +218,8 @@ static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
                        MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter));
                CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter);
        }
+
+       dstfilter->advmap.update_type = srcfilter->advmap.update_type;
 }
 
 /**
@@ -385,6 +391,9 @@ static unsigned int updgrp_hash_key_make(const void *p)
                                        strlen(filter->advmap.aname), SEED1),
                                  key);
 
+       if (filter->advmap.update_type)
+               key = jhash_1word(filter->advmap.update_type, key);
+
        if (peer->default_rmap[afi][safi].name)
                key = jhash_1word(
                        jhash(peer->default_rmap[afi][safi].name,
@@ -421,10 +430,9 @@ static unsigned int updgrp_hash_key_make(const void *p)
 
        if (bgp_debug_neighbor_events(peer)) {
                zlog_debug(
-                       "%pBP Update Group Hash: sort: %d UpdGrpFlags: %" PRIu64
-                       " UpdGrpAFFlags: %u",
+                       "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u",
                        peer, peer->sort,
-                       (uint64_t)(peer->flags & PEER_UPDGRP_FLAGS),
+                       (intmax_t)(peer->flags & PEER_UPDGRP_FLAGS),
                        flags & PEER_UPDGRP_AF_FLAGS);
                zlog_debug(
                        "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
@@ -464,10 +472,8 @@ static unsigned int updgrp_hash_key_make(const void *p)
                        peer->shared_network &&
                                peer_afi_active_nego(peer, AFI_IP6));
                zlog_debug(
-                       "%pBP Update Group Hash: Lonesoul: %" PRIu64
-                       " ORF prefix: %u ORF old: %u max prefix out: %u",
-                       peer,
-                       (uint64_t)CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
+                       "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %u",
+                       peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
                        CHECK_FLAG(peer->af_cap[afi][safi],
                                   PEER_CAP_ORF_PREFIX_SM_RCV),
                        CHECK_FLAG(peer->af_cap[afi][safi],
@@ -587,6 +593,9 @@ static bool updgrp_hash_cmp(const void *p1, const void *p2)
                && strcmp(fl1->advmap.aname, fl2->advmap.aname)))
                return false;
 
+       if (fl1->advmap.update_type != fl2->advmap.update_type)
+               return false;
+
        if ((pe1->default_rmap[afi][safi].name
             && !pe2->default_rmap[afi][safi].name)
            || (!pe1->default_rmap[afi][safi].name
@@ -1482,7 +1491,24 @@ static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
                                        "u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change",
                                        updgrp->id, subgrp->id,
                                        ctx->policy_name);
-                       subgroup_default_originate(subgrp, 0);
+                       if (route_map_lookup_by_name(ctx->policy_name)) {
+                               /*
+                                * When there is a change in the routemap, this
+                                * flow is triggered. The routemap is still
+                                * present in lib, so this is an update flow
+                                * and the flag needs to be unset.
+                                */
+                               UNSET_FLAG(subgrp->sflags,
+                                          SUBGRP_STATUS_DEFAULT_ORIGINATE);
+                               subgroup_default_originate(subgrp, 0);
+                       } else {
+                               /*
+                                * This is an explicit withdraw, since the
+                                * routemap is no longer present in the routemap
+                                * lib, so pass 1 for the withdraw arg.
+                                */
+                               subgroup_default_originate(subgrp, 1);
+                       }
                }
                update_subgroup_set_needs_refresh(subgrp, 0);
        }
@@ -1529,7 +1555,7 @@ static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
  *             update groups.
  */
 void update_group_policy_update(struct bgp *bgp, enum bgp_policy_type ptype,
-                               const char *pname, int route_update,
+                               const char *pname, bool route_update,
                                int start_event)
 {
        struct updwalk_context ctx;
@@ -1860,6 +1886,13 @@ update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
                safi = SUBGRP_SAFI(subgrp);
 
                if (peer->default_rmap[afi][safi].name) {
+                       /*
+                        * When there is a change in the routemap, this flow
+                        * will be triggered. We need to unset the flag to
+                        * ensure the update flow gets triggered.
+                        */
+                       UNSET_FLAG(subgrp->sflags,
+                                  SUBGRP_STATUS_DEFAULT_ORIGINATE);
                        subgroup_default_originate(subgrp, 0);
                }
        }
@@ -1875,7 +1908,7 @@ void update_group_refresh_default_originate_route_map(struct thread *thread)
        bgp = THREAD_ARG(thread);
        update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
                          reason);
-       thread_cancel(&bgp->t_rmap_def_originate_eval);
+       THREAD_OFF(bgp->t_rmap_def_originate_eval);
        bgp_unlock(bgp);
 }
 
index e3309ab7c5c907bebabbe03c41aef468e8dfd524..56289d399d3b4c7827907edc38a41532e5f54a12 100644 (file)
@@ -252,6 +252,13 @@ struct update_subgroup {
 #define SUBGRP_STATUS_DEFAULT_ORIGINATE (1 << 0)
 #define SUBGRP_STATUS_FORCE_UPDATES (1 << 1)
 #define SUBGRP_STATUS_TABLE_REPARSING (1 << 2)
+/*
+ * This flag has been added to ensure that the SNT counters
+ * get incremented and decremented only during the creation
+ * and deletion workflows of default originate,
+ * not during the update workflow.
+ */
+#define SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED (1 << 3)
 
        uint16_t flags;
 #define SUBGRP_FLAG_NEEDS_REFRESH (1 << 0)
@@ -291,7 +298,7 @@ struct updwalk_context {
        enum bgp_policy_type policy_type;
        const char *policy_name;
        int policy_event_start_flag;
-       int policy_route_update;
+       bool policy_route_update;
        updgrp_walkcb cb;
        void *context;
        uint8_t flags;
@@ -370,7 +377,7 @@ extern bool update_subgroup_trigger_merge_check(struct update_subgroup *,
                                                int force);
 extern void update_group_policy_update(struct bgp *bgp,
                                       enum bgp_policy_type ptype,
-                                      const char *pname, int route_update,
+                                      const char *pname, bool route_update,
                                       int start_event);
 extern void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
                                 updgrp_walkcb cb, void *ctx);
index 518ce485af6d88e09d24847c6ec0dd7051eddb50..27e3677702ef35938f62e5a12fa81e9923090685 100644 (file)
@@ -115,23 +115,22 @@ static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
        /* Look through all of the paths we have advertised for this rn and send
         * a withdraw for the ones that are no longer present */
        RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, adj_next) {
+               if (adj->subgroup != subgrp)
+                       continue;
 
-               if (adj->subgroup == subgrp) {
-                       for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi;
-                            pi = pi->next) {
-                               id = bgp_addpath_id_for_peer(peer, afi, safi,
-                                       &pi->tx_addpath);
+               for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi;
+                    pi = pi->next) {
+                       id = bgp_addpath_id_for_peer(peer, afi, safi,
+                                                    &pi->tx_addpath);
 
-                               if (id == adj->addpath_tx_id) {
-                                       break;
-                               }
+                       if (id == adj->addpath_tx_id) {
+                               break;
                        }
+               }
 
-                       if (!pi) {
-                               subgroup_process_announce_selected(
-                                       subgrp, NULL, ctx->dest,
-                                       adj->addpath_tx_id);
-                       }
+               if (!pi) {
+                       subgroup_process_announce_selected(
+                               subgrp, NULL, ctx->dest, adj->addpath_tx_id);
                }
        }
 }
@@ -165,6 +164,7 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
                 * coalesce timer fires.
                 */
                if (!subgrp->t_coalesce) {
+
                        /* An update-group that uses addpath */
                        if (addpath_capable) {
                                subgrp_withdraw_stale_addpath(ctx, subgrp);
@@ -193,7 +193,6 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
                                                        peer, afi, safi,
                                                        &ctx->pi->tx_addpath));
                        }
-
                        /* An update-group that does not use addpath */
                        else {
                                if (ctx->pi) {
@@ -249,39 +248,37 @@ static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
        for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
                const struct prefix *dest_p = bgp_dest_get_prefix(dest);
 
-               RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out)
-                       if (adj->subgroup == subgrp) {
-                               if (header1) {
-                                       vty_out(vty,
-                                               "BGP table version is %" PRIu64
-                                               ", local router ID is %pI4\n",
-                                               table->version,
-                                               &bgp->router_id);
-                                       vty_out(vty, BGP_SHOW_SCODE_HEADER);
-                                       vty_out(vty, BGP_SHOW_OCODE_HEADER);
-                                       header1 = 0;
-                               }
-                               if (header2) {
-                                       vty_out(vty, BGP_SHOW_HEADER);
-                                       header2 = 0;
-                               }
-                               if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
-                                   && adj->adv->baa) {
-                                       route_vty_out_tmp(vty, dest, dest_p,
-                                                         adj->adv->baa->attr,
-                                                         SUBGRP_SAFI(subgrp),
-                                                         0, NULL, false);
-                                       output_count++;
-                               }
-                               if ((flags & UPDWALK_FLAGS_ADVERTISED)
-                                   && adj->attr) {
-                                       route_vty_out_tmp(vty, dest, dest_p,
-                                                         adj->attr,
-                                                         SUBGRP_SAFI(subgrp),
-                                                         0, NULL, false);
-                                       output_count++;
-                               }
+               RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out) {
+                       if (adj->subgroup != subgrp)
+                               continue;
+
+                       if (header1) {
+                               vty_out(vty,
+                                       "BGP table version is %" PRIu64
+                                       ", local router ID is %pI4\n",
+                                       table->version, &bgp->router_id);
+                               vty_out(vty, BGP_SHOW_SCODE_HEADER);
+                               vty_out(vty, BGP_SHOW_OCODE_HEADER);
+                               header1 = 0;
+                       }
+                       if (header2) {
+                               vty_out(vty, BGP_SHOW_HEADER);
+                               header2 = 0;
+                       }
+                       if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv &&
+                           adj->adv->baa) {
+                               route_vty_out_tmp(
+                                       vty, dest, dest_p, adj->adv->baa->attr,
+                                       SUBGRP_SAFI(subgrp), 0, NULL, false);
+                               output_count++;
+                       }
+                       if ((flags & UPDWALK_FLAGS_ADVERTISED) && adj->attr) {
+                               route_vty_out_tmp(vty, dest, dest_p, adj->attr,
+                                                 SUBGRP_SAFI(subgrp), 0, NULL,
+                                                 false);
+                               output_count++;
                        }
+               }
        }
        if (output_count != 0)
                vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
@@ -347,7 +344,7 @@ static void subgroup_coalesce_timer(struct thread *thread)
 
                SUBGRP_FOREACH_PEER (subgrp, paf) {
                        peer = PAF_PEER(paf);
-                       BGP_TIMER_OFF(peer->t_routeadv);
+                       THREAD_OFF(peer->t_routeadv);
                        BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
                }
        }
@@ -437,7 +434,7 @@ bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
                next = baa->adv;
 
                /* Unintern BGP advertise attribute.  */
-               bgp_advertise_unintern(subgrp->hash, baa);
+               bgp_advertise_attr_unintern(subgrp->hash, baa);
        } else
                fhead = &subgrp->sync->withdraw;
 
@@ -523,7 +520,7 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest,
        /* bgp_path_info adj_out reference */
        adv->pathi = bgp_path_info_lock(path);
 
-       adv->baa = bgp_advertise_intern(subgrp->hash, attr);
+       adv->baa = bgp_advertise_attr_intern(subgrp->hash, attr);
        adv->adj = adj;
        adj->attr_hash = attr_hash;
 
@@ -683,49 +680,47 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
                /* Check if the route can be advertised */
                advertise = bgp_check_advertise(bgp, dest);
 
-               for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next)
-
-                       if (bgp_check_selected(ri, peer, addpath_capable, afi,
-                                              safi)) {
-                               if (subgroup_announce_check(dest, ri, subgrp,
-                                                           dest_p, &attr,
-                                                           NULL)) {
-                                       /* Check if route can be advertised */
-                                       if (advertise) {
-                                               if (!bgp_check_withdrawal(bgp,
-                                                                         dest))
-                                                       bgp_adj_out_set_subgroup(
-                                                               dest, subgrp,
-                                                               &attr, ri);
-                                               else
-                                                       bgp_adj_out_unset_subgroup(
-                                                               dest, subgrp, 1,
-                                                               bgp_addpath_id_for_peer(
-                                                                       peer,
-                                                                       afi,
-                                                                       safi,
-                                                                       &ri->tx_addpath));
-                                       }
-                               } else {
-                                       /* If default originate is enabled for
-                                        * the peer, do not send explicit
-                                        * withdraw. This will prevent deletion
-                                        * of default route advertised through
-                                        * default originate
-                                        */
-                                       if (CHECK_FLAG(
-                                                   peer->af_flags[afi][safi],
-                                                   PEER_FLAG_DEFAULT_ORIGINATE)
-                                           && is_default_prefix(bgp_dest_get_prefix(dest)))
-                                               break;
-
-                                       bgp_adj_out_unset_subgroup(
-                                               dest, subgrp, 1,
-                                               bgp_addpath_id_for_peer(
-                                                       peer, afi, safi,
-                                                       &ri->tx_addpath));
+               for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next) {
+
+                       if (!bgp_check_selected(ri, peer, addpath_capable, afi,
+                                               safi))
+                               continue;
+
+                       if (subgroup_announce_check(dest, ri, subgrp, dest_p,
+                                                   &attr, NULL)) {
+                               /* Check if route can be advertised */
+                               if (advertise) {
+                                       if (!bgp_check_withdrawal(bgp, dest))
+                                               bgp_adj_out_set_subgroup(
+                                                       dest, subgrp, &attr,
+                                                       ri);
+                                       else
+                                               bgp_adj_out_unset_subgroup(
+                                                       dest, subgrp, 1,
+                                                       bgp_addpath_id_for_peer(
+                                                               peer, afi, safi,
+                                                               &ri->tx_addpath));
                                }
+                       } else {
+                               /* If default originate is enabled for
+                                * the peer, do not send explicit
+                                * withdraw. This will prevent deletion
+                                * of default route advertised through
+                                * default originate
+                                */
+                               if (CHECK_FLAG(peer->af_flags[afi][safi],
+                                              PEER_FLAG_DEFAULT_ORIGINATE) &&
+                                   is_default_prefix(
+                                           bgp_dest_get_prefix(dest)))
+                                       break;
+
+                               bgp_adj_out_unset_subgroup(
+                                       dest, subgrp, 1,
+                                       bgp_addpath_id_for_peer(
+                                               peer, afi, safi,
+                                               &ri->tx_addpath));
                        }
+               }
        }
        UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
 
@@ -796,8 +791,11 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
        struct peer *peer;
        struct bgp_adj_out *adj;
        route_map_result_t ret = RMAP_DENYMATCH;
+       route_map_result_t new_ret = RMAP_DENYMATCH;
        afi_t afi;
        safi_t safi;
+       int pref = 65536;
+       int new_pref = 0;
 
        if (!subgrp)
                return;
@@ -853,34 +851,45 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
 
                                tmp_pi.attr = &tmp_attr;
 
-                               ret = route_map_apply_ext(
+                               new_ret = route_map_apply_ext(
                                        peer->default_rmap[afi][safi].map,
-                                       bgp_dest_get_prefix(dest), pi, &tmp_pi);
-
-                               if (ret == RMAP_DENYMATCH) {
-                                       bgp_attr_flush(&tmp_attr);
-                                       continue;
-                               } else {
-                                       new_attr = bgp_attr_intern(&tmp_attr);
-
+                                       bgp_dest_get_prefix(dest), pi, &tmp_pi,
+                                       &new_pref);
+
+                               if (new_ret == RMAP_PERMITMATCH) {
+                                       if (new_pref < pref) {
+                                               pref = new_pref;
+                                               bgp_attr_flush(new_attr);
+                                               new_attr = bgp_attr_intern(
+                                                       tmp_pi.attr);
+                                               bgp_attr_flush(tmp_pi.attr);
+                                       }
                                        subgroup_announce_reset_nhop(
                                                (peer_cap_enhe(peer, afi, safi)
                                                         ? AF_INET6
                                                         : AF_INET),
                                                new_attr);
-
-                                       break;
-                               }
-                       }
-                       if (ret == RMAP_PERMITMATCH) {
-                               bgp_dest_unlock_node(dest);
-                               break;
+                                       ret = new_ret;
+                               } else
+                                       bgp_attr_flush(&tmp_attr);
                        }
                }
                bgp->peer_self->rmap_type = 0;
 
-               if (ret == RMAP_DENYMATCH)
+               if (ret == RMAP_DENYMATCH) {
+                       /*
+                        * If it is an implicit withdraw due to a routemap
+                        * deny operation, we need to set the flag back.
+                        * This is a conversion of an update flow to a
+                        * withdraw flow.
+                        */
+                       if (!withdraw &&
+                           (!CHECK_FLAG(subgrp->sflags,
+                                        SUBGRP_STATUS_DEFAULT_ORIGINATE)))
+                               SET_FLAG(subgrp->sflags,
+                                        SUBGRP_STATUS_DEFAULT_ORIGINATE);
                        withdraw = 1;
+               }
        }
 
        /* Check if the default route is in local BGP RIB which is
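(The reworked loop above runs the default-originate route-map over every candidate path via route_map_apply_ext(), which here also reports a numeric preference for the match, and keeps the attributes of the permitted path with the lowest preference instead of stopping at the first permit. A standalone sketch of that lowest-preference-wins selection, with stand-in fields:)

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>

    struct candidate {
            int permitted; /* 1 if the route-map permitted this path */
            int pref;      /* preference reported for the match */
            int metric;    /* stand-in for the attributes we would keep */
    };

    /* Keep the attributes of the permitted candidate with the lowest
     * preference, mirroring the new selection loop above. Returns the
     * chosen metric, or -1 if nothing was permitted. */
    static int pick_lowest_pref(const struct candidate *c, size_t n)
    {
            int best_pref = INT_MAX;
            int chosen = -1;

            for (size_t i = 0; i < n; i++) {
                    if (!c[i].permitted)
                            continue; /* DENYMATCH: copy is discarded */
                    if (c[i].pref < best_pref) {
                            best_pref = c[i].pref;
                            chosen = c[i].metric;
                    }
            }
            return chosen;
    }

    int main(void)
    {
            struct candidate c[] = {
                    { 1, 20, 200 },
                    { 0, 5, 500 },  /* denied: ignored despite low pref */
                    { 1, 10, 100 }, /* permitted with the lowest pref: wins */
            };

            printf("chosen metric: %d\n", pick_lowest_pref(c, 3));
            return 0;
    }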
index c4a3ca75003d1a0ffc68ee7cf37be75aee227e80..88a81f255de86d0afbf982f1f606a8ebc6e9a7cb 100644 (file)
@@ -1142,7 +1142,12 @@ void subgroup_default_update_packet(struct update_subgroup *subgrp,
 
        (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
        subgroup_trigger_write(subgrp);
-       subgrp->scount++;
+
+       if (!CHECK_FLAG(subgrp->sflags,
+                       SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED)) {
+               subgrp->scount++;
+               SET_FLAG(subgrp->sflags, SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED);
+       }
 }
 
 void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
@@ -1235,7 +1240,12 @@ void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
 
        (void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
        subgroup_trigger_write(subgrp);
-       subgrp->scount--;
+
+       if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED)) {
+               subgrp->scount--;
+               UNSET_FLAG(subgrp->sflags,
+                          SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED);
+       }
 }
 
 static void
index 03518dcfeea9f7025da7c2d4d9cb0bc06fbe67b5..0eba5ea4479ad6de6ffd3da20c9094e9e73ee54a 100644 (file)
@@ -951,14 +951,24 @@ static void bgp_clear_vty_error(struct vty *vty, struct peer *peer, afi_t afi,
 {
        switch (error) {
        case BGP_ERR_AF_UNCONFIGURED:
-               vty_out(vty,
-                       "%% BGP: Enable %s address family for the neighbor %s\n",
-                       get_afi_safi_str(afi, safi, false), peer->host);
+               if (vty)
+                       vty_out(vty,
+                               "%% BGP: Enable %s address family for the neighbor %s\n",
+                               get_afi_safi_str(afi, safi, false), peer->host);
+               else
+                       zlog_warn(
+                               "%% BGP: Enable %s address family for the neighbor %s",
+                               get_afi_safi_str(afi, safi, false), peer->host);
                break;
        case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:
-               vty_out(vty,
-                       "%% BGP: Inbound soft reconfig for %s not possible as it\n      has neither refresh capability, nor inbound soft reconfig\n",
-                       peer->host);
+               if (vty)
+                       vty_out(vty,
+                               "%% BGP: Inbound soft reconfig for %s not possible as it\n      has neither refresh capability, nor inbound soft reconfig\n",
+                               peer->host);
+               else
+                       zlog_warn(
+                               "%% BGP: Inbound soft reconfig for %s not possible as it has neither refresh capability, nor inbound soft reconfig",
+                               peer->host);
                break;
        default:
                break;
@@ -1274,6 +1284,11 @@ static void bgp_clear_star_soft_out(struct vty *vty, const char *name)
 }
 
 
+void bgp_clear_soft_in(struct bgp *bgp, afi_t afi, safi_t safi)
+{
+       bgp_clear(NULL, bgp, afi, safi, clear_all, BGP_CLEAR_SOFT_IN, NULL);
+}
+
 #ifndef VTYSH_EXTRACT_PL
 #include "bgpd/bgp_vty_clippy.c"
 #endif
@@ -2055,7 +2070,7 @@ DEFUN (no_bgp_maxmed_onstartup,
 
        /* Cancel max-med onstartup if its on */
        if (bgp->t_maxmed_onstartup) {
-               thread_cancel(&bgp->t_maxmed_onstartup);
+               THREAD_OFF(bgp->t_maxmed_onstartup);
                bgp->maxmed_onstartup_over = 1;
        }
 
@@ -3840,11 +3855,17 @@ DEFPY(bgp_default_afi_safi, bgp_default_afi_safi_cmd,
        afi_t afi = bgp_vty_afi_from_str(afi_str);
        safi_t safi;
 
+       /*
+        * Impossible situation, but it keeps Coverity happy
+        */
+       assert(afi != AFI_MAX);
+
        if (strmatch(safi_str, "labeled"))
                safi = bgp_vty_safi_from_str("labeled-unicast");
        else
                safi = bgp_vty_safi_from_str(safi_str);
 
+       assert(safi != SAFI_MAX);
        if (no)
                bgp->default_af[afi][safi] = false;
        else {
@@ -4429,6 +4450,24 @@ DEFUN (neighbor_remote_as,
        return peer_remote_as_vty(vty, argv[idx_peer]->arg,
                                  argv[idx_remote_as]->arg);
 }
+
+DEFPY (bgp_allow_martian,
+       bgp_allow_martian_cmd,
+       "[no]$no bgp allow-martian-nexthop",
+       NO_STR
+       BGP_STR
+       "Allow Martian nexthops to be received in the NLRI from a peer\n")
+{
+       VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+       if (no)
+               bgp->allow_martian = false;
+       else
+               bgp->allow_martian = true;
+
+       return CMD_SUCCESS;
+}
+
 /* Enable fast convergence of bgp sessions. If this is enabled, bgp
  * sessions do not wait for hold timer expiry to bring down the sessions
  * when nexthop becomes unreachable
@@ -6431,7 +6470,7 @@ static int peer_role_set_vty(struct vty *vty, const char *ip_str,
 {
        struct peer *peer;
 
-       peer = peer_lookup_vty(vty, ip_str);
+       peer = peer_and_group_lookup_vty(vty, ip_str);
        if (!peer)
                return CMD_WARNING_CONFIG_FAILED;
        uint8_t role = get_role_by_name(role_str);
@@ -6445,7 +6484,7 @@ static int peer_role_unset_vty(struct vty *vty, const char *ip_str)
 {
        struct peer *peer;
 
-       peer = peer_lookup_vty(vty, ip_str);
+       peer = peer_and_group_lookup_vty(vty, ip_str);
        if (!peer)
                return CMD_WARNING_CONFIG_FAILED;
        return bgp_vty_return(vty, peer_role_unset(peer));
@@ -6820,7 +6859,7 @@ static int peer_port_vty(struct vty *vty, const char *ip_str, int afi,
        uint16_t port;
        struct servent *sp;
 
-       peer = peer_lookup_vty(vty, ip_str);
+       peer = peer_and_group_lookup_vty(vty, ip_str);
        if (!peer)
                return CMD_WARNING_CONFIG_FAILED;
 
@@ -6839,9 +6878,9 @@ static int peer_port_vty(struct vty *vty, const char *ip_str, int afi,
 /* Set specified peer's BGP port.  */
 DEFUN (neighbor_port,
        neighbor_port_cmd,
-       "neighbor <A.B.C.D|X:X::X:X> port (0-65535)",
+       "neighbor <A.B.C.D|X:X::X:X|WORD> port (0-65535)",
        NEIGHBOR_STR
-       NEIGHBOR_ADDR_STR
+       NEIGHBOR_ADDR_STR2
        "Neighbor's BGP port\n"
        "TCP port number\n")
 {
@@ -6853,10 +6892,10 @@ DEFUN (neighbor_port,
 
 DEFUN (no_neighbor_port,
        no_neighbor_port_cmd,
-       "no neighbor <A.B.C.D|X:X::X:X> port [(0-65535)]",
+       "no neighbor <A.B.C.D|X:X::X:X|WORD> port [(0-65535)]",
        NO_STR
        NEIGHBOR_STR
-       NEIGHBOR_ADDR_STR
+       NEIGHBOR_ADDR_STR2
        "Neighbor's BGP port\n"
        "TCP port number\n")
 {
@@ -7238,7 +7277,7 @@ DEFUN (bgp_set_route_map_delay_timer,
                 * fired.
                 */
                if (!rmap_delay_timer && bm->t_rmap_update) {
-                       BGP_TIMER_OFF(bm->t_rmap_update);
+                       THREAD_OFF(bm->t_rmap_update);
                        thread_execute(bm->master, bgp_route_map_update_timer,
                                       NULL, 0);
                }
@@ -9349,7 +9388,7 @@ DEFUN_NOSH (address_family_ipv4_safi,
        address_family_ipv4_safi_cmd,
        "address-family ipv4 [<unicast|multicast|vpn|labeled-unicast|flowspec>]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
+       BGP_AF_STR
        BGP_SAFI_WITH_LABEL_HELP_STR)
 {
 
@@ -9374,7 +9413,7 @@ DEFUN_NOSH (address_family_ipv6_safi,
        address_family_ipv6_safi_cmd,
        "address-family ipv6 [<unicast|multicast|vpn|labeled-unicast|flowspec>]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
+       BGP_AF_STR
        BGP_SAFI_WITH_LABEL_HELP_STR)
 {
        if (argc == 3) {
@@ -9399,8 +9438,8 @@ DEFUN_NOSH (address_family_vpnv4,
        address_family_vpnv4_cmd,
        "address-family vpnv4 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV4_NODE;
        return CMD_SUCCESS;
@@ -9410,8 +9449,8 @@ DEFUN_NOSH (address_family_vpnv6,
        address_family_vpnv6_cmd,
        "address-family vpnv6 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV6_NODE;
        return CMD_SUCCESS;
@@ -9422,8 +9461,8 @@ DEFUN_NOSH (address_family_evpn,
        address_family_evpn_cmd,
        "address-family l2vpn evpn",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        VTY_DECLVAR_CONTEXT(bgp, bgp);
        vty->node = BGP_EVPN_NODE;
@@ -9674,9 +9713,9 @@ DEFUN (clear_ip_bgp_all,
        BGP_STR
        BGP_INSTANCE_HELP_STR
        BGP_AFI_HELP_STR
-       "Address Family\n"
+       BGP_AF_STR
        BGP_SAFI_WITH_LABEL_HELP_STR
-       "Address Family modifier\n"
+       BGP_AF_MODIFIER_STR
        "Clear all peers\n"
        "BGP IPv4 neighbor to clear\n"
        "BGP IPv6 neighbor to clear\n"
@@ -9809,7 +9848,7 @@ DEFUN (clear_bgp_ipv6_safi_prefix,
        CLEAR_STR
        IP_STR
        BGP_STR
-       "Address Family\n"
+       BGP_AF_STR
        BGP_SAFI_HELP_STR
        "Clear bestpath and re-advertise\n"
        "IPv6 prefix\n")
@@ -9833,7 +9872,7 @@ DEFUN (clear_bgp_instance_ipv6_safi_prefix,
        IP_STR
        BGP_STR
        BGP_INSTANCE_HELP_STR
-       "Address Family\n"
+       BGP_AF_STR
        BGP_SAFI_HELP_STR
        "Clear bestpath and re-advertise\n"
        "IPv6 prefix\n")
@@ -10155,9 +10194,11 @@ DEFUN (show_bgp_memory,
                        count, mtype_memstr(memstrbuf, sizeof(memstrbuf),
                                            count * sizeof(struct community)));
        if ((count = mtype_stats_alloc(MTYPE_ECOMMUNITY)))
-               vty_out(vty, "%ld BGP community entries, using %s of memory\n",
-                       count, mtype_memstr(memstrbuf, sizeof(memstrbuf),
-                                           count * sizeof(struct ecommunity)));
+               vty_out(vty,
+                       "%ld BGP ext-community entries, using %s of memory\n",
+                       count,
+                       mtype_memstr(memstrbuf, sizeof(memstrbuf),
+                                    count * sizeof(struct ecommunity)));
        if ((count = mtype_stats_alloc(MTYPE_LCOMMUNITY)))
                vty_out(vty,
                        "%ld BGP large-community entries, using %s of memory\n",
@@ -10374,9 +10415,24 @@ static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
 static char *bgp_peer_description_stripped(char *desc, uint32_t size)
 {
        static char stripped[BUFSIZ];
-       uint32_t len = size > strlen(desc) ? strlen(desc) : size;
+       uint32_t i = 0;
+       uint32_t last_space = 0;
 
-       strlcpy(stripped, desc, len + 1);
+       while (i < size) {
+               if (*(desc + i) == 0) {
+                       stripped[i] = '\0';
+                       return stripped;
+               }
+               if (i != 0 && *(desc + i) == ' ' && last_space != i - 1)
+                       last_space = i;
+               stripped[i] = *(desc + i);
+               i++;
+       }
+
+       if (last_space > size)
+               stripped[size + 1] = '\0';
+       else
+               stripped[last_space] = '\0';
 
        return stripped;
 }
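
The rewritten helper above copies at most `size` bytes of the neighbor description and, when it has to truncate, prefers to cut at the last space so a word is not split. A standalone sketch of the same idea, assuming nothing from FRR; the function name, buffer handling and the no-space fallback here are illustrative and not byte-for-byte the semantics of the hunk:

    #include <stddef.h>
    #include <stdio.h>

    /* Copy at most `size` bytes of `desc` into `out`, cutting at the last
     * space seen so that a word is not split.  Illustrative only.
     */
    static void strip_description(const char *desc, size_t size, char *out)
    {
            size_t i, last_space = 0;

            for (i = 0; i < size && desc[i] != '\0'; i++) {
                    if (i != 0 && desc[i] == ' ')
                            last_space = i;
                    out[i] = desc[i];
            }

            if (desc[i] == '\0' || last_space == 0)
                    out[i] = '\0';          /* fits as-is, or no space to cut at */
            else
                    out[last_space] = '\0'; /* cut at the last space */
    }

    int main(void)
    {
            char buf[64];

            strip_description("core router rack 12 row 3", 16, buf);
            printf("%s\n", buf); /* prints "core router" */
            return 0;
    }
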
@@ -12097,11 +12153,12 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
                                               filter->advmap.cname);
                        json_object_string_add(json_advmap, "advertiseMap",
                                               filter->advmap.aname);
-                       json_object_string_add(json_advmap, "advertiseStatus",
-                                              filter->advmap.update_type
-                                                              == ADVERTISE
-                                                      ? "Advertise"
-                                                      : "Withdraw");
+                       json_object_string_add(
+                               json_advmap, "advertiseStatus",
+                               filter->advmap.update_type ==
+                                               UPDATE_TYPE_ADVERTISE
+                                       ? "Advertise"
+                                       : "Withdraw");
                        json_object_object_add(json_addr, "advertiseMap",
                                               json_advmap);
                }
@@ -12411,7 +12468,8 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
                                filter->advmap.cname,
                                filter->advmap.amap ? "*" : "",
                                filter->advmap.aname,
-                               filter->advmap.update_type == ADVERTISE
+                               filter->advmap.update_type ==
+                                               UPDATE_TYPE_ADVERTISE
                                        ? "Advertise"
                                        : "Withdraw");
 
@@ -12720,12 +12778,14 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
                /* Configured timer values. */
                json_object_int_add(json_neigh,
                                    "bgpTimerConfiguredHoldTimeMsecs",
-                                   p->holdtime ? p->holdtime * 1000
-                                               : bgp->default_holdtime * 1000);
-               json_object_int_add(
-                       json_neigh, "bgpTimerConfiguredKeepAliveIntervalMsecs",
-                       p->keepalive ? p->keepalive * 1000
-                                    : bgp->default_keepalive * 1000);
+                                   CHECK_FLAG(p->flags, PEER_FLAG_TIMER)
+                                           ? p->holdtime * 1000
+                                           : bgp->default_holdtime * 1000);
+               json_object_int_add(json_neigh,
+                                   "bgpTimerConfiguredKeepAliveIntervalMsecs",
+                                   CHECK_FLAG(p->flags, PEER_FLAG_TIMER)
+                                           ? p->keepalive * 1000
+                                           : bgp->default_keepalive * 1000);
                json_object_int_add(json_neigh, "bgpTimerHoldTimeMsecs",
                                    p->v_holdtime * 1000);
                json_object_int_add(json_neigh,
@@ -12812,12 +12872,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
 
                /* Configured timer values. */
                vty_out(vty,
-                       "  Hold time is %d, keepalive interval is %d seconds\n",
+                       "  Hold time is %d seconds, keepalive interval is %d seconds\n",
                        p->v_holdtime, p->v_keepalive);
-               vty_out(vty, "  Configured hold time is %d",
-                       p->holdtime ? p->holdtime : bgp->default_holdtime);
+               vty_out(vty, "  Configured hold time is %d seconds",
+                       CHECK_FLAG(p->flags, PEER_FLAG_TIMER)
+                               ? p->holdtime
+                               : bgp->default_holdtime);
                vty_out(vty, ", keepalive interval is %d seconds\n",
-                       p->keepalive ? p->keepalive : bgp->default_keepalive);
+                       CHECK_FLAG(p->flags, PEER_FLAG_TIMER)
+                               ? p->keepalive
+                               : bgp->default_keepalive);
                if (CHECK_FLAG(p->flags, PEER_FLAG_TIMER_DELAYOPEN))
                        vty_out(vty,
                                "  Configured DelayOpenTime is %d seconds\n",
@@ -14646,8 +14710,8 @@ DEFUN (show_ip_bgp_neighbors,
        IP_STR
        BGP_STR
        BGP_INSTANCE_HELP_STR
-       "Address Family\n"
-       "Address Family\n"
+       BGP_AF_STR
+       BGP_AF_STR
        "Detailed information on TCP and BGP neighbor connections\n"
        "Neighbor to display information about\n"
        "Neighbor to display information about\n"
@@ -16289,6 +16353,34 @@ DEFUN(no_neighbor_tcp_mss, no_neighbor_tcp_mss_cmd,
        return peer_tcp_mss_vty(vty, argv[peer_index]->arg, NULL);
 }
 
+DEFPY(bgp_retain_route_target, bgp_retain_route_target_cmd,
+      "[no$no] bgp retain route-target all",
+      NO_STR BGP_STR
+      "Retain BGP updates\n"
+      "Retain BGP updates based on route-target values\n"
+      "Retain all BGP updates\n")
+{
+       bool check;
+       struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+
+       check = CHECK_FLAG(bgp->af_flags[bgp_node_afi(vty)][bgp_node_safi(vty)],
+                          BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+       if (check != !no) {
+               if (!no)
+                       SET_FLAG(bgp->af_flags[bgp_node_afi(vty)]
+                                             [bgp_node_safi(vty)],
+                                BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+               else
+                       UNSET_FLAG(bgp->af_flags[bgp_node_afi(vty)]
+                                               [bgp_node_safi(vty)],
+                                  BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+               /* trigger a flush to re-sync with ADJ-RIB-in */
+               bgp_clear(vty, bgp, bgp_node_afi(vty), bgp_node_safi(vty),
+                         clear_all, BGP_CLEAR_SOFT_IN, NULL);
+       }
+       return CMD_SUCCESS;
+}
+
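
This command is installed under the VPNv4/VPNv6 address-family nodes (see the install_element() calls further down), and bgp_create() sets BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL by default, so bgp_vpn_config_write() only needs to emit the disabled form. Turning retention off triggers the inbound soft clear shown above so the Adj-RIB-In is re-filtered. A configuration sketch, with a hypothetical AS number:

    router bgp 65001
     address-family ipv4 vpn
      no bgp retain route-target all
     exit-address-family
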
 static void bgp_config_write_redistribute(struct vty *vty, struct bgp *bgp,
                                          afi_t afi, safi_t safi)
 {
@@ -16767,13 +16859,13 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
        }
 
        /* role */
-       if (peer->local_role != ROLE_UNDEFINED) {
+       if (peergroup_flag_check(peer, PEER_FLAG_ROLE) &&
+           peer->local_role != ROLE_UNDEFINED)
                vty_out(vty, " neighbor %s local-role %s%s\n", addr,
                        bgp_get_name_by_role(peer->local_role),
-                       CHECK_FLAG(peer->flags, PEER_FLAG_STRICT_MODE)
+                       CHECK_FLAG(peer->flags, PEER_FLAG_ROLE_STRICT_MODE)
                                ? " strict-mode"
                                : "");
-       }
 
        /* ttl-security hops */
        if (peer->gtsm_hops != BGP_GTSM_HOPS_DISABLED) {
@@ -17171,6 +17263,14 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp,
        }
 }
 
+static void bgp_vpn_config_write(struct vty *vty, struct bgp *bgp, afi_t afi,
+                                safi_t safi)
+{
+       if (!CHECK_FLAG(bgp->af_flags[afi][safi],
+                       BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
+               vty_out(vty, "  no bgp retain route-target all\n");
+}
+
 /* Address family based peer configuration display.  */
 static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi,
                                    safi_t safi)
@@ -17241,6 +17341,9 @@ static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi,
        if (safi == SAFI_FLOWSPEC)
                bgp_fs_config_write_pbr(vty, bgp, afi, safi);
 
+       if (safi == SAFI_MPLS_VPN)
+               bgp_vpn_config_write(vty, bgp, afi, safi);
+
        if (safi == SAFI_UNICAST) {
                bgp_vpn_policy_config_write_afi(vty, bgp, afi);
                if (CHECK_FLAG(bgp->af_flags[afi][safi],
@@ -17643,6 +17746,9 @@ int bgp_config_write(struct vty *vty)
                if (CHECK_FLAG(bgp->flags, BGP_FLAG_SHUTDOWN))
                        vty_out(vty, " bgp shutdown\n");
 
+               if (bgp->allow_martian)
+                       vty_out(vty, " bgp allow-martian-nexthop\n");
+
                if (bgp->fast_convergence)
                        vty_out(vty, " bgp fast-convergence\n");
 
@@ -17986,6 +18092,8 @@ void bgp_vty_init(void)
        install_element(CONFIG_NODE, &bgp_set_route_map_delay_timer_cmd);
        install_element(CONFIG_NODE, &no_bgp_set_route_map_delay_timer_cmd);
 
+       install_element(BGP_NODE, &bgp_allow_martian_cmd);
+
        /* bgp fast-convergence command */
        install_element(BGP_NODE, &bgp_fast_convergence_cmd);
        install_element(BGP_NODE, &no_bgp_fast_convergence_cmd);
@@ -19220,6 +19328,10 @@ void bgp_vty_init(void)
        install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
        install_element(BGP_EVPN_NODE, &exit_address_family_cmd);
 
+       /* BGP retain all route-target */
+       install_element(BGP_VPNV4_NODE, &bgp_retain_route_target_cmd);
+       install_element(BGP_VPNV6_NODE, &bgp_retain_route_target_cmd);
+
        /* "clear ip bgp commands" */
        install_element(ENABLE_NODE, &clear_ip_bgp_all_cmd);
 
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index 143d3c1ac5f02efc2bd602508afbe0b829d60e0c..9526b50fb9f257d54cb8dbe4904f49fbd8a06a99 100644
@@ -28,23 +28,20 @@ struct bgp;
 #define BGP_INSTANCE_HELP_STR "BGP view\nBGP VRF\nView/VRF name\n"
 #define BGP_INSTANCE_ALL_HELP_STR "BGP view\nBGP VRF\nAll Views/VRFs\n"
 
+#define BGP_AF_STR "Address Family\n"
+#define BGP_AF_MODIFIER_STR "Address Family modifier\n"
 #define BGP_AFI_CMD_STR         "<ipv4|ipv6>"
-#define BGP_AFI_HELP_STR        "Address Family\nAddress Family\n"
+#define BGP_AFI_HELP_STR BGP_AF_STR BGP_AF_STR
 #define BGP_SAFI_CMD_STR        "<unicast|multicast|vpn>"
 #define BGP_SAFI_HELP_STR                                                      \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"
+       BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR
 #define BGP_AFI_SAFI_CMD_STR    BGP_AFI_CMD_STR" "BGP_SAFI_CMD_STR
 #define BGP_AFI_SAFI_HELP_STR   BGP_AFI_HELP_STR BGP_SAFI_HELP_STR
 
 #define BGP_SAFI_WITH_LABEL_CMD_STR  "<unicast|multicast|vpn|labeled-unicast|flowspec>"
 #define BGP_SAFI_WITH_LABEL_HELP_STR                                           \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"                                            \
-       "Address Family modifier\n"
+       BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR            \
+               BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR
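
The help text itself is unchanged by this consolidation; the new BGP_AF_STR / BGP_AF_MODIFIER_STR building blocks rely on C string-literal concatenation. A small standalone check of that property (not FRR code, just the defines copied from above):

    #include <assert.h>
    #include <string.h>

    #define BGP_AF_STR "Address Family\n"
    #define BGP_AF_MODIFIER_STR "Address Family modifier\n"
    #define BGP_AFI_HELP_STR BGP_AF_STR BGP_AF_STR
    #define BGP_SAFI_HELP_STR \
            BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR

    int main(void)
    {
            /* Adjacent string literals are concatenated at compile time,
             * so the composed help strings are byte-for-byte the old ones.
             */
            assert(!strcmp(BGP_AFI_HELP_STR,
                           "Address Family\nAddress Family\n"));
            assert(!strcmp(BGP_SAFI_HELP_STR,
                           "Address Family modifier\nAddress Family modifier\n"
                           "Address Family modifier\n"));
            return 0;
    }
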
 
 #define SHOW_GR_HEADER \
        "Codes: GR - Graceful Restart," \
@@ -152,6 +149,7 @@ struct bgp;
                                "endOfRibSentAfterUpdate");                    \
        } while (0)
 
+extern void bgp_clear_soft_in(struct bgp *bgp, afi_t afi, safi_t safi);
 extern void bgp_vty_init(void);
 extern void community_alias_vty(void);
 extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index fc7590dcc23ca0ed4535652f03969df2152f5592..9c9b88e1255420d7ca144496ab5b1d51472d7ac3 100644
@@ -1000,19 +1000,13 @@ static bool bgp_table_map_apply(struct route_map *map, const struct prefix *p,
                                p, &path->attr->nexthop);
                }
                if (p->family == AF_INET6) {
-                       char buf[2][INET6_ADDRSTRLEN];
                        ifindex_t ifindex;
                        struct in6_addr *nexthop;
 
                        nexthop = bgp_path_info_to_ipv6_nexthop(path, &ifindex);
                        zlog_debug(
-                               "Zebra rmap deny: IPv6 route %pFX nexthop %s",
-                               p,
-                               nexthop ? inet_ntop(AF_INET6, nexthop, buf[1],
-                                                   sizeof(buf[1]))
-                                       : inet_ntop(AF_INET,
-                                                   &path->attr->nexthop,
-                                                   buf[1], sizeof(buf[1])));
+                               "Zebra rmap deny: IPv6 route %pFX nexthop %pI6",
+                               p, nexthop);
                }
        }
        return false;
@@ -1369,12 +1363,12 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
                nh_weight = 0;
 
                /* Get nexthop address-family */
-               if (p->family == AF_INET
-                   && !BGP_ATTR_NEXTHOP_AFI_IP6(mpinfo_cp->attr))
+               if (p->family == AF_INET &&
+                   !BGP_ATTR_MP_NEXTHOP_LEN_IP6(mpinfo_cp->attr))
                        nh_family = AF_INET;
-               else if (p->family == AF_INET6
-                        || (p->family == AF_INET
-                            && BGP_ATTR_NEXTHOP_AFI_IP6(mpinfo_cp->attr)))
+               else if (p->family == AF_INET6 ||
+                        (p->family == AF_INET &&
+                         BGP_ATTR_MP_NEXTHOP_LEN_IP6(mpinfo_cp->attr)))
                        nh_family = AF_INET6;
                else
                        continue;
@@ -2687,6 +2681,11 @@ static void bgp_encode_pbr_rule_action(struct stream *s,
        else
                stream_putl(s, pbra->fwmark);  /* fwmark */
 
+       stream_putl(s, 0); /* queue id */
+       stream_putw(s, 0); /* vlan_id */
+       stream_putw(s, 0); /* vlan_flags */
+       stream_putw(s, 0); /* pcp */
+
        stream_putl(s, pbra->table_id);
 
        memset(ifname, 0, sizeof(ifname));
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 339cd1975aab43dd8f8a20baaeaaa9fb79b06ec9..c17bd76ad766f4b6548084b77261ac9c797e5125 100644
@@ -482,14 +482,14 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set)
 }
 
 /* BGP's cluster-id control. */
-int bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id)
+void bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id)
 {
        struct peer *peer;
        struct listnode *node, *nnode;
 
        if (bgp_config_check(bgp, BGP_CONFIG_CLUSTER_ID)
            && IPV4_ADDR_SAME(&bgp->cluster_id, cluster_id))
-               return 0;
+               return;
 
        IPV4_ADDR_COPY(&bgp->cluster_id, cluster_id);
        bgp_config_set(bgp, BGP_CONFIG_CLUSTER_ID);
@@ -505,16 +505,15 @@ int bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id)
                                        BGP_NOTIFY_CEASE_CONFIG_CHANGE);
                }
        }
-       return 0;
 }
 
-int bgp_cluster_id_unset(struct bgp *bgp)
+void bgp_cluster_id_unset(struct bgp *bgp)
 {
        struct peer *peer;
        struct listnode *node, *nnode;
 
        if (!bgp_config_check(bgp, BGP_CONFIG_CLUSTER_ID))
-               return 0;
+               return;
 
        bgp->cluster_id.s_addr = 0;
        bgp_config_unset(bgp, BGP_CONFIG_CLUSTER_ID);
@@ -530,7 +529,6 @@ int bgp_cluster_id_unset(struct bgp *bgp)
                                        BGP_NOTIFY_CEASE_CONFIG_CHANGE);
                }
        }
-       return 0;
 }
 
 /* time_t value that is monotonicly increasing
@@ -623,7 +621,7 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as)
        return;
 }
 
-int bgp_confederation_id_unset(struct bgp *bgp)
+void bgp_confederation_id_unset(struct bgp *bgp)
 {
        struct peer *peer;
        struct listnode *node, *nnode;
@@ -645,7 +643,6 @@ int bgp_confederation_id_unset(struct bgp *bgp)
                                bgp_session_reset_safe(peer, &nnode);
                }
        }
-       return 0;
 }
 
 /* Is an AS part of the confed or not? */
@@ -664,16 +661,16 @@ bool bgp_confederation_peers_check(struct bgp *bgp, as_t as)
 }
 
 /* Add an AS to the confederation set.  */
-int bgp_confederation_peers_add(struct bgp *bgp, as_t as)
+void bgp_confederation_peers_add(struct bgp *bgp, as_t as)
 {
        struct peer *peer;
        struct listnode *node, *nnode;
 
        if (bgp->as == as)
-               return BGP_ERR_INVALID_AS;
+               return;
 
        if (bgp_confederation_peers_check(bgp, as))
-               return -1;
+               return;
 
        bgp->confed_peers =
                XREALLOC(MTYPE_BGP_CONFED_LIST, bgp->confed_peers,
@@ -699,11 +696,10 @@ int bgp_confederation_peers_add(struct bgp *bgp, as_t as)
                        }
                }
        }
-       return 0;
 }
 
 /* Delete an AS from the confederation set.  */
-int bgp_confederation_peers_remove(struct bgp *bgp, as_t as)
+void bgp_confederation_peers_remove(struct bgp *bgp, as_t as)
 {
        int i;
        int j;
@@ -711,10 +707,10 @@ int bgp_confederation_peers_remove(struct bgp *bgp, as_t as)
        struct listnode *node, *nnode;
 
        if (!bgp)
-               return -1;
+               return;
 
        if (!bgp_confederation_peers_check(bgp, as))
-               return -1;
+               return;
 
        for (i = 0; i < bgp->confed_peers_cnt; i++)
                if (bgp->confed_peers[i] == as)
@@ -751,71 +747,58 @@ int bgp_confederation_peers_remove(struct bgp *bgp, as_t as)
                        }
                }
        }
-
-       return 0;
 }
 
 /* Local preference configuration.  */
-int bgp_default_local_preference_set(struct bgp *bgp, uint32_t local_pref)
+void bgp_default_local_preference_set(struct bgp *bgp, uint32_t local_pref)
 {
        if (!bgp)
-               return -1;
+               return;
 
        bgp->default_local_pref = local_pref;
-
-       return 0;
 }
 
-int bgp_default_local_preference_unset(struct bgp *bgp)
+void bgp_default_local_preference_unset(struct bgp *bgp)
 {
        if (!bgp)
-               return -1;
+               return;
 
        bgp->default_local_pref = BGP_DEFAULT_LOCAL_PREF;
-
-       return 0;
 }
 
 /* Local preference configuration.  */
-int bgp_default_subgroup_pkt_queue_max_set(struct bgp *bgp, uint32_t queue_size)
+void bgp_default_subgroup_pkt_queue_max_set(struct bgp *bgp,
+                                           uint32_t queue_size)
 {
        if (!bgp)
-               return -1;
+               return;
 
        bgp->default_subgroup_pkt_queue_max = queue_size;
-
-       return 0;
 }
 
-int bgp_default_subgroup_pkt_queue_max_unset(struct bgp *bgp)
+void bgp_default_subgroup_pkt_queue_max_unset(struct bgp *bgp)
 {
        if (!bgp)
-               return -1;
+               return;
        bgp->default_subgroup_pkt_queue_max =
                BGP_DEFAULT_SUBGROUP_PKT_QUEUE_MAX;
-
-       return 0;
 }
 
 /* Listen limit configuration.  */
-int bgp_listen_limit_set(struct bgp *bgp, int listen_limit)
+void bgp_listen_limit_set(struct bgp *bgp, int listen_limit)
 {
        if (!bgp)
-               return -1;
+               return;
 
        bgp->dynamic_neighbors_limit = listen_limit;
-
-       return 0;
 }
 
-int bgp_listen_limit_unset(struct bgp *bgp)
+void bgp_listen_limit_unset(struct bgp *bgp)
 {
        if (!bgp)
-               return -1;
+               return;
 
        bgp->dynamic_neighbors_limit = BGP_DYNAMIC_NEIGHBORS_LIMIT_DEFAULT;
-
-       return 0;
 }
 
 int bgp_map_afi_safi_iana2int(iana_afi_t pkt_afi, iana_safi_t pkt_safi,
@@ -1828,7 +1811,7 @@ struct peer *peer_create_accept(struct bgp *bgp)
 /*
  * Return true if we have a peer configured to use this afi/safi
  */
-int bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi)
+bool bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi)
 {
        struct listnode *node;
        struct peer *peer;
@@ -1838,10 +1821,10 @@ int bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi)
                        continue;
 
                if (peer->afc[afi][safi])
-                       return 1;
+                       return true;
        }
 
-       return 0;
+       return false;
 }
 
 /* Change peer's AS number.  */
@@ -2401,12 +2384,12 @@ void peer_nsf_stop(struct peer *peer)
                peer->nsf[afi][safi] = 0;
 
        if (peer->t_gr_restart) {
-               BGP_TIMER_OFF(peer->t_gr_restart);
+               THREAD_OFF(peer->t_gr_restart);
                if (bgp_debug_neighbor_events(peer))
                        zlog_debug("%pBP graceful restart timer stopped", peer);
        }
        if (peer->t_gr_stale) {
-               BGP_TIMER_OFF(peer->t_gr_stale);
+               THREAD_OFF(peer->t_gr_stale);
                if (bgp_debug_neighbor_events(peer))
                        zlog_debug(
                                "%pBP graceful restart stalepath timer stopped",
@@ -2731,6 +2714,9 @@ static void peer_group2peer_config_copy(struct peer_group *group,
                }
        }
 
+       /* role */
+       PEER_ATTR_INHERIT(peer, group, local_role);
+
        /* Update GR flags for the peer. */
        bgp_peer_gr_flags_update(peer);
 
@@ -3148,6 +3134,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
 
        bgp_lock(bgp);
 
+       bgp->allow_martian = false;
        bgp_process_queue_init(bgp);
        bgp->heuristic_coalesce = true;
        bgp->inst_type = inst_type;
@@ -3236,6 +3223,8 @@ static struct bgp *bgp_create(as_t *as, const char *name,
                bgp->vpn_policy[afi].export_vrf = list_new();
                bgp->vpn_policy[afi].export_vrf->del =
                        bgp_vrf_string_name_delete;
+               SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
+                        BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
        }
        if (name)
                bgp->name = XSTRDUP(MTYPE_BGP, name);
@@ -3580,7 +3569,7 @@ void bgp_instance_down(struct bgp *bgp)
 
        /* Stop timers. */
        if (bgp->t_rmap_def_originate_eval) {
-               BGP_TIMER_OFF(bgp->t_rmap_def_originate_eval);
+               THREAD_OFF(bgp->t_rmap_def_originate_eval);
                bgp_unlock(bgp); /* TODO - This timer is started with a lock -
                                    why? */
        }
@@ -3647,7 +3636,7 @@ int bgp_delete(struct bgp *bgp)
                if (!gr_info)
                        continue;
 
-               BGP_TIMER_OFF(gr_info->t_select_deferral);
+               THREAD_OFF(gr_info->t_select_deferral);
 
                t = gr_info->t_route_select;
                if (t) {
@@ -3655,7 +3644,7 @@ int bgp_delete(struct bgp *bgp)
 
                        XFREE(MTYPE_TMP, info);
                }
-               BGP_TIMER_OFF(gr_info->t_route_select);
+               THREAD_OFF(gr_info->t_route_select);
        }
 
        if (BGP_DEBUG(zebra, ZEBRA)) {
@@ -3678,7 +3667,7 @@ int bgp_delete(struct bgp *bgp)
 
        /* Stop timers. */
        if (bgp->t_rmap_def_originate_eval) {
-               BGP_TIMER_OFF(bgp->t_rmap_def_originate_eval);
+               THREAD_OFF(bgp->t_rmap_def_originate_eval);
                bgp_unlock(bgp); /* TODO - This timer is started with a lock -
                                    why? */
        }
@@ -4255,7 +4244,9 @@ static const struct peer_flag_action peer_flag_action_list[] = {
        {PEER_FLAG_UPDATE_SOURCE, 0, peer_change_none},
        {PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE, 0, peer_change_none},
        {PEER_FLAG_EXTENDED_OPT_PARAMS, 0, peer_change_reset},
-       {PEER_FLAG_STRICT_MODE, 0, peer_change_reset},
+       {PEER_FLAG_ROLE_STRICT_MODE, 0, peer_change_reset},
+       {PEER_FLAG_ROLE, 0, peer_change_reset},
+       {PEER_FLAG_PORT, 0, peer_change_reset},
        {0, 0, 0}};
 
 static const struct peer_flag_action peer_af_flag_action_list[] = {
@@ -4346,7 +4337,7 @@ static void peer_flag_modify_action(struct peer *peer, uint32_t flag)
                        UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
 
                        if (peer->t_pmax_restart) {
-                               BGP_TIMER_OFF(peer->t_pmax_restart);
+                               THREAD_OFF(peer->t_pmax_restart);
                                if (bgp_debug_neighbor_events(peer))
                                        zlog_debug(
                                                "%pBP Maximum-prefix restart timer canceled",
@@ -4928,36 +4919,109 @@ int peer_ebgp_multihop_unset(struct peer *peer)
 /* Set Open Policy Role and check its correctness */
 int peer_role_set(struct peer *peer, uint8_t role, bool strict_mode)
 {
-       if (peer->sort != BGP_PEER_EBGP)
-               return BGP_ERR_INVALID_INTERNAL_ROLE;
-
-       if (peer->local_role == role) {
-               if (CHECK_FLAG(peer->flags, PEER_FLAG_STRICT_MODE) &&
-                   !strict_mode)
-                       /* TODO: Is session restart needed if it was down? */
-                       UNSET_FLAG(peer->flags, PEER_FLAG_STRICT_MODE);
-               if (!CHECK_FLAG(peer->flags, PEER_FLAG_STRICT_MODE) &&
-                   strict_mode) {
-                       SET_FLAG(peer->flags, PEER_FLAG_STRICT_MODE);
-                       /* Restart session to throw Role Mismatch Notification
-                        */
-                       if (peer->remote_role == ROLE_UNDEFINED)
-                               bgp_session_reset(peer);
+       struct peer *member;
+       struct listnode *node, *nnode;
+
+       peer_flag_set(peer, PEER_FLAG_ROLE);
+
+       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+               if (peer->sort != BGP_PEER_EBGP)
+                       return BGP_ERR_INVALID_INTERNAL_ROLE;
+
+               if (peer->local_role == role) {
+                       if (CHECK_FLAG(peer->flags,
+                                      PEER_FLAG_ROLE_STRICT_MODE) &&
+                           !strict_mode)
+                               /* TODO: Is session restart needed if it was
+                                * down?
+                                */
+                               UNSET_FLAG(peer->flags,
+                                          PEER_FLAG_ROLE_STRICT_MODE);
+                       if (!CHECK_FLAG(peer->flags,
+                                       PEER_FLAG_ROLE_STRICT_MODE) &&
+                           strict_mode) {
+                               SET_FLAG(peer->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                               /* Restart session to throw Role Mismatch
+                                * Notification
+                                */
+                               if (peer->remote_role == ROLE_UNDEFINED)
+                                       bgp_session_reset(peer);
+                       }
+               } else {
+                       peer->local_role = role;
+                       if (strict_mode)
+                               SET_FLAG(peer->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                       else
+                               UNSET_FLAG(peer->flags,
+                                          PEER_FLAG_ROLE_STRICT_MODE);
+                       bgp_session_reset(peer);
                }
-       } else {
-               peer->local_role = role;
-               if (strict_mode)
-                       SET_FLAG(peer->flags, PEER_FLAG_STRICT_MODE);
-               else
-                       UNSET_FLAG(peer->flags, PEER_FLAG_STRICT_MODE);
-               bgp_session_reset(peer);
+
+               return CMD_SUCCESS;
        }
-       return 0;
+
+       peer->local_role = role;
+       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+               if (member->sort != BGP_PEER_EBGP)
+                       return BGP_ERR_INVALID_INTERNAL_ROLE;
+
+               if (member->local_role == role) {
+                       if (CHECK_FLAG(member->flags,
+                                      PEER_FLAG_ROLE_STRICT_MODE) &&
+                           !strict_mode)
+                               /* TODO: Is session restart needed if it was
+                                * down?
+                                */
+                               UNSET_FLAG(member->flags,
+                                          PEER_FLAG_ROLE_STRICT_MODE);
+                       if (!CHECK_FLAG(member->flags,
+                                       PEER_FLAG_ROLE_STRICT_MODE) &&
+                           strict_mode) {
+                               SET_FLAG(peer->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                               SET_FLAG(member->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                               /* Restart session to throw Role Mismatch
+                                * Notification
+                                */
+                               if (member->remote_role == ROLE_UNDEFINED)
+                                       bgp_session_reset(member);
+                       }
+               } else {
+                       member->local_role = role;
+
+                       if (strict_mode) {
+                               SET_FLAG(peer->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                               SET_FLAG(member->flags,
+                                        PEER_FLAG_ROLE_STRICT_MODE);
+                       } else {
+                               UNSET_FLAG(member->flags,
+                                          PEER_FLAG_ROLE_STRICT_MODE);
+                       }
+                       bgp_session_reset(member);
+               }
+       }
+
+       return CMD_SUCCESS;
 }
 
 int peer_role_unset(struct peer *peer)
 {
-       return peer_role_set(peer, ROLE_UNDEFINED, 0);
+       struct peer *member;
+       struct listnode *node, *nnode;
+
+       peer_flag_unset(peer, PEER_FLAG_ROLE);
+
+       if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+               return peer_role_set(peer, ROLE_UNDEFINED, 0);
+
+       for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member))
+               peer_role_set(member, ROLE_UNDEFINED, 0);
+
+       return CMD_SUCCESS;
 }
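
Together with the peer_and_group_lookup_vty() and PEER_ATTR_INHERIT(local_role) changes earlier in this diff, peer_role_set()/peer_role_unset() now walk peer-group members, so `local-role` can be configured on a group and inherited. A configuration sketch; the group name and addresses are illustrative, and `customer` is assumed to be one of the accepted role keywords:

    router bgp 65001
     neighbor UPLINKS peer-group
     neighbor UPLINKS remote-as external
     neighbor UPLINKS local-role customer strict-mode
     neighbor 192.0.2.1 peer-group UPLINKS
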
 
 /* Neighbor description. */
@@ -5046,7 +5110,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname)
        return 0;
 }
 
-int peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
+void peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
 {
        struct peer *member;
        struct listnode *node, *nnode;
@@ -5055,7 +5119,7 @@ int peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
        peer_flag_set(peer, PEER_FLAG_UPDATE_SOURCE);
        if (peer->update_source) {
                if (sockunion_cmp(peer->update_source, su) == 0)
-                       return 0;
+                       return;
                sockunion_free(peer->update_source);
        }
        peer->update_source = sockunion_dup(su);
@@ -5076,7 +5140,7 @@ int peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
                        bgp_peer_bfd_update_source(peer);
 
                /* Skip peer-group mechanics for regular peers. */
-               return 0;
+               return;
        }
 
        /*
@@ -5112,17 +5176,15 @@ int peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
                if (member->bfd_config)
                        bgp_peer_bfd_update_source(member);
        }
-
-       return 0;
 }
 
-int peer_update_source_unset(struct peer *peer)
+void peer_update_source_unset(struct peer *peer)
 {
        struct peer *member;
        struct listnode *node, *nnode;
 
        if (!CHECK_FLAG(peer->flags, PEER_FLAG_UPDATE_SOURCE))
-               return 0;
+               return;
 
        /* Inherit configuration from peer-group if peer is member. */
        if (peer_group_active(peer)) {
@@ -5153,7 +5215,7 @@ int peer_update_source_unset(struct peer *peer)
                        bgp_peer_bfd_update_source(peer);
 
                /* Skip peer-group mechanics for regular peers. */
-               return 0;
+               return;
        }
 
        /*
@@ -5188,8 +5250,6 @@ int peer_update_source_unset(struct peer *peer)
                if (member->bfd_config)
                        bgp_peer_bfd_update_source(member);
        }
-
-       return 0;
 }
 
 int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
@@ -5197,9 +5257,13 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
 {
        struct peer *member;
        struct listnode *node, *nnode;
+       struct update_subgroup *subgrp;
 
        /* Set flag and configuration on peer. */
        peer_af_flag_set(peer, afi, safi, PEER_FLAG_DEFAULT_ORIGINATE);
+
+       subgrp = peer_subgroup(peer, afi, safi);
+
        if (rmap) {
                if (!peer->default_rmap[afi][safi].name
                    || strcmp(rmap, peer->default_rmap[afi][safi].name) != 0) {
@@ -5207,6 +5271,17 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
                                XFREE(MTYPE_ROUTE_MAP_NAME,
                                      peer->default_rmap[afi][safi].name);
 
+                       /*
+                        * This path is taken when the route-map policy
+                        * changes. Since the default route has already
+                        * been originated, the flag is set; unset it here
+                        * so that an update message is sent again.
+                        */
+                       if (subgrp)
+                               UNSET_FLAG(subgrp->sflags,
+                                          SUBGRP_STATUS_DEFAULT_ORIGINATE);
+
                        route_map_counter_decrement(peer->default_rmap[afi][safi].map);
                        peer->default_rmap[afi][safi].name =
                                XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap);
@@ -5218,6 +5293,15 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
                        XFREE(MTYPE_ROUTE_MAP_NAME,
                              peer->default_rmap[afi][safi].name);
 
+               /*
+                * This path is taken when the route-map is removed.
+                * Unset the flag so that an update message is sent.
+                */
+               if (subgrp)
+                       UNSET_FLAG(subgrp->sflags,
+                                  SUBGRP_STATUS_DEFAULT_ORIGINATE);
+
                route_map_counter_decrement(peer->default_rmap[afi][safi].map);
                peer->default_rmap[afi][safi].name = NULL;
                peer->default_rmap[afi][safi].map = NULL;
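
The two new UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) calls cover the cases where the route-map attached to default-originate is changed or removed after the default route has already been originated; clearing the subgroup flag lets the update machinery re-evaluate and send a fresh update. In configuration terms (route-map name, address and AS number are illustrative), re-applying the command with a different map, or deleting the map, now results in a refreshed default-route advertisement:

    router bgp 65001
     address-family ipv4 unicast
      neighbor 192.0.2.1 default-originate route-map DEFAULT-POLICY
     exit-address-family
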
@@ -5346,11 +5430,13 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi)
 void peer_port_set(struct peer *peer, uint16_t port)
 {
        peer->port = port;
+       peer_flag_set(peer, PEER_FLAG_PORT);
 }
 
 void peer_port_unset(struct peer *peer)
 {
        peer->port = BGP_PORT_DEFAULT;
+       peer_flag_unset(peer, PEER_FLAG_PORT);
 }
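
PEER_FLAG_PORT lets an explicitly configured port be tracked (and reset via peer_change_reset in the flag action table) independently of the default value, and the NEIGHBOR_ADDR_STR2 change to the `neighbor ... port` commands means the WORD form used for peer-group and interface neighbors is now accepted as well. For example, with illustrative addresses and port value:

    router bgp 65001
     neighbor 192.0.2.1 remote-as 65002
     neighbor 192.0.2.1 port 1179
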
 
 /* Set the TCP-MSS value in the peer structure,
@@ -5390,12 +5476,23 @@ static void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
                        return;
 
                if (CHECK_FLAG(peer->af_flags[afi][safi],
-                              PEER_FLAG_SOFT_RECONFIG))
+                              PEER_FLAG_SOFT_RECONFIG)) {
                        bgp_soft_reconfig_in(peer, afi, safi);
-               else if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV)
-                        || CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV))
-                       bgp_route_refresh_send(peer, afi, safi, 0, 0, 0,
-                                              BGP_ROUTE_REFRESH_NORMAL);
+               } else if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) ||
+                          CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) {
+                       if (CHECK_FLAG(peer->af_cap[afi][safi],
+                                      PEER_CAP_ORF_PREFIX_SM_ADV) &&
+                           (CHECK_FLAG(peer->af_cap[afi][safi],
+                                       PEER_CAP_ORF_PREFIX_RM_RCV) ||
+                            CHECK_FLAG(peer->af_cap[afi][safi],
+                                       PEER_CAP_ORF_PREFIX_RM_OLD_RCV)))
+                               peer_clear_soft(peer, afi, safi,
+                                               BGP_CLEAR_SOFT_IN_ORF_PREFIX);
+                       else
+                               bgp_route_refresh_send(
+                                       peer, afi, safi, 0, 0, 0,
+                                       BGP_ROUTE_REFRESH_NORMAL);
+               }
        }
 }
 
@@ -6439,7 +6536,7 @@ static void peer_distribute_update(struct access_list *access)
                if (access->name)
                        update_group_policy_update(bgp,
                                                   BGP_POLICY_DISTRIBUTE_LIST,
-                                                  access->name, 0, 0);
+                                                  access->name, true, 0);
                for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
                        FOREACH_AFI_SAFI (afi, safi) {
                                filter = &peer->filter[afi][safi];
@@ -6628,7 +6725,7 @@ static void peer_prefix_list_update(struct prefix_list *plist)
                 */
                update_group_policy_update(
                        bgp, BGP_POLICY_PREFIX_LIST,
-                       plist ? prefix_list_name(plist) : NULL, 0, 0);
+                       plist ? prefix_list_name(plist) : NULL, true, 0);
 
                for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
                        FOREACH_AFI_SAFI (afi, safi) {
@@ -6646,6 +6743,14 @@ static void peer_prefix_list_update(struct prefix_list *plist)
                                                filter->plist[direct].plist =
                                                        NULL;
                                }
+
+                               /* If the prefix-list is touched, we need to
+                                * process new updates. This is also important
+                                * for ORF to work correctly.
+                                */
+                               if (peer->afc_nego[afi][safi])
+                                       peer_on_policy_change(peer, afi, safi,
+                                                             0);
                        }
                }
                for (ALL_LIST_ELEMENTS(bgp->group, node, nnode, group)) {
@@ -6805,7 +6910,7 @@ static void peer_aslist_update(const char *aslist_name)
 
        for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
                update_group_policy_update(bgp, BGP_POLICY_FILTER_LIST,
-                                          aslist_name, 0, 0);
+                                          aslist_name, true, 0);
 
                for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
                        FOREACH_AFI_SAFI (afi, safi) {
@@ -7147,18 +7252,15 @@ static void peer_advertise_map_filter_update(struct peer *peer, afi_t afi,
 
        /* Increment condition_filter_count and/or create timer. */
        if (!filter_exists) {
-               filter->advmap.update_type = ADVERTISE;
+               filter->advmap.update_type = UPDATE_TYPE_ADVERTISE;
                bgp_conditional_adv_enable(peer, afi, safi);
        }
+
+       /* Process peer route updates. */
+       peer_on_policy_change(peer, afi, safi, 1);
 }
 
-/* Set advertise-map to the peer but do not process peer route updates here.  *
- * Hold filter changes until the conditional routes polling thread is called  *
- * AS we need to advertise/withdraw prefixes (in advertise-map) based on the  *
- * condition (exist-map/non-exist-map) and routes(specified in condition-map) *
- * in BGP table. So do not call peer_on_policy_change() here, only create     *
- * polling timer thread, update filters and increment condition_filter_count.
- */
+/* Set advertise-map to the peer. */
 int peer_advertise_map_set(struct peer *peer, afi_t afi, safi_t safi,
                           const char *advertise_name,
                           struct route_map *advertise_map,
@@ -7238,7 +7340,6 @@ int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,
                                   __func__, peer->host,
                                   get_afi_safi_str(afi, safi, false));
 
-               peer_on_policy_change(peer, afi, safi, 1);
                return 0;
        }
 
@@ -7261,8 +7362,6 @@ int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,
                        zlog_debug("%s: Send normal update to %s for %s ",
                                   __func__, member->host,
                                   get_afi_safi_str(afi, safi, false));
-
-               peer_on_policy_change(member, afi, safi, 1);
        }
 
        return 0;
@@ -7275,7 +7374,7 @@ static bool peer_maximum_prefix_clear_overflow(struct peer *peer)
 
        UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
        if (peer->t_pmax_restart) {
-               BGP_TIMER_OFF(peer->t_pmax_restart);
+               THREAD_OFF(peer->t_pmax_restart);
                if (bgp_debug_neighbor_events(peer))
                        zlog_debug(
                                "%pBP Maximum-prefix restart timer cancelled",
@@ -8132,20 +8231,22 @@ void bgp_terminate(void)
         * of a large number of peers this will ensure that no peer is left with
         * a dangling connection
         */
-       /* reverse bgp_master_init */
-       bgp_close();
-
-       if (bm->listen_sockets)
-               list_delete(&bm->listen_sockets);
 
-       for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp))
+       bgp_close();
+       /* reverse bgp_master_init */
+       for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
+               bgp_close_vrf_socket(bgp);
                for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer))
                        if (peer_established(peer) || peer->status == OpenSent
                            || peer->status == OpenConfirm)
                                bgp_notify_send(peer, BGP_NOTIFY_CEASE,
                                                BGP_NOTIFY_CEASE_PEER_UNCONFIG);
+       }
+
+       if (bm->listen_sockets)
+               list_delete(&bm->listen_sockets);
 
-       BGP_TIMER_OFF(bm->t_rmap_update);
+       THREAD_OFF(bm->t_rmap_update);
 
        bgp_mac_finish();
 }
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index df26c82af8cba8a22ad7b0032d15d9e6a93fa8f5..8348b37b8e97542addd9d1c2fdaa2ede890f20b3 100644
@@ -527,6 +527,8 @@ struct bgp {
 /* vrf-route leaking flags */
 #define BGP_CONFIG_VRF_TO_VRF_IMPORT (1 << 9)
 #define BGP_CONFIG_VRF_TO_VRF_EXPORT (1 << 10)
+/* vpnvx retain flag */
+#define BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL (1 << 11)
 
        /* BGP per AF peer count */
        uint32_t af_peer_count[AFI_MAX][SAFI_MAX];
@@ -770,6 +772,8 @@ struct bgp {
        struct timeval ebgprequirespolicywarning;
 #define FIFTEENMINUTE2USEC (int64_t)15 * 60 * 1000000
 
+       bool allow_martian;
+
        QOBJ_FIELDS;
 };
 DECLARE_QOBJ_TYPE(bgp);
@@ -857,7 +861,7 @@ struct bgp_nexthop {
 #define CONDITION_NON_EXIST    false
 #define CONDITION_EXIST                true
 
-enum update_type { WITHDRAW, ADVERTISE };
+enum update_type { UPDATE_TYPE_WITHDRAW, UPDATE_TYPE_ADVERTISE };
 
 #include "filter.h"
 
@@ -1335,9 +1339,13 @@ struct peer {
 #define PEER_FLAG_EXTENDED_OPT_PARAMS (1ULL << 30)
 
        /* BGP Open Policy flags.
-        * Enforce using roles on both sides
+        * Enforce using roles on both sides:
+        * `local-role ROLE strict-mode` configured.
         */
-#define PEER_FLAG_STRICT_MODE               (1ULL << 31)
+#define PEER_FLAG_ROLE_STRICT_MODE (1ULL << 31)
+       /* `local-role` configured */
+#define PEER_FLAG_ROLE (1ULL << 32)
+#define PEER_FLAG_PORT (1ULL << 33)
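
PEER_FLAG_ROLE and PEER_FLAG_PORT are the first of these flags to move past bit 31, which is why the 1ULL suffix matters: the shift has to happen in a 64-bit type before the value lands in the 64-bit flags field. A tiny standalone illustration of that point (not FRR code):

    #include <assert.h>
    #include <stdint.h>

    #define PEER_FLAG_ROLE (1ULL << 32) /* same form as the define above */

    int main(void)
    {
            uint64_t flags = 0;

            /* With a plain int constant, 1 << 32 is undefined behaviour
             * on platforms with 32-bit int; the ULL suffix avoids that.
             */
            flags |= PEER_FLAG_ROLE;
            assert(flags & PEER_FLAG_ROLE);
            assert(!(flags & (1ULL << 31)));
            return 0;
    }
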
 
        /*
         *GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART
@@ -2120,32 +2128,34 @@ extern void bgp_router_id_static_set(struct bgp *, struct in_addr);
 
 extern void bm_wait_for_fib_set(bool set);
 extern void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set);
-extern int bgp_cluster_id_set(struct bgp *, struct in_addr *);
-extern int bgp_cluster_id_unset(struct bgp *);
+extern void bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id);
+extern void bgp_cluster_id_unset(struct bgp *bgp);
 
-extern void bgp_confederation_id_set(struct bgp *, as_t);
-extern int bgp_confederation_id_unset(struct bgp *);
+extern void bgp_confederation_id_set(struct bgp *bgp, as_t as);
+extern void bgp_confederation_id_unset(struct bgp *bgp);
 extern bool bgp_confederation_peers_check(struct bgp *, as_t);
 
-extern int bgp_confederation_peers_add(struct bgp *, as_t);
-extern int bgp_confederation_peers_remove(struct bgp *, as_t);
+extern void bgp_confederation_peers_add(struct bgp *bgp, as_t as);
+extern void bgp_confederation_peers_remove(struct bgp *bgp, as_t as);
 
 extern void bgp_timers_set(struct bgp *, uint32_t keepalive, uint32_t holdtime,
                           uint32_t connect_retry, uint32_t delayopen);
 extern void bgp_timers_unset(struct bgp *);
 
-extern int bgp_default_local_preference_set(struct bgp *, uint32_t);
-extern int bgp_default_local_preference_unset(struct bgp *);
+extern void bgp_default_local_preference_set(struct bgp *bgp,
+                                            uint32_t local_pref);
+extern void bgp_default_local_preference_unset(struct bgp *bgp);
 
-extern int bgp_default_subgroup_pkt_queue_max_set(struct bgp *bgp, uint32_t);
-extern int bgp_default_subgroup_pkt_queue_max_unset(struct bgp *bgp);
+extern void bgp_default_subgroup_pkt_queue_max_set(struct bgp *bgp,
+                                                  uint32_t queue_size);
+extern void bgp_default_subgroup_pkt_queue_max_unset(struct bgp *bgp);
 
-extern int bgp_listen_limit_set(struct bgp *, int);
-extern int bgp_listen_limit_unset(struct bgp *);
+extern void bgp_listen_limit_set(struct bgp *bgp, int listen_limit);
+extern void bgp_listen_limit_unset(struct bgp *bgp);
 
 extern bool bgp_update_delay_active(struct bgp *);
 extern bool bgp_update_delay_configured(struct bgp *);
-extern int bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi);
+extern bool bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi);
 extern void peer_as_change(struct peer *, as_t, int);
 extern int peer_remote_as(struct bgp *, union sockunion *, const char *, as_t *,
                          int);
@@ -2186,8 +2196,9 @@ extern void peer_description_set(struct peer *, const char *);
 extern void peer_description_unset(struct peer *);
 
 extern int peer_update_source_if_set(struct peer *, const char *);
-extern int peer_update_source_addr_set(struct peer *, const union sockunion *);
-extern int peer_update_source_unset(struct peer *);
+extern void peer_update_source_addr_set(struct peer *peer,
+                                       const union sockunion *su);
+extern void peer_update_source_unset(struct peer *peer);
 
 extern int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
                                      const char *rmap,
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 35a76e7836e27255077bda1a98098ee0000a2506..382886e0bd75f02d3a950f7762293a2636b93a25 100644
@@ -721,7 +721,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
 
                encaptlv = XCALLOC(MTYPE_ENCAP_TLV,
                                   sizeof(struct bgp_attr_encap_subtlv) + 4);
-               assert(encaptlv);
                encaptlv->type =
                        BGP_VNC_SUBTLV_TYPE_LIFETIME; /* prefix lifetime */
                encaptlv->length = 4;
@@ -766,7 +765,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
                                        MTYPE_ENCAP_TLV,
                                        sizeof(struct bgp_attr_encap_subtlv) + 2
                                                + hop->length);
-                               assert(encaptlv);
                                encaptlv->type =
                                        BGP_VNC_SUBTLV_TYPE_RFPOPTION; /* RFP
                                                                          option
@@ -1041,7 +1039,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
        SET_FLAG(new->flags, BGP_PATH_VALID);
 
        /* save backref to rfapi handle */
-       assert(bgp_path_info_extra_get(new));
+       bgp_path_info_extra_get(new);
        new->extra->vnc.export.rfapi_handle = (void *)rfd;
        encode_label(label_val, &new->extra->label[0]);
 
@@ -1260,7 +1258,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
         * since this peer is not on the I/O thread, this lock is not strictly
         * necessary, but serves as a reminder to those who may meddle...
         */
-       frr_with_mutex(&rfd->peer->io_mtx) {
+       frr_with_mutex (&rfd->peer->io_mtx) {
                // we don't need any I/O related facilities
                if (rfd->peer->ibuf)
                        stream_fifo_free(rfd->peer->ibuf);
@@ -1963,7 +1961,6 @@ int rfapi_open(void *rfp_start_val, struct rfapi_ip_addr *vn,
                rfd = XCALLOC(MTYPE_RFAPI_DESC,
                              sizeof(struct rfapi_descriptor));
        }
-       assert(rfd);
 
        rfd->bgp = bgp;
        if (default_options) {
@@ -3081,7 +3078,7 @@ DEFUN (debug_rfapi_register_vn_un,
 
 DEFUN (debug_rfapi_register_vn_un_l2o,
        debug_rfapi_register_vn_un_l2o_cmd,
-       "debug rfapi-dev register vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime SECONDS macaddr YY:YY:YY:YY:YY:YY lni (0-16777215)",
+       "debug rfapi-dev register vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime SECONDS macaddr X:X:X:X:X:X lni (0-16777215)",
        DEBUG_STR
        DEBUG_RFAPI_STR
        "rfapi_register\n"
@@ -3309,7 +3306,7 @@ DEFUN (debug_rfapi_query_vn_un,
 
 DEFUN (debug_rfapi_query_vn_un_l2o,
        debug_rfapi_query_vn_un_l2o_cmd,
-       "debug rfapi-dev query vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lni LNI target YY:YY:YY:YY:YY:YY",
+       "debug rfapi-dev query vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lni LNI target X:X:X:X:X:X",
        DEBUG_STR
        DEBUG_RFAPI_STR
        "rfapi_query\n"
diff --git a/bgpd/rfapi/rfapi_ap.c b/bgpd/rfapi/rfapi_ap.c
index abb18aeb2c8ff259338f4fced8c828db076c35d4..fcc6168cfaf9834a0f1162349274eb56ee170bb5 100644
@@ -459,7 +459,6 @@ int rfapiApAdd(struct bgp *bgp, struct rfapi_descriptor *rfd,
        if (rc) {
                /* Not found */
                adb = XCALLOC(MTYPE_RFAPI_ADB, sizeof(struct rfapi_adb));
-               assert(adb);
                adb->lifetime = lifetime;
                adb->u.key = rk;
 
diff --git a/bgpd/rfapi/rfapi_encap_tlv.c b/bgpd/rfapi/rfapi_encap_tlv.c
index a7bc909c589161dd576560dfdd9d4a9bed530073..d4e875df2ad601b6b1ee4b7407add38a0454201e 100644
@@ -170,7 +170,6 @@ struct rfapi_un_option *rfapi_encap_tlv_to_un_option(struct attr *attr)
        stlv = attr->encap_subtlvs;
 
        uo = XCALLOC(MTYPE_RFAPI_UN_OPTION, sizeof(struct rfapi_un_option));
-       assert(uo);
        uo->type = RFAPI_UN_OPTION_TYPE_TUNNELTYPE;
        uo->v.tunnel.type = attr->encap_tunneltype;
        tto = &uo->v.tunnel;
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 1d58c0331335752906fde37fb7d047367a1e061f..1d427027696c87511faa92c0a6f3e8f6a1ea723f 100644
@@ -856,13 +856,11 @@ static void rfapiBgpInfoChainFree(struct bgp_path_info *bpi)
                 */
                if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
                    && bpi->extra->vnc.import.timer) {
-
-                       struct thread **t =
-                               &(bpi->extra->vnc.import.timer);
-                       struct rfapi_withdraw *wcb = (*t)->arg;
+                       struct rfapi_withdraw *wcb =
+                               THREAD_ARG(bpi->extra->vnc.import.timer);
 
                        XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
-                       thread_cancel(t);
+                       THREAD_OFF(bpi->extra->vnc.import.timer);
                }
 
                next = bpi->next;
@@ -1273,7 +1271,6 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix,
 #endif
 
        new = XCALLOC(MTYPE_RFAPI_NEXTHOP, sizeof(struct rfapi_next_hop_entry));
-       assert(new);
 
        new->prefix = *rprefix;
 
@@ -1286,7 +1283,6 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix,
 
                vo = XCALLOC(MTYPE_RFAPI_VN_OPTION,
                             sizeof(struct rfapi_vn_option));
-               assert(vo);
 
                vo->type = RFAPI_VN_OPTION_TYPE_L2ADDR;
 
@@ -2308,7 +2304,6 @@ rfapiMonitorEncapAdd(struct rfapi_import_table *import_table,
 
        m = XCALLOC(MTYPE_RFAPI_MONITOR_ENCAP,
                    sizeof(struct rfapi_monitor_encap));
-       assert(m);
 
        m->node = vpn_rn;
        m->bpi = vpn_bpi;
@@ -2374,7 +2369,7 @@ static void rfapiMonitorEncapDelete(struct bgp_path_info *vpn_bpi)
  */
 static void rfapiWithdrawTimerVPN(struct thread *t)
 {
-       struct rfapi_withdraw *wcb = t->arg;
+       struct rfapi_withdraw *wcb = THREAD_ARG(t);
        struct bgp_path_info *bpi = wcb->info;
        struct bgp *bgp = bgp_get_default();
        const struct prefix *p;
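
The rfapi hunks in this commit repeatedly make the same two substitutions: timer callbacks read their argument through THREAD_ARG() instead of dereferencing t->arg, and pending timers are cancelled with THREAD_OFF() instead of thread_cancel(&...) plus a manual NULL assignment. A minimal sketch of the resulting pattern, using only the helpers visible in this diff (the function names below are illustrative, not part of the commit):

	static void example_withdraw_expire(struct thread *t)
	{
		/* callback argument is read through the accessor macro */
		struct rfapi_withdraw *wcb = THREAD_ARG(t);	/* was: t->arg */

		/* ... handle the expiry; the handler owns and frees wcb ... */
		XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
	}

	static void example_withdraw_cancel(struct bgp_path_info *bpi)
	{
		if (bpi->extra->vnc.import.timer) {
			struct rfapi_withdraw *wcb =
				THREAD_ARG(bpi->extra->vnc.import.timer);

			XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
			/* cancels the timer and clears the stored reference,
			 * replacing the thread_cancel(&t) / t = NULL dance */
			THREAD_OFF(bpi->extra->vnc.import.timer);
		}
	}

	static void example_withdraw_arm(struct bgp_path_info *bpi,
					 struct rfapi_withdraw *wcb,
					 uint32_t lifetime)
	{
		/* (re)arm: THREAD_OFF() is a no-op when no timer is pending */
		THREAD_OFF(bpi->extra->vnc.import.timer);
		thread_add_timer(bm->master, example_withdraw_expire, wcb,
				 lifetime, &bpi->extra->vnc.import.timer);
	}
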
@@ -2675,7 +2670,7 @@ rfapiWithdrawEncapUpdateCachedUn(struct rfapi_import_table *import_table,
 
 static void rfapiWithdrawTimerEncap(struct thread *t)
 {
-       struct rfapi_withdraw *wcb = t->arg;
+       struct rfapi_withdraw *wcb = THREAD_ARG(t);
        struct bgp_path_info *bpi = wcb->info;
        int was_first_route = 0;
        struct rfapi_monitor_encap *em;
@@ -2791,7 +2786,6 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
         * service routine, which is supposed to free the wcb.
         */
        wcb = XCALLOC(MTYPE_RFAPI_WITHDRAW, sizeof(struct rfapi_withdraw));
-       assert(wcb);
        wcb->node = rn;
        wcb->info = bpi;
        wcb->import_table = import_table;
@@ -3093,13 +3087,12 @@ static void rfapiBgpInfoFilteredImportEncap(
                                 */
                                if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
                                    && bpi->extra->vnc.import.timer) {
-
-                                       struct thread **t =
-                                               &(bpi->extra->vnc.import.timer);
-                                       struct rfapi_withdraw *wcb = (*t)->arg;
+                                       struct rfapi_withdraw *wcb = THREAD_ARG(
+                                               bpi->extra->vnc.import.timer);
 
                                        XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
-                                       thread_cancel(t);
+                                       THREAD_OFF(
+                                               bpi->extra->vnc.import.timer);
                                }
 
                                if (action == FIF_ACTION_UPDATE) {
@@ -3186,12 +3179,11 @@ static void rfapiBgpInfoFilteredImportEncap(
                        "%s: removing holddown bpi matching NVE of new route",
                        __func__);
                if (bpi->extra->vnc.import.timer) {
-                       struct thread **t =
-                               &(bpi->extra->vnc.import.timer);
-                       struct rfapi_withdraw *wcb = (*t)->arg;
+                       struct rfapi_withdraw *wcb =
+                               THREAD_ARG(bpi->extra->vnc.import.timer);
 
                        XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
-                       thread_cancel(t);
+                       THREAD_OFF(bpi->extra->vnc.import.timer);
                }
                rfapiExpireEncapNow(import_table, rn, bpi);
        }
@@ -3224,7 +3216,6 @@ static void rfapiBgpInfoFilteredImportEncap(
                struct agg_table *referenced_vpn_table;
 
                referenced_vpn_table = agg_table_init();
-               assert(referenced_vpn_table);
 
 /*
  * iterate over the set of monitors at this ENCAP node.
@@ -3282,7 +3273,6 @@ static void rfapiBgpInfoFilteredImportEncap(
                                mnext = XCALLOC(
                                        MTYPE_RFAPI_MONITOR_ENCAP,
                                        sizeof(struct rfapi_monitor_encap));
-                               assert(mnext);
                                mnext->node = m->node;
                                mnext->next = referenced_vpn_prefix->info;
                                referenced_vpn_prefix->info = mnext;
@@ -3549,13 +3539,12 @@ void rfapiBgpInfoFilteredImportVPN(
                                 */
                                if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
                                    && bpi->extra->vnc.import.timer) {
-
-                                       struct thread **t =
-                                               &(bpi->extra->vnc.import.timer);
-                                       struct rfapi_withdraw *wcb = (*t)->arg;
+                                       struct rfapi_withdraw *wcb = THREAD_ARG(
+                                               bpi->extra->vnc.import.timer);
 
                                        XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
-                                       thread_cancel(t);
+                                       THREAD_OFF(
+                                               bpi->extra->vnc.import.timer);
 
                                        import_table->holddown_count[afi] -= 1;
                                        RFAPI_UPDATE_ITABLE_COUNT(
@@ -3768,12 +3757,11 @@ void rfapiBgpInfoFilteredImportVPN(
                        "%s: removing holddown bpi matching NVE of new route",
                        __func__);
                if (bpi->extra->vnc.import.timer) {
-                       struct thread **t =
-                               &(bpi->extra->vnc.import.timer);
-                       struct rfapi_withdraw *wcb = (*t)->arg;
+                       struct rfapi_withdraw *wcb =
+                               THREAD_ARG(bpi->extra->vnc.import.timer);
 
                        XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
-                       thread_cancel(t);
+                       THREAD_OFF(bpi->extra->vnc.import.timer);
                }
                rfapiExpireVpnNow(import_table, rn, bpi, 0);
        }
@@ -4332,7 +4320,6 @@ rfapiImportTableRefAdd(struct bgp *bgp, struct ecommunity *rt_import_list,
        if (!it) {
                it = XCALLOC(MTYPE_RFAPI_IMPORTTABLE,
                             sizeof(struct rfapi_import_table));
-               assert(it);
                it->next = h->imports;
                h->imports = it;
 
@@ -4497,12 +4484,11 @@ static void rfapiDeleteRemotePrefixesIt(
                                        if (!delete_holddown)
                                                continue;
                                        if (bpi->extra->vnc.import.timer) {
-
-                                               struct thread **t =
-                                                       &(bpi->extra->vnc
-                                                               .import.timer);
                                                struct rfapi_withdraw *wcb =
-                                                       (*t)->arg;
+                                                       THREAD_ARG(
+                                                               bpi->extra->vnc
+                                                                       .import
+                                                                       .timer);
 
                                                wcb->import_table
                                                        ->holddown_count[afi] -=
@@ -4512,7 +4498,9 @@ static void rfapiDeleteRemotePrefixesIt(
                                                        afi, 1);
                                                XFREE(MTYPE_RFAPI_WITHDRAW,
                                                      wcb);
-                                               thread_cancel(t);
+                                               THREAD_OFF(
+                                                       bpi->extra->vnc.import
+                                                               .timer);
                                        }
                                } else {
                                        if (!delete_active)
@@ -4551,7 +4539,6 @@ static void rfapiDeleteRemotePrefixesIt(
                                                        MTYPE_RFAPI_NVE_ADDR,
                                                        sizeof(struct
                                                               rfapi_nve_addr));
-                                               assert(nap);
                                                *nap = na;
                                                nap->info = is_active
                                                                    ? pAHcount
index 58a0f8dea7ca51c9ac78a1598f9230311d4b501b..0e71d5d7e196e0ed7aea994aceee2e5fcce31896 100644 (file)
@@ -620,7 +620,7 @@ void rfapiMonitorDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
                rfapiMonitorDetachImport(m);
        }
 
-       thread_cancel(&m->timer);
+       THREAD_OFF(m->timer);
 
        /*
         * remove from rfd list
@@ -657,7 +657,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
                                        rfapiMonitorDetachImport(m);
                                }
 
-                               thread_cancel(&m->timer);
+                               THREAD_OFF(m->timer);
 
                                XFREE(MTYPE_RFAPI_MONITOR, m);
                                rn->info = NULL;
@@ -691,7 +691,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
 #endif
                        }
 
-                       thread_cancel(&mon_eth->timer);
+                       THREAD_OFF(mon_eth->timer);
 
                        /*
                         * remove from rfd list
@@ -733,7 +733,7 @@ void rfapiMonitorResponseRemovalOn(struct bgp *bgp)
 
 static void rfapiMonitorTimerExpire(struct thread *t)
 {
-       struct rfapi_monitor_vpn *m = t->arg;
+       struct rfapi_monitor_vpn *m = THREAD_ARG(t);
 
        /* forget reference to thread, it's gone */
        m->timer = NULL;
@@ -744,19 +744,17 @@ static void rfapiMonitorTimerExpire(struct thread *t)
 
 static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
 {
-       if (m->timer) {
-               unsigned long remain = thread_timer_remain_second(m->timer);
+       unsigned long remain = thread_timer_remain_second(m->timer);
 
-               /* unexpected case, but avoid wraparound problems below */
-               if (remain > m->rfd->response_lifetime)
-                       return;
+       /* unexpected case, but avoid wraparound problems below */
+       if (remain > m->rfd->response_lifetime)
+               return;
 
-               /* don't restart if we just restarted recently */
-               if (m->rfd->response_lifetime - remain < 2)
-                       return;
+       /* don't restart if we just restarted recently */
+       if (m->rfd->response_lifetime - remain < 2)
+               return;
 
-               thread_cancel(&m->timer);
-       }
+       THREAD_OFF(m->timer);
 
        {
                char buf[BUFSIZ];
@@ -766,7 +764,7 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
                        rfapi_ntop(m->p.family, m->p.u.val, buf, BUFSIZ),
                        m->rfd->response_lifetime);
        }
-       m->timer = NULL;
+
        thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
                         m->rfd->response_lifetime, &m->timer);
 }
@@ -1041,7 +1039,7 @@ void rfapiMonitorMovedUp(struct rfapi_import_table *import_table,
 
 static void rfapiMonitorEthTimerExpire(struct thread *t)
 {
-       struct rfapi_monitor_eth *m = t->arg;
+       struct rfapi_monitor_eth *m = THREAD_ARG(t);
 
        /* forget reference to thread, it's gone */
        m->timer = NULL;
@@ -1054,19 +1052,17 @@ static void rfapiMonitorEthTimerExpire(struct thread *t)
 
 static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
 {
-       if (m->timer) {
-               unsigned long remain = thread_timer_remain_second(m->timer);
+       unsigned long remain = thread_timer_remain_second(m->timer);
 
-               /* unexpected case, but avoid wraparound problems below */
-               if (remain > m->rfd->response_lifetime)
-                       return;
+       /* unexpected case, but avoid wraparound problems below */
+       if (remain > m->rfd->response_lifetime)
+               return;
 
-               /* don't restart if we just restarted recently */
-               if (m->rfd->response_lifetime - remain < 2)
-                       return;
+       /* don't restart if we just restarted recently */
+       if (m->rfd->response_lifetime - remain < 2)
+               return;
 
-               thread_cancel(&m->timer);
-       }
+       THREAD_OFF(m->timer);
 
        {
                char buf[BUFSIZ];
@@ -1076,7 +1072,7 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
                        rfapiEthAddr2Str(&m->macaddr, buf, BUFSIZ),
                        m->rfd->response_lifetime);
        }
-       m->timer = NULL;
+
        thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
                         m->rfd->response_lifetime, &m->timer);
 }
@@ -1404,7 +1400,7 @@ void rfapiMonitorEthDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
                rfapiMonitorEthDetachImport(bgp, val);
        }
 
-       thread_cancel(&val->timer);
+       THREAD_OFF(val->timer);
 
        /*
         * remove from rfd list
index 44eebe961cb5835067050de6069d36b484981cf5..9d61ada7db2b86e698149f8900fa5709da0ffd66 100644 (file)
@@ -268,8 +268,8 @@ static void rfapi_info_free(struct rfapi_info *goner)
                if (goner->timer) {
                        struct rfapi_rib_tcb *tcb;
 
-                       tcb = goner->timer->arg;
-                       thread_cancel(&goner->timer);
+                       tcb = THREAD_ARG(goner->timer);
+                       THREAD_OFF(goner->timer);
                        XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
                }
                XFREE(MTYPE_RFAPI_INFO, goner);
@@ -293,7 +293,7 @@ struct rfapi_rib_tcb {
  */
 static void rfapiRibExpireTimer(struct thread *t)
 {
-       struct rfapi_rib_tcb *tcb = t->arg;
+       struct rfapi_rib_tcb *tcb = THREAD_ARG(t);
 
        RFAPI_RIB_CHECK_COUNTS(1, 0);
 
@@ -338,8 +338,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
        struct rfapi_rib_tcb *tcb = NULL;
 
        if (ri->timer) {
-               tcb = ri->timer->arg;
-               thread_cancel(&ri->timer);
+               tcb = THREAD_ARG(ri->timer);
+               THREAD_OFF(ri->timer);
        } else {
                tcb = XCALLOC(MTYPE_RFAPI_RECENT_DELETE,
                              sizeof(struct rfapi_rib_tcb));
@@ -357,10 +357,9 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
 
        vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
                               ri->lifetime);
-       ri->timer = NULL;
+
        thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
                         &ri->timer);
-       assert(ri->timer);
 }
 
 extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
@@ -914,8 +913,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
                                if (ri->timer) {
                                        struct rfapi_rib_tcb *tcb;
 
-                                       tcb = ri->timer->arg;
-                                       thread_cancel(&ri->timer);
+                                       tcb = THREAD_ARG(ri->timer);
+                                       THREAD_OFF(ri->timer);
                                        XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
                                }
 
@@ -999,8 +998,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
                                if (ori->timer) {
                                        struct rfapi_rib_tcb *tcb;
 
-                                       tcb = ori->timer->arg;
-                                       thread_cancel(&ori->timer);
+                                       tcb = THREAD_ARG(ori->timer);
+                                       THREAD_OFF(ori->timer);
                                        XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
                                }
 
@@ -1179,7 +1178,6 @@ callback:
 
                        new = XCALLOC(MTYPE_RFAPI_NEXTHOP,
                                      sizeof(struct rfapi_next_hop_entry));
-                       assert(new);
 
                        if (ri->rk.aux_prefix.family) {
                                rfapiQprefix2Rprefix(&ri->rk.aux_prefix,
@@ -1269,7 +1267,6 @@ callback:
                                new = XCALLOC(
                                        MTYPE_RFAPI_NEXTHOP,
                                        sizeof(struct rfapi_next_hop_entry));
-                               assert(new);
 
                                if (ri->rk.aux_prefix.family) {
                                        rfapiQprefix2Rprefix(&ri->rk.aux_prefix,
@@ -1345,8 +1342,8 @@ callback:
                                if (ri->timer) {
                                        struct rfapi_rib_tcb *tcb;
 
-                                       tcb = ri->timer->arg;
-                                       thread_cancel(&ri->timer);
+                                       tcb = THREAD_ARG(ri->timer);
+                                       THREAD_OFF(ri->timer);
                                        XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
                                }
                                RFAPI_RIB_CHECK_COUNTS(0, delete_list->count);
@@ -1718,7 +1715,6 @@ void rfapiRibUpdatePendingNode(
 
                urq = XCALLOC(MTYPE_RFAPI_UPDATED_RESPONSE_QUEUE,
                              sizeof(struct rfapi_updated_responses_queue));
-               assert(urq);
                if (!rfd->updated_responses_queue)
                        updated_responses_queue_init(rfd);
 
index b95bace0d155590c1270344a04d48c1b14db28f7..c8fdadcac9583e2daeb1bf9bda22d664a126fadb 100644 (file)
@@ -928,12 +928,9 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
                                } else
                                        fp(out, "%-15s %-15s", "", "");
                                buf_remain[0] = 0;
-                               if (m->timer) {
-                                       rfapiFormatSeconds(
-                                               thread_timer_remain_second(
-                                                       m->timer),
-                                               buf_remain, BUFSIZ);
-                               }
+                               rfapiFormatSeconds(
+                                       thread_timer_remain_second(m->timer),
+                                       buf_remain, BUFSIZ);
                                fp(out, " %-15s %-10s\n",
                                   inet_ntop(m->p.family, &m->p.u.prefix,
                                             buf_pfx, BUFSIZ),
@@ -1005,12 +1002,9 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
                                } else
                                        fp(out, "%-15s %-15s", "", "");
                                buf_remain[0] = 0;
-                               if (mon_eth->timer) {
-                                       rfapiFormatSeconds(
-                                               thread_timer_remain_second(
-                                                       mon_eth->timer),
-                                               buf_remain, BUFSIZ);
-                               }
+                               rfapiFormatSeconds(thread_timer_remain_second(
+                                                          mon_eth->timer),
+                                                  buf_remain, BUFSIZ);
                                fp(out, " %-17s %10d %-10s\n",
                                   rfapi_ntop(pfx_mac.family, &pfx_mac.u.prefix,
                                              buf_pfx, BUFSIZ),
@@ -2515,7 +2509,7 @@ DEFUN (add_vnc_prefix,
  ************************************************************************/
 DEFUN (add_vnc_mac_vni_prefix_cost_life,
        add_vnc_mac_vni_prefix_cost_life_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255) lifetime (1-4294967295)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255) lifetime (1-4294967295)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2545,7 +2539,7 @@ DEFUN (add_vnc_mac_vni_prefix_cost_life,
 
 DEFUN (add_vnc_mac_vni_prefix_life,
        add_vnc_mac_vni_prefix_life_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime (1-4294967295)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime (1-4294967295)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2572,7 +2566,7 @@ DEFUN (add_vnc_mac_vni_prefix_life,
 
 DEFUN (add_vnc_mac_vni_prefix_cost,
        add_vnc_mac_vni_prefix_cost_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2598,7 +2592,7 @@ DEFUN (add_vnc_mac_vni_prefix_cost,
 
 DEFUN (add_vnc_mac_vni_prefix,
        add_vnc_mac_vni_prefix_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M>",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M>",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2622,7 +2616,7 @@ DEFUN (add_vnc_mac_vni_prefix,
 
 DEFUN (add_vnc_mac_vni_cost_life,
        add_vnc_mac_vni_cost_life_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255) lifetime (1-4294967295)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255) lifetime (1-4294967295)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2649,7 +2643,7 @@ DEFUN (add_vnc_mac_vni_cost_life,
 
 DEFUN (add_vnc_mac_vni_cost,
        add_vnc_mac_vni_cost_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2673,7 +2667,7 @@ DEFUN (add_vnc_mac_vni_cost,
 
 DEFUN (add_vnc_mac_vni_life,
        add_vnc_mac_vni_life_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lifetime (1-4294967295)",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lifetime (1-4294967295)",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -2698,7 +2692,7 @@ DEFUN (add_vnc_mac_vni_life,
 
 DEFUN (add_vnc_mac_vni,
        add_vnc_mac_vni_cmd,
-       "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X>",
+       "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X>",
        "Add registration\n"
        "VNC Information\n"
        "Add/modify mac address information\n"
@@ -3749,7 +3743,7 @@ DEFUN (clear_vnc_prefix_all,
  */
 DEFUN (clear_vnc_mac_vn_un,
        clear_vnc_mac_vn_un_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3782,7 +3776,7 @@ DEFUN (clear_vnc_mac_vn_un,
 
 DEFUN (clear_vnc_mac_un_vn,
        clear_vnc_mac_un_vn_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3815,7 +3809,7 @@ DEFUN (clear_vnc_mac_un_vn,
 
 DEFUN (clear_vnc_mac_un,
        clear_vnc_mac_un_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3844,7 +3838,7 @@ DEFUN (clear_vnc_mac_un,
 
 DEFUN (clear_vnc_mac_vn,
        clear_vnc_mac_vn_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3873,7 +3867,7 @@ DEFUN (clear_vnc_mac_vn,
 
 DEFUN (clear_vnc_mac_all,
        clear_vnc_mac_all_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> *",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> *",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3903,7 +3897,7 @@ DEFUN (clear_vnc_mac_all,
 
 DEFUN (clear_vnc_mac_vn_un_prefix,
        clear_vnc_mac_vn_un_prefix_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3940,7 +3934,7 @@ DEFUN (clear_vnc_mac_vn_un_prefix,
 
 DEFUN (clear_vnc_mac_un_vn_prefix,
        clear_vnc_mac_un_vn_prefix_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M> prefix <*|A.B.C.D/M|X:X::X:X/M>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -3981,7 +3975,7 @@ DEFUN (clear_vnc_mac_un_vn_prefix,
 
 DEFUN (clear_vnc_mac_un_prefix,
        clear_vnc_mac_un_prefix_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -4014,7 +4008,7 @@ DEFUN (clear_vnc_mac_un_prefix,
 
 DEFUN (clear_vnc_mac_vn_prefix,
        clear_vnc_mac_vn_prefix_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -4047,7 +4041,7 @@ DEFUN (clear_vnc_mac_vn_prefix,
 
 DEFUN (clear_vnc_mac_all_prefix,
        clear_vnc_mac_all_prefix_cmd,
-       "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+       "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> prefix <*|A.B.C.D/M|X:X::X:X/M>",
        "clear\n"
        "VNC Information\n"
        "Clear mac registration information\n"
@@ -4395,13 +4389,15 @@ static void rfapi_show_registrations(struct vty *vty,
 
 DEFUN (vnc_show_registrations_pfx,
        vnc_show_registrations_pfx_cmd,
-       "show vnc registrations [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+       "show vnc registrations [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
        SHOW_STR
        VNC_SHOW_STR
        "List active prefix registrations\n"
+       "Limit output to a particualr IPV4 address\n"
        "Limit output to a particular IPv4 prefix\n"
+       "Limit output to a particualr IPV6 address\n"
        "Limit output to a particular IPv6 prefix\n"
-       "Limit output to a particular IPv6 address\n")
+       "Limit output to a particular MAC address\n")
 {
        struct prefix p;
        struct prefix *p_addr = NULL;
@@ -4421,7 +4417,7 @@ DEFUN (vnc_show_registrations_pfx,
 
 DEFUN (vnc_show_registrations_some_pfx,
          vnc_show_registrations_some_pfx_cmd,
-         "show vnc registrations <all|holddown|imported|local|remote> [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+         "show vnc registrations <all|holddown|imported|local|remote> [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
          SHOW_STR
          VNC_SHOW_STR
          "List active prefix registrations\n"
@@ -4430,9 +4426,11 @@ DEFUN (vnc_show_registrations_some_pfx,
          "show only imported prefixes\n"
          "show only local registrations\n"
          "show only remote registrations\n"
-         "Limit output to a particular prefix or address\n"
-         "Limit output to a particular prefix or address\n"
-         "Limit output to a particular prefix or address\n")
+         "Limit output to a particualr IPV4 address\n"
+         "Limit output to a particular IPv4 prefix\n"
+         "Limit output to a particualr IPV6 address\n"
+         "Limit output to a particular IPv6 prefix\n"
+         "Limit output to a particular MAC address\n")
 {
        struct prefix p;
        struct prefix *p_addr = NULL;
@@ -4482,13 +4480,15 @@ DEFUN (vnc_show_registrations_some_pfx,
 
 DEFUN (vnc_show_responses_pfx,
        vnc_show_responses_pfx_cmd,
-       "show vnc responses [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+       "show vnc responses [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
        SHOW_STR
        VNC_SHOW_STR
        "List recent query responses\n"
+       "Limit output to a particualr IPV4 address\n"
        "Limit output to a particular IPv4 prefix\n"
+       "Limit output to a particualr IPV6 address\n"
        "Limit output to a particular IPv6 prefix\n"
-       "Limit output to a particular IPv6 address\n" )
+       "Limit output to a particular MAC address\n" )
 {
        struct prefix p;
        struct prefix *p_addr = NULL;
@@ -4513,15 +4513,17 @@ DEFUN (vnc_show_responses_pfx,
 
 DEFUN (vnc_show_responses_some_pfx,
        vnc_show_responses_some_pfx_cmd,
-       "show vnc responses <active|removed> [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+       "show vnc responses <active|removed> [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
        SHOW_STR
        VNC_SHOW_STR
        "List recent query responses\n"
        "show only active query responses\n"
        "show only removed query responses\n"
+       "Limit output to a particualr IPV4 address\n"
        "Limit output to a particular IPv4 prefix\n"
+       "Limit output to a particualr IPV6 address\n"
        "Limit output to a particular IPv6 prefix\n"
-       "Limit output to a particular IPV6 address\n")
+       "Limit output to a particular MAC address\n")
 {
        struct prefix p;
        struct prefix *p_addr = NULL;
@@ -4565,13 +4567,15 @@ DEFUN (vnc_show_responses_some_pfx,
 
 DEFUN (show_vnc_queries_pfx,
        show_vnc_queries_pfx_cmd,
-       "show vnc queries [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+       "show vnc queries [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
        SHOW_STR
        VNC_SHOW_STR
        "List active queries\n"
-       "Limit output to a particular IPv4 prefix or address\n"
+       "Limit output to a particualr IPV4 address\n"
+       "Limit output to a particular IPv4 prefix\n"
+       "Limit output to a particualr IPV6 address\n"
        "Limit output to a particular IPv6 prefix\n"
-       "Limit output to a particualr IPV6 address\n")
+       "Limit output to a particualr MAC address\n")
 {
        struct prefix pfx;
        struct prefix *p = NULL;
index 7cfa2ed67dec1282e33ed550440cec647507ff91..05e45bc4c88e280183e988eae018ada841ac887b 100644 (file)
@@ -564,7 +564,6 @@ static struct ecommunity *vnc_route_origin_ecom_single(struct in_addr *origin)
        roec.val[7] = 0;
 
        new = ecommunity_new();
-       assert(new);
        ecommunity_add_val(new, &roec, false, false);
 
        if (!new->size) {
@@ -1713,7 +1712,7 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
         * export expiration timer is already running on
         * this route: cancel it
         */
-       thread_cancel(&eti->timer);
+       THREAD_OFF(eti->timer);
 
        bgp_update(peer, prefix, /* prefix */
                   0,            /* addpath_id */
@@ -1727,7 +1726,7 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
 
 static void vncExportWithdrawTimer(struct thread *t)
 {
-       struct vnc_export_info *eti = t->arg;
+       struct vnc_export_info *eti = THREAD_ARG(t);
        const struct prefix *p = agg_node_get_prefix(eti->node);
 
        /*
@@ -1944,7 +1943,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi)
                                         * already running on
                                         * this route: cancel it
                                         */
-                                       thread_cancel(&eti->timer);
+                                       THREAD_OFF(eti->timer);
 
                                        vnc_zlog_debug_verbose(
                                                "%s: calling bgp_update",
@@ -2013,7 +2012,7 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi)
                                        ZEBRA_ROUTE_VNC_DIRECT_RH,
                                        BGP_ROUTE_REDISTRIBUTE);
                                if (eti) {
-                                       thread_cancel(&eti->timer);
+                                       THREAD_OFF(eti->timer);
                                        vnc_eti_delete(eti);
                                }
 
index 255f868bdf6c12d49891988fb304bce4e7b31062..743576d2654ec8c466873c24ef5e99483cd1d7b0 100644 (file)
@@ -119,7 +119,6 @@ struct vnc_export_info *vnc_eti_get(struct bgp *bgp, vnc_export_type_t etype,
                agg_unlock_node(etn);
        } else {
                eti = XCALLOC(MTYPE_RFAPI_ETI, sizeof(struct vnc_export_info));
-               assert(eti);
                eti->node = etn;
                eti->peer = peer;
                peer_lock(peer);
index 293f88d1df637b6c772b8f42e4b3198f6b708fec..fe818987b87aebeb7ec99a378d18aff30f1e2882 100644 (file)
@@ -193,7 +193,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
                         * is not strictly necessary, but serves as a reminder
                         * to those who may meddle...
                         */
-                       frr_with_mutex(&vncHD1VR.peer->io_mtx) {
+                       frr_with_mutex (&vncHD1VR.peer->io_mtx) {
                                // we don't need any I/O related facilities
                                if (vncHD1VR.peer->ibuf)
                                        stream_fifo_free(vncHD1VR.peer->ibuf);
index 330752a79f985702ebb318ffc1e2e8ae972205e6..b7e17d3565543a45dfd62710423895703a7aa1d3 100644 (file)
@@ -635,6 +635,8 @@ AC_ARG_ENABLE([isisd],
   AS_HELP_STRING([--disable-isisd], [do not build isisd]))
 AC_ARG_ENABLE([pimd],
   AS_HELP_STRING([--disable-pimd], [do not build pimd]))
+AC_ARG_ENABLE([pim6d],
+  AS_HELP_STRING([--enable-pim6d], [build pim6d]))
 AC_ARG_ENABLE([pbrd],
   AS_HELP_STRING([--disable-pbrd], [do not build pbrd]))
 AC_ARG_ENABLE([sharpd],
@@ -1751,6 +1753,10 @@ AS_IF([test "$enable_pimd" != "no"], [
   AC_DEFINE([HAVE_PIMD], [1], [pimd])
 ])
 
+AS_IF([test "$enable_pim6d" = "yes"], [
+  AC_DEFINE([HAVE_PIM6D], [1], [pim6d])
+])
+
 AS_IF([test "$enable_pbrd" != "no"], [
   AC_DEFINE([HAVE_PBRD], [1], [pbrd])
 ])
@@ -2711,6 +2717,7 @@ AM_CONDITIONAL([BABELD], [test "$enable_babeld" != "no"])
 AM_CONDITIONAL([OSPF6D], [test "$enable_ospf6d" != "no"])
 AM_CONDITIONAL([ISISD], [test "$enable_isisd" != "no"])
 AM_CONDITIONAL([PIMD], [test "$enable_pimd" != "no"])
+AM_CONDITIONAL([PIM6D], [test "$enable_pim6d" = "yes"])
 AM_CONDITIONAL([PBRD], [test "$enable_pbrd" != "no"])
 AM_CONDITIONAL([SHARPD], [test "$enable_sharpd" = "yes"])
 AM_CONDITIONAL([STATICD], [test "$enable_staticd" != "no"])
index feaf92d0738cf1156b4c3b783522a7a2de506e40..9c14270b664d72f2f5dd2b3e868fd2617147e81c 100644 (file)
@@ -1,10 +1,16 @@
 frr (8.4~dev-1) UNRELEASED; urgency=medium
 
-  * New upstream release...
+  * FRR Dev 8.4
 
- -- Donatas Abraitis <donatas@opensourcerouting.org>  Tue, 07 May 2022 23:00:00 +0300
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com>  Wed, 20 Jul 2022 10:00:00 +0500
 
-frr (8.2-0) UNRELEASED; urgency=medium
+frr (8.3-0) unstable; urgency=medium
+
+  * New upstream release FRR 8.3
+
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com>  Wed, 13 Jul 2022 10:00:00 +0500
+
+frr (8.2-0) unstable; urgency=medium
 
   * New upstream release FRR 8.2
 
index 7c90979a2256fde4291571b41f884f058056a9b0..e8bf1a8ffa55ec860edef01d9ba12485b7b9960d 100644 (file)
@@ -19,7 +19,7 @@ Build-Depends: bison,
                libpcre2-dev,
                libpython3-dev,
                libreadline-dev,
-               librtr-dev (>= 0.8.0) <!pkg.frr.nortrlib>,
+               librtr-dev (>= 0.8.0~) <!pkg.frr.nortrlib>,
                libsnmp-dev,
                libssh-dev <!pkg.frr.nortrlib>,
                libyang2-dev,
index 1e0e726cb421cec3ae944fa53ce315762bc11d8e..735af6539b267502cee8ff870bc74475704d3b0d 100644 (file)
@@ -17,7 +17,7 @@
             # open, as well as the daemons, so always signal the daemons.
             # It's safe, a NOP if (only) syslog is being used.
             for i in babeld bgpd eigrpd isisd ldpd nhrpd ospf6d ospfd sharpd \
-                pimd ripd ripngd zebra pathd pbrd staticd bfdd fabricd vrrpd; do
+                pimd pim6d ripd ripngd zebra pathd pbrd staticd bfdd fabricd vrrpd; do
                 if [ -e /var/run/frr/$i.pid ] ; then
                     pids="$pids $(cat /var/run/frr/$i.pid)"
                 fi
index 7a719b7c60457dc169defc06db5dae97cf113f54..fdb458e6a84d3dce1fe88cb231c377329ff000e4 100755 (executable)
@@ -27,6 +27,12 @@ else
   CONF_LUA=--enable-scripting
 endif
 
+ifeq ($(filter pkg.frr.pim6d,$(DEB_BUILD_PROFILES)),)
+  CONF_PIM6=--disable-pim6d
+else
+  CONF_PIM6=--enable-pim6d
+endif
+
 export PYTHON=python3
 
 %:
@@ -46,6 +52,7 @@ override_dh_auto_configure:
                \
                $(CONF_RPKI) \
                $(CONF_LUA) \
+               $(CONF_PIM6) \
                --with-libpam \
                --enable-doc \
                --enable-doc-html \
index c8366480d2a7db4ca7bec7b4e5bb3311bc847ecc..bce13111880dde9bf47f8fcfa2a6d7d9cec5111f 100644 (file)
@@ -7,7 +7,7 @@ FRR ships two small wrappers around ``pthread_mutex_lock()`` /
 ``pthread_mutex_unlock``.  Use ``#include "frr_pthread.h"`` to get these
 macros.
 
-.. c:macro:: frr_with_mutex(mutex)
+.. c:macro:: frr_with_mutex (mutex)
 
    (With ``pthread_mutex_t *mutex``.)
 
@@ -17,7 +17,7 @@ macros.
 
       int somefunction(int option)
       {
-          frr_with_mutex(&my_mutex) {
+          frr_with_mutex (&my_mutex) {
               /* mutex will be locked */
 
               if (!option)
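
For contrast, the same guarded section written against the bare pthread calls that these macros wrap must release the lock on every exit path by hand; a rough hand-written equivalent (a sketch only, assuming a ``pthread_mutex_t my_mutex`` as in the example above):

	#include <pthread.h>

	extern pthread_mutex_t my_mutex;

	int somefunction_explicit(int option)
	{
		pthread_mutex_lock(&my_mutex);

		if (!option) {
			/* every early return must remember to unlock */
			pthread_mutex_unlock(&my_mutex);
			return -1;
		}

		/* ... work with the mutex held ... */

		pthread_mutex_unlock(&my_mutex);
		return 0;
	}
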
index a81e05249015d7f2be26169c892fb19561b54404..9aeb78c4fd88e8d378b2fcbc5caa07106cb751b1 100644 (file)
@@ -64,6 +64,10 @@ buster.)
      +================+===================+=========================================+
      | pkg.frr.rtrlib | pkg.frr.nortrlib  | builds frr-rpki-rtrlib package (or not) |
      +----------------+-------------------+-----------------------------------------+
+     | pkg.frr.lua    | pkg.frr.nolua     | builds lua scripting extension          |
+     +----------------+-------------------+-----------------------------------------+
+     | pkg.frr.pim6d  | pkg.frr.nopim6d   | builds pim6d (work in progress)         |
+     +----------------+-------------------+-----------------------------------------+
 
    * the ``-uc -us`` options to disable signing the packages with your GPG key
 
index c8248194b7cc12924ad402b8790c04598436b4e3..ac4405121ea8c33e60ec885f8e21c50cab0de798 100644 (file)
@@ -13,7 +13,7 @@ operation (and not a set of APIs.)  The core ideas are:
   "invisible" copies.  Other threads, when they access the structure, see an
   older (but consistent) copy.
 
-* once done, the updated copy is swapped in in a single operation so that
+* once done, the updated copy is swapped in a single operation so that
   other threads see either the old or the new data but no inconsistent state
   between.
 
index 6c1d9148d111b95cdc65f3ae66f20fd3badb86f0..ada182d8479f13330883daaa783d957c47aec26a 100644 (file)
@@ -38,6 +38,12 @@ Installing Topotest Requirements
    # To enable the gRPC topotest install:
    python3 -m pip install grpcio grpcio-tools
 
+   # Install the socat tool to run PIMv6 tests.
+   # The socat code can be taken from the URL below,
+   # which has the latest changes done for PIMv6
+   # join and traffic:
+   https://github.com/opensourcerouting/socat/
+
 
 Enable Coredumps
 """"""""""""""""
index 42faefd10b037594fe02648ec658c1fd220e7b70..7679a377eb5fc5dffc30abdf05b8c8a136353fec 100644 (file)
@@ -338,6 +338,12 @@ Basic Config Commands
 
    Restrict vty connections with an access list.
 
+.. clicmd:: allow-reserved-ranges
+
+   Allow using IPv4 reserved (Class E) IP ranges for daemons. E.g.: setting
+   IPv4 addresses for interfaces or allowing reserved ranges in BGP next-hops.
+
+   Default: off.
 
 .. _sample-config-file:
 
index c47ed04f63a4afe1a7ccc790cf484a48902505e0..0eb1064519b598271fccaa181a28fe6cf8ed821c 100644 (file)
@@ -518,6 +518,10 @@ You can inspect the current BFD peer status with the following commands:
    frr# show bfd peer 192.168.0.1 json
    {"multihop":false,"peer":"192.168.0.1","id":1,"remote-id":1,"status":"up","uptime":161,"diagnostic":"ok","remote-diagnostic":"ok","receive-interval":300,"transmit-interval":300,"echo-receive-interval":50,"echo-transmit-interval":0,"detect-multiplier":3,"remote-receive-interval":300,"remote-transmit-interval":300,"remote-echo-receive-interval":50,"remote-detect-multiplier":3,"peer-type":"dynamic"}
 
+If you are running IPv4 BFD Echo on a Linux platform, we also
+calculate the round trip time for the packets.  We display the minimum,
+average and maximum time it took to receive the looped Echo packets
+in the RTT fields.
 
 You can inspect the current BFD peer status in brief with the following commands:
 
index ee5c389bca657b66facdb2fa2f0ac32e79e19171..e31bfe7bfad3ba53e27314d3be1363cc421b9aab 100644 (file)
@@ -1672,7 +1672,7 @@ Configuring Peers
 
 .. clicmd:: bgp fast-external-failover
 
-   This command causes bgp to not take down ebgp peers immediately
+   This command causes bgp to take down ebgp peers immediately
    when a link flaps.  `bgp fast-external-failover` is the default
    and will not be displayed as part of a `show run`.  The no form
    of the command turns off this ability.
@@ -2334,11 +2334,8 @@ setting BGP communities attribute to the updates.
     exit-address-family
    !
    bgp community-list 70 permit 7675:70
-   bgp community-list 70 deny
    bgp community-list 80 permit 7675:80
-   bgp community-list 80 deny
    bgp community-list 90 permit 7675:90
-   bgp community-list 90 deny
    !
    route-map RMAP permit 10
     match community 70
@@ -2827,6 +2824,14 @@ address-family:
    The CLI will disallow attempts to configure incompatible leaking
    modes.
 
+.. clicmd:: bgp retain route-target all
+
+It is possible to retain, or not retain, VPN prefixes that are not imported
+by the local VRF configuration, using this command in the context of the
+global VPNv4/VPNv6 address-family. The command defaults to on and is not
+displayed.
+The `no bgp retain route-target all` form of the command is displayed.
+
 .. _bgp-l3vpn-srv6:
 
 L3VPN SRv6
@@ -3429,6 +3434,12 @@ Debugging
    Display Listen sockets and the vrf that created them.  Useful for debugging of when
    listen is not working and this is considered a developer debug statement.
 
+.. clicmd:: debug bgp allow-martian
+
+   Enable or disable BGP accepting martian nexthops from a peer.  Please note
+   this is not an actual debug command and this command is also being deprecated
+   and will be removed soon.  The new command is :clicmd:`bgp allow-martian-nexthop`
+
 .. clicmd:: debug bgp bfd
 
    Enable or disable debugging for BFD events. This will show BFD integration
@@ -3795,6 +3806,10 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
 
    If the ``json`` option is specified, output is displayed in JSON format.
 
+.. clicmd:: show [ip] bgp [afi] [safi] [all] access-list WORD [wide|json]
+
+   Display routes that match the specified access-list.
+
 .. clicmd:: show [ip] bgp [afi] [safi] [all] filter-list WORD [wide|json]
 
    Display routes that match the specified AS-Path filter-list.
@@ -4070,6 +4085,12 @@ unless removed from the configuration with the negating command prior to the
 configuration write operation.  At this point in time non SAFI_UNICAST BGP
 data is not properly withdrawn from zebra when this command is issued.
 
+.. clicmd:: bgp allow-martian-nexthop
+
+When a martian nexthop is received from a peer as part of the NLRI for a
+route, permit the nexthop to be used as is, instead of rejecting the route
+and resetting the connection.
+
 .. clicmd:: bgp send-extra-data zebra
 
 This command turns on the ability of BGP to send extra data to zebra. Currently,
index 764584f89ce532d4a02c52104802aa3e2e1885c5..1983995c1fdb8d85c2cc26b3747648af64e4736e 100644 (file)
@@ -149,8 +149,9 @@ associated with a particular ``bmp targets``:
 .. clicmd:: bmp monitor AFI SAFI <pre-policy|post-policy>
 
    Perform Route Monitoring for the specified AFI and SAFI.  Only IPv4 and
-   IPv6 are currently valid for AFI, and only unicast and multicast are valid
-   for SAFI.  Other AFI/SAFI combinations may be added in the future.
+   IPv6 are currently valid for AFI. Valid SAFI values are currently
+   unicast, multicast, evpn and vpn.
+   Other AFI/SAFI combinations may be added in the future.
 
    All BGP neighbors are included in Route Monitoring.  Options to select
    a subset of BGP sessions may be added in the future.
index bf401825e05c73ee5256444e59a6a6112db00a59..91ca1b89f08de89df475c2a9f59d6a7b118c9391 100644 (file)
@@ -258,7 +258,7 @@ feature you're interested in, it should be supported on your platform.
 +-----------------------------------+----------------+--------------+------------+------------+
 | **Multicast Routing**             |                |              |            |            |
 +-----------------------------------+----------------+--------------+------------+------------+
-| `pimd` (PIM)                      | :mark:`≥4.18`  | :mark:`N`    | :mark:`Y`  | :mark:`Y`  |
+| `pimd` (PIM)                      | :mark:`≥4.19`  | :mark:`N`    | :mark:`Y`  | :mark:`Y`  |
 +-----------------------------------+----------------+--------------+------------+------------+
 |    SSM (Source Specific)          | :mark:`Y`      | :mark:`N`    | :mark:`Y`  | :mark:`Y`  |
 +-----------------------------------+----------------+--------------+------------+------------+
index ae39f4220d0b701a484905661f2f1d6571f02c12..44ade916a2227869dd42ba815e852686a9120c40 100644 (file)
@@ -13,7 +13,7 @@ network for optimizing forwarding of overlay BUM traffic.
 
 .. note::
 
-   On Linux for PIM-SM operation you *must* have kernel version 4.18 or greater.
+   On Linux for PIM-SM operation you *must* have kernel version 4.19 or greater.
    To use PIM for EVPN BUM forwarding, kernels 5.0 or greater are required.
    OpenBSD has no multicast support and FreeBSD, and NetBSD only
    have support for SSM.
index ca7d3872bf027a8946d1c08a8ba84b668e290624..851e58b814cc28fdaf60e303470c124af69f6562 100644 (file)
@@ -385,10 +385,25 @@ Clear commands reset various variables.
    packet count, byte count and wrong interface to 0 and start count
    up from this spot.
 
+.. clicmd:: clear ipv6 pim interfaces
+
+   Reset PIMv6 interfaces.
+
+.. clicmd:: clear ipv6 pim [vrf NAME] interface traffic
+
+   When this command is issued, it resets the information about the
+   number of PIM protocol packets sent/received on an interface.
+
 .. clicmd:: clear ipv6 pim oil
 
    Rescan PIMv6 OIL (output interface list).
 
+.. clicmd:: clear ipv6 pim [vrf NAME] bsr-data
+
+   This command will clear the BSM scope data struct. This command also
+   removes the next hop tracking for the bsr and resets the upstreams
+   for the dynamically learnt RPs.
+
 PIMv6 Debug Commands
 ====================
 
@@ -430,3 +445,11 @@ the config was written out.
 .. clicmd:: debug pimv6 zebra
 
    This gathers data about events from zebra that come up through the ZAPI.
+
+.. clicmd:: debug mroute6
+
+   This turns on debugging for PIMv6 interaction with kernel MFC cache.
+
+.. clicmd:: debug mroute6 detail
+
+   This turns on detailed debugging for PIMv6 interaction with kernel MFC cache.
index 0a155bf4892ed12356debd7c9d542aa805beee98..66bdd26f1a785a041d4404b2ed114f522d70407d 100644 (file)
@@ -119,14 +119,14 @@ RIP Configuration
 .. clicmd:: neighbor A.B.C.D
 
 
-   Specify RIP neighbor. When a neighbor doesn't understand multicast, this
-   command is used to specify neighbors. In some cases, not all routers will be
-   able to understand multicasting, where packets are sent to a network or a
-   group of addresses. In a situation where a neighbor cannot process multicast
-   packets, it is necessary to establish a direct link between routers. The
-   neighbor command allows the network administrator to specify a router as a
-   RIP neighbor. The `no neighbor a.b.c.d` command will disable the RIP
-   neighbor.
+   Specify a RIP neighbor to send updates to. This is required when a neighbor
+   is connected via a network that does not support multicast, or when it is
+   desired to statically define a neighbor. RIP updates will be sent via unicast
+   to each neighbor. Neighbor updates are in addition to any multicast updates
+   sent when an interface is not in passive mode (see the `passive-interface`
+   command). RIP will continue to process updates received from both the
+   neighbor and any received via multicast. The `no neighbor a.b.c.d` command
+   will disable the RIP neighbor.
 
    Below is very simple RIP configuration. Interface `eth0` and interface which
    address match to `10.0.0.0/8` are RIP enabled.
index d96df0ce522901215687cb4cd4b66ff751419617..05990e2523d1f3be77c0414d5460ea2110d07ce3 100644 (file)
@@ -155,6 +155,13 @@ Standard Commands
    Set description for the interface.
 
 
+.. clicmd:: mpls enable
+
+   Enable or disable mpls kernel processing on the interface (Linux only).  Interfaces
+   configured with mpls will not automatically turn on if the mpls kernel modules do
+   not happen to be loaded.  This command will fail on 3.X Linux kernels and does not
+   work on non-Linux systems at all.
+
 .. clicmd:: multicast
 
 
index fa4b9859b91a7ca743303674f307d777aa2732e7..187b5285244695f0d35250293d3784ecee6ebe7f 100644 (file)
@@ -15,11 +15,12 @@ RUN adduser -D -G abuild builder && su builder -c 'abuild-keygen -a -n'
 # This stage builds a libyang APK from source
 FROM alpine-builder as libyang-builder
 RUN mkdir -p /libyang && chown -R builder /pkgs /libyang
-COPY docker/alpine/libyang/ /libyang
-USER builder
-RUN cd /libyang \
-       && abuild checksum \
-       && abuild -r -P /pkgs/apk
+# -- Not currently needed - libyang is available in Alpine upstream
+# COPY docker/alpine/libyang/ /libyang
+# USER builder
+# RUN cd /libyang \
+#      && abuild checksum \
+#      && abuild -r -P /pkgs/apk
 
 # This stage builds a dist tarball from the source
 FROM alpine:3.15 as source-builder
@@ -37,10 +38,11 @@ RUN source /src/alpine/APKBUILD.in \
        && pip install pytest
 
 RUN mkdir -p /pkgs/apk
-COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
-RUN apk add \
-               --no-cache \
-               --allow-untrusted /pkgs/apk/*/*.apk
+# -- Not needed while libyang is not built
+# COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
+# RUN apk add \
+#              --no-cache \
+#              --allow-untrusted /pkgs/apk/*/*.apk \
 
 COPY . /src
 ARG PKGVER
@@ -53,12 +55,13 @@ RUN cd /src \
 
 # This stage builds an APK from the dist tarball
 FROM alpine-builder as frr-apk-builder
-COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
+# -- Not needed while libyang is not built
+# COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
+# RUN apk add \
+#              --no-cache \
+#              --allow-untrusted /pkgs/apk/*/*.apk
 COPY --from=source-builder /src/frr-*.tar.gz /src/alpine/* /dist/
 RUN find /pkgs/apk -type f -name APKINDEX.tar.gz -delete
-RUN apk add \
-               --no-cache \
-               --allow-untrusted /pkgs/apk/*/*.apk
 RUN chown -R builder /dist /pkgs
 USER builder
 RUN cd /dist \
index 9fa20bf4d1badc3a1b57e6b42a82c6d03a7b6a1b..aa792e7f0b91fde3da01029f095e4f9e11c8c5c3 100755 (executable)
@@ -1,7 +1,7 @@
 # Contributor: Sören Tempel <soeren+alpine@soeren-tempel.net>
 # Maintainer: Christian Franke <nobody@nowhere.ws>
 pkgname=libyang
-pkgver=2.0.7
+pkgver=2.0.194
 pkgrel=0
 pkgdesc="YANG data modelling language parser and toolkit"
 url="https://github.com/CESNET/libyang"
@@ -10,9 +10,7 @@ license="BSD-3-Clause-Clear"
 makedepends="bison cmake cmocka-dev flex pcre2-dev"
 checkdepends="expect grep shunit2"
 subpackages="$pkgname-dev $pkgname-doc"
-source="$pkgname-$pkgver.tar.gz::https://github.com/CESNET/libyang/archive/v$pkgver.tar.gz
-       10-remove-non-standard-headers.patch
-       11-utest-dont-parse-dlerror.patch"
+source="$pkgname-$pkgver.tar.gz::https://github.com/CESNET/libyang/archive/v$pkgver.tar.gz"
 
 # secfixes:
 #   1.0.215-r1:
@@ -40,7 +38,3 @@ build() {
 package() {
        make -C build DESTDIR="$pkgdir" install
 }
-
-sha512sums="edb1d8d372b25ed820fa312e0dc96d4af7c8cd5ddeb785964de73f64774062ea7a5586bb27e2039ad24189d4a2ba04268921ca86e82423fc48647d1d10a2a0a7  libyang-2.0.7.tar.gz
-385008c715e6b0dc9e8f33c9cb550b3af7ee16f056f35d09a4ba01b9e00ddb88940915f93fc608fedd30b4f9a6a1503df414ae0be64b1263681b0ee18e6f4db8  10-remove-non-standard-headers.patch
-b16881d301a6aec68fbe6bfb7ba53a8fcdb4b9eead3b03573e0e2a4a8c3c3d6962db623be14d29c023b5a7ad0f685da1f6033dd9985f7a2914ad2f4da07e60cb  11-utest-dont-parse-dlerror.patch"
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
new file mode 100644 (file)
index 0000000..7ea59cf
--- /dev/null
@@ -0,0 +1,776 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_CLS_H
+#define __LINUX_PKT_CLS_H
+
+#include <linux/types.h>
+#include <linux/pkt_sched.h>
+
+#define TC_COOKIE_MAX_SIZE 16
+
+/* Action attributes */
+enum {
+       TCA_ACT_UNSPEC,
+       TCA_ACT_KIND,
+       TCA_ACT_OPTIONS,
+       TCA_ACT_INDEX,
+       TCA_ACT_STATS,
+       TCA_ACT_PAD,
+       TCA_ACT_COOKIE,
+       TCA_ACT_FLAGS,
+       TCA_ACT_HW_STATS,
+       TCA_ACT_USED_HW_STATS,
+       __TCA_ACT_MAX
+};
+
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
+                                        * actions stats.
+                                        */
+
+/* tca HW stats type
+ * When user does not pass the attribute, he does not care.
+ * It is the same as if he would pass the attribute with
+ * all supported bits set.
+ * In case no bits are set, user is not interested in getting any HW statistics.
+ */
+#define TCA_ACT_HW_STATS_IMMEDIATE (1 << 0) /* Means that in dump, user
+                                            * gets the current HW stats
+                                            * state from the device
+                                            * queried at the dump time.
+                                            */
+#define TCA_ACT_HW_STATS_DELAYED (1 << 1) /* Means that in dump, user gets
+                                          * HW stats that might be out of date
+                                          * for some time, maybe couple of
+                                          * seconds. This is the case when
+                                          * driver polls stats updates
+                                          * periodically or when it gets async
+                                          * stats update from the device.
+                                          */
+
+#define TCA_ACT_MAX __TCA_ACT_MAX
+#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
+#define TCA_ACT_MAX_PRIO 32
+#define TCA_ACT_BIND   1
+#define TCA_ACT_NOBIND 0
+#define TCA_ACT_UNBIND 1
+#define TCA_ACT_NOUNBIND       0
+#define TCA_ACT_REPLACE                1
+#define TCA_ACT_NOREPLACE      0
+
+#define TC_ACT_UNSPEC  (-1)
+#define TC_ACT_OK              0
+#define TC_ACT_RECLASSIFY      1
+#define TC_ACT_SHOT            2
+#define TC_ACT_PIPE            3
+#define TC_ACT_STOLEN          4
+#define TC_ACT_QUEUED          5
+#define TC_ACT_REPEAT          6
+#define TC_ACT_REDIRECT                7
+#define TC_ACT_TRAP            8 /* For hw path, this means "trap to cpu"
+                                  * and don't further process the frame
+                                  * in hardware. For sw path, this is
+                                  * equivalent of TC_ACT_STOLEN - drop
+                                  * the skb and act like everything
+                                  * is alright.
+                                  */
+#define TC_ACT_VALUE_MAX       TC_ACT_TRAP
+
+/* There is a special kind of actions called "extended actions",
+ * which need a value parameter. These have a local opcode located in
+ * the highest nibble, starting from 1. The rest of the bits
+ * are used to carry the value. These two parts together make
+ * a combined opcode.
+ */
+#define __TC_ACT_EXT_SHIFT 28
+#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
+#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
+
+#define TC_ACT_JUMP __TC_ACT_EXT(1)
+#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX  TC_ACT_GOTO_CHAIN
+
+/* These macros are put here for binary compatibility with userspace apps that
+ * make use of them. For kernel code and new userspace apps, use the TCA_ID_*
+ * versions.
+ */
+#define TCA_ACT_GACT 5
+#define TCA_ACT_IPT 6
+#define TCA_ACT_PEDIT 7
+#define TCA_ACT_MIRRED 8
+#define TCA_ACT_NAT 9
+#define TCA_ACT_XT 10
+#define TCA_ACT_SKBEDIT 11
+#define TCA_ACT_VLAN 12
+#define TCA_ACT_BPF 13
+#define TCA_ACT_CONNMARK 14
+#define TCA_ACT_SKBMOD 15
+#define TCA_ACT_CSUM 16
+#define TCA_ACT_TUNNEL_KEY 17
+#define TCA_ACT_SIMP 22
+#define TCA_ACT_IFE 25
+#define TCA_ACT_SAMPLE 26
+
+/* Action type identifiers*/
+enum tca_id {
+       TCA_ID_UNSPEC = 0,
+       TCA_ID_POLICE = 1,
+       TCA_ID_GACT = TCA_ACT_GACT,
+       TCA_ID_IPT = TCA_ACT_IPT,
+       TCA_ID_PEDIT = TCA_ACT_PEDIT,
+       TCA_ID_MIRRED = TCA_ACT_MIRRED,
+       TCA_ID_NAT = TCA_ACT_NAT,
+       TCA_ID_XT = TCA_ACT_XT,
+       TCA_ID_SKBEDIT = TCA_ACT_SKBEDIT,
+       TCA_ID_VLAN = TCA_ACT_VLAN,
+       TCA_ID_BPF = TCA_ACT_BPF,
+       TCA_ID_CONNMARK = TCA_ACT_CONNMARK,
+       TCA_ID_SKBMOD = TCA_ACT_SKBMOD,
+       TCA_ID_CSUM = TCA_ACT_CSUM,
+       TCA_ID_TUNNEL_KEY = TCA_ACT_TUNNEL_KEY,
+       TCA_ID_SIMP = TCA_ACT_SIMP,
+       TCA_ID_IFE = TCA_ACT_IFE,
+       TCA_ID_SAMPLE = TCA_ACT_SAMPLE,
+       TCA_ID_CTINFO,
+       TCA_ID_MPLS,
+       TCA_ID_CT,
+       TCA_ID_GATE,
+       /* other actions go here */
+       __TCA_ID_MAX = 255
+};
+
+#define TCA_ID_MAX __TCA_ID_MAX
+
+struct tc_police {
+       __u32                   index;
+       int                     action;
+#define TC_POLICE_UNSPEC       TC_ACT_UNSPEC
+#define TC_POLICE_OK           TC_ACT_OK
+#define TC_POLICE_RECLASSIFY   TC_ACT_RECLASSIFY
+#define TC_POLICE_SHOT         TC_ACT_SHOT
+#define TC_POLICE_PIPE         TC_ACT_PIPE
+
+       __u32                   limit;
+       __u32                   burst;
+       __u32                   mtu;
+       struct tc_ratespec      rate;
+       struct tc_ratespec      peakrate;
+       int                     refcnt;
+       int                     bindcnt;
+       __u32                   capab;
+};
+
+struct tcf_t {
+       __u64   install;
+       __u64   lastuse;
+       __u64   expires;
+       __u64   firstuse;
+};
+
+struct tc_cnt {
+       int                   refcnt;
+       int                   bindcnt;
+};
+
+#define tc_gen \
+       __u32                 index; \
+       __u32                 capab; \
+       int                   action; \
+       int                   refcnt; \
+       int                   bindcnt
+
+enum {
+       TCA_POLICE_UNSPEC,
+       TCA_POLICE_TBF,
+       TCA_POLICE_RATE,
+       TCA_POLICE_PEAKRATE,
+       TCA_POLICE_AVRATE,
+       TCA_POLICE_RESULT,
+       TCA_POLICE_TM,
+       TCA_POLICE_PAD,
+       TCA_POLICE_RATE64,
+       TCA_POLICE_PEAKRATE64,
+       __TCA_POLICE_MAX
+#define TCA_POLICE_RESULT TCA_POLICE_RESULT
+};
+
+#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
+
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW  (1 << 0) /* don't offload filter to HW */
+#define TCA_CLS_FLAGS_SKIP_SW  (1 << 1) /* don't use filter in SW */
+#define TCA_CLS_FLAGS_IN_HW    (1 << 2) /* filter is offloaded to HW */
+#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
+#define TCA_CLS_FLAGS_VERBOSE  (1 << 4) /* verbose logging */
+
+/* U32 filters */
+
+#define TC_U32_HTID(h) ((h)&0xFFF00000)
+#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
+#define TC_U32_HASH(h) (((h)>>12)&0xFF)
+#define TC_U32_NODE(h) ((h)&0xFFF)
+#define TC_U32_KEY(h) ((h)&0xFFFFF)
+#define TC_U32_UNSPEC  0
+#define TC_U32_ROOT    (0xFFF00000)
+
+enum {
+       TCA_U32_UNSPEC,
+       TCA_U32_CLASSID,
+       TCA_U32_HASH,
+       TCA_U32_LINK,
+       TCA_U32_DIVISOR,
+       TCA_U32_SEL,
+       TCA_U32_POLICE,
+       TCA_U32_ACT,
+       TCA_U32_INDEV,
+       TCA_U32_PCNT,
+       TCA_U32_MARK,
+       TCA_U32_FLAGS,
+       TCA_U32_PAD,
+       __TCA_U32_MAX
+};
+
+#define TCA_U32_MAX (__TCA_U32_MAX - 1)
+
+struct tc_u32_key {
+       __be32          mask;
+       __be32          val;
+       int             off;
+       int             offmask;
+};
+
+struct tc_u32_sel {
+       unsigned char           flags;
+       unsigned char           offshift;
+       unsigned char           nkeys;
+
+       __be16                  offmask;
+       __u16                   off;
+       short                   offoff;
+
+       short                   hoff;
+       __be32                  hmask;
+       struct tc_u32_key       keys[0];
+};
+
+struct tc_u32_mark {
+       __u32           val;
+       __u32           mask;
+       __u32           success;
+};
+
+struct tc_u32_pcnt {
+       __u64 rcnt;
+       __u64 rhit;
+       __u64 kcnts[0];
+};
+
+/* Flags */
+
+#define TC_U32_TERMINAL                1
+#define TC_U32_OFFSET          2
+#define TC_U32_VAROFFSET       4
+#define TC_U32_EAT             8
+
+#define TC_U32_MAXDEPTH 8
+
+
+/* RSVP filter */
+
+enum {
+       TCA_RSVP_UNSPEC,
+       TCA_RSVP_CLASSID,
+       TCA_RSVP_DST,
+       TCA_RSVP_SRC,
+       TCA_RSVP_PINFO,
+       TCA_RSVP_POLICE,
+       TCA_RSVP_ACT,
+       __TCA_RSVP_MAX
+};
+
+#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
+
+struct tc_rsvp_gpi {
+       __u32   key;
+       __u32   mask;
+       int     offset;
+};
+
+struct tc_rsvp_pinfo {
+       struct tc_rsvp_gpi dpi;
+       struct tc_rsvp_gpi spi;
+       __u8    protocol;
+       __u8    tunnelid;
+       __u8    tunnelhdr;
+       __u8    pad;
+};
+
+/* ROUTE filter */
+
+enum {
+       TCA_ROUTE4_UNSPEC,
+       TCA_ROUTE4_CLASSID,
+       TCA_ROUTE4_TO,
+       TCA_ROUTE4_FROM,
+       TCA_ROUTE4_IIF,
+       TCA_ROUTE4_POLICE,
+       TCA_ROUTE4_ACT,
+       __TCA_ROUTE4_MAX
+};
+
+#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
+
+
+/* FW filter */
+
+enum {
+       TCA_FW_UNSPEC,
+       TCA_FW_CLASSID,
+       TCA_FW_POLICE,
+       TCA_FW_INDEV,
+       TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
+       TCA_FW_MASK,
+       __TCA_FW_MAX
+};
+
+#define TCA_FW_MAX (__TCA_FW_MAX - 1)
+
+/* TC index filter */
+
+enum {
+       TCA_TCINDEX_UNSPEC,
+       TCA_TCINDEX_HASH,
+       TCA_TCINDEX_MASK,
+       TCA_TCINDEX_SHIFT,
+       TCA_TCINDEX_FALL_THROUGH,
+       TCA_TCINDEX_CLASSID,
+       TCA_TCINDEX_POLICE,
+       TCA_TCINDEX_ACT,
+       __TCA_TCINDEX_MAX
+};
+
+#define TCA_TCINDEX_MAX     (__TCA_TCINDEX_MAX - 1)
+
+/* Flow filter */
+
+enum {
+       FLOW_KEY_SRC,
+       FLOW_KEY_DST,
+       FLOW_KEY_PROTO,
+       FLOW_KEY_PROTO_SRC,
+       FLOW_KEY_PROTO_DST,
+       FLOW_KEY_IIF,
+       FLOW_KEY_PRIORITY,
+       FLOW_KEY_MARK,
+       FLOW_KEY_NFCT,
+       FLOW_KEY_NFCT_SRC,
+       FLOW_KEY_NFCT_DST,
+       FLOW_KEY_NFCT_PROTO_SRC,
+       FLOW_KEY_NFCT_PROTO_DST,
+       FLOW_KEY_RTCLASSID,
+       FLOW_KEY_SKUID,
+       FLOW_KEY_SKGID,
+       FLOW_KEY_VLAN_TAG,
+       FLOW_KEY_RXHASH,
+       __FLOW_KEY_MAX,
+};
+
+#define FLOW_KEY_MAX   (__FLOW_KEY_MAX - 1)
+
+enum {
+       FLOW_MODE_MAP,
+       FLOW_MODE_HASH,
+};
+
+enum {
+       TCA_FLOW_UNSPEC,
+       TCA_FLOW_KEYS,
+       TCA_FLOW_MODE,
+       TCA_FLOW_BASECLASS,
+       TCA_FLOW_RSHIFT,
+       TCA_FLOW_ADDEND,
+       TCA_FLOW_MASK,
+       TCA_FLOW_XOR,
+       TCA_FLOW_DIVISOR,
+       TCA_FLOW_ACT,
+       TCA_FLOW_POLICE,
+       TCA_FLOW_EMATCHES,
+       TCA_FLOW_PERTURB,
+       __TCA_FLOW_MAX
+};
+
+#define TCA_FLOW_MAX   (__TCA_FLOW_MAX - 1)
+
+/* Basic filter */
+
+struct tc_basic_pcnt {
+       __u64 rcnt;
+       __u64 rhit;
+};
+
+enum {
+       TCA_BASIC_UNSPEC,
+       TCA_BASIC_CLASSID,
+       TCA_BASIC_EMATCHES,
+       TCA_BASIC_ACT,
+       TCA_BASIC_POLICE,
+       TCA_BASIC_PCNT,
+       TCA_BASIC_PAD,
+       __TCA_BASIC_MAX
+};
+
+#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
+
+
+/* Cgroup classifier */
+
+enum {
+       TCA_CGROUP_UNSPEC,
+       TCA_CGROUP_ACT,
+       TCA_CGROUP_POLICE,
+       TCA_CGROUP_EMATCHES,
+       __TCA_CGROUP_MAX,
+};
+
+#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
+
+/* BPF classifier */
+
+#define TCA_BPF_FLAG_ACT_DIRECT                (1 << 0)
+
+enum {
+       TCA_BPF_UNSPEC,
+       TCA_BPF_ACT,
+       TCA_BPF_POLICE,
+       TCA_BPF_CLASSID,
+       TCA_BPF_OPS_LEN,
+       TCA_BPF_OPS,
+       TCA_BPF_FD,
+       TCA_BPF_NAME,
+       TCA_BPF_FLAGS,
+       TCA_BPF_FLAGS_GEN,
+       TCA_BPF_TAG,
+       TCA_BPF_ID,
+       __TCA_BPF_MAX,
+};
+
+#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+
+/* Flower classifier */
+
+enum {
+       TCA_FLOWER_UNSPEC,
+       TCA_FLOWER_CLASSID,
+       TCA_FLOWER_INDEV,
+       TCA_FLOWER_ACT,
+       TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
+       TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
+       TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_TCP_DST,         /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_UDP_DST,         /* be16 */
+
+       TCA_FLOWER_FLAGS,
+       TCA_FLOWER_KEY_VLAN_ID,         /* be16 */
+       TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
+       TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
+
+       TCA_FLOWER_KEY_ENC_KEY_ID,      /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_SRC,    /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_DST,    /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+       TCA_FLOWER_KEY_ENC_IPV6_SRC,    /* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_DST,    /* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+       TCA_FLOWER_KEY_TCP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_TCP_DST_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_DST_MASK,    /* be16 */
+       TCA_FLOWER_KEY_SCTP_SRC_MASK,   /* be16 */
+       TCA_FLOWER_KEY_SCTP_DST_MASK,   /* be16 */
+
+       TCA_FLOWER_KEY_SCTP_SRC,        /* be16 */
+       TCA_FLOWER_KEY_SCTP_DST,        /* be16 */
+
+       TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,        /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,   /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_DST_PORT,        /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,   /* be16 */
+
+       TCA_FLOWER_KEY_FLAGS,           /* be32 */
+       TCA_FLOWER_KEY_FLAGS_MASK,      /* be32 */
+
+       TCA_FLOWER_KEY_ICMPV4_CODE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV4_TYPE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV6_CODE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV6_TYPE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
+
+       TCA_FLOWER_KEY_ARP_SIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_SIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_OP,          /* u8 */
+       TCA_FLOWER_KEY_ARP_OP_MASK,     /* u8 */
+       TCA_FLOWER_KEY_ARP_SHA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_SHA_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA_MASK,    /* ETH_ALEN */
+
+       TCA_FLOWER_KEY_MPLS_TTL,        /* u8 - 8 bits */
+       TCA_FLOWER_KEY_MPLS_BOS,        /* u8 - 1 bit */
+       TCA_FLOWER_KEY_MPLS_TC,         /* u8 - 3 bits */
+       TCA_FLOWER_KEY_MPLS_LABEL,      /* be32 - 20 bits */
+
+       TCA_FLOWER_KEY_TCP_FLAGS,       /* be16 */
+       TCA_FLOWER_KEY_TCP_FLAGS_MASK,  /* be16 */
+
+       TCA_FLOWER_KEY_IP_TOS,          /* u8 */
+       TCA_FLOWER_KEY_IP_TOS_MASK,     /* u8 */
+       TCA_FLOWER_KEY_IP_TTL,          /* u8 */
+       TCA_FLOWER_KEY_IP_TTL_MASK,     /* u8 */
+
+       TCA_FLOWER_KEY_CVLAN_ID,        /* be16 */
+       TCA_FLOWER_KEY_CVLAN_PRIO,      /* u8   */
+       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,  /* be16 */
+
+       TCA_FLOWER_KEY_ENC_IP_TOS,      /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TTL,      /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+       TCA_FLOWER_KEY_ENC_OPTS,
+       TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
+       TCA_FLOWER_IN_HW_COUNT,
+
+       TCA_FLOWER_KEY_PORT_SRC_MIN,    /* be16 */
+       TCA_FLOWER_KEY_PORT_SRC_MAX,    /* be16 */
+       TCA_FLOWER_KEY_PORT_DST_MIN,    /* be16 */
+       TCA_FLOWER_KEY_PORT_DST_MAX,    /* be16 */
+
+       TCA_FLOWER_KEY_CT_STATE,        /* u16 */
+       TCA_FLOWER_KEY_CT_STATE_MASK,   /* u16 */
+       TCA_FLOWER_KEY_CT_ZONE,         /* u16 */
+       TCA_FLOWER_KEY_CT_ZONE_MASK,    /* u16 */
+       TCA_FLOWER_KEY_CT_MARK,         /* u32 */
+       TCA_FLOWER_KEY_CT_MARK_MASK,    /* u32 */
+       TCA_FLOWER_KEY_CT_LABELS,       /* u128 */
+       TCA_FLOWER_KEY_CT_LABELS_MASK,  /* u128 */
+
+       TCA_FLOWER_KEY_MPLS_OPTS,
+
+       TCA_FLOWER_KEY_HASH,            /* u32 */
+       TCA_FLOWER_KEY_HASH_MASK,       /* u32 */
+
+       __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_CT_FLAGS_NEW = 1 << 0, /* Beginning of a new connection. */
+       TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
+       TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
+       TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
+       TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
+       TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+       __TCA_FLOWER_KEY_CT_FLAGS_MAX,
+};
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+                                        * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+                                        * attributes
+                                        */
+       TCA_FLOWER_KEY_ENC_OPTS_VXLAN,  /* Nested
+                                        * TCA_FLOWER_KEY_ENC_OPT_VXLAN_
+                                        * attributes
+                                        */
+       TCA_FLOWER_KEY_ENC_OPTS_ERSPAN, /* Nested
+                                        * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
+                                        * attributes
+                                        */
+       __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,            /* u16 */
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,             /* u8 */
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,             /* 4 to 128 bytes */
+
+       __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+               (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP,               /* u32 */
+       __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX \
+               (__TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER,              /* u8 */
+       TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX,            /* be32 */
+       TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,              /* u8 */
+       TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,             /* u8 */
+       __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
+               (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
+       TCA_FLOWER_KEY_MPLS_OPTS_LSE,
+       __TCA_FLOWER_KEY_MPLS_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_MPLS_OPTS_MAX (__TCA_FLOWER_KEY_MPLS_OPTS_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC,
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
+       TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+       __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX,
+};
+
+#define TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX \
+               (__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+       TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
+};
+
+#define TCA_FLOWER_MASK_FLAGS_RANGE    (1 << 0) /* Range-based match */
+
+/* Match-all classifier */
+
+struct tc_matchall_pcnt {
+       __u64 rhit;
+};
+
+enum {
+       TCA_MATCHALL_UNSPEC,
+       TCA_MATCHALL_CLASSID,
+       TCA_MATCHALL_ACT,
+       TCA_MATCHALL_FLAGS,
+       TCA_MATCHALL_PCNT,
+       TCA_MATCHALL_PAD,
+       __TCA_MATCHALL_MAX,
+};
+
+#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
+
+/* Extended Matches */
+
+struct tcf_ematch_tree_hdr {
+       __u16           nmatches;
+       __u16           progid;
+};
+
+enum {
+       TCA_EMATCH_TREE_UNSPEC,
+       TCA_EMATCH_TREE_HDR,
+       TCA_EMATCH_TREE_LIST,
+       __TCA_EMATCH_TREE_MAX
+};
+#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
+
+struct tcf_ematch_hdr {
+       __u16           matchid;
+       __u16           kind;
+       __u16           flags;
+       __u16           pad; /* currently unused */
+};
+
+/*  0                   1
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 
+ * +-----------------------+-+-+---+
+ * |         Unused        |S|I| R |
+ * +-----------------------+-+-+---+
+ *
+ * R(2) ::= relation to next ematch
+ *          where: 0 0 END (last ematch)
+ *                 0 1 AND
+ *                 1 0 OR
+ *                 1 1 Unused (invalid)
+ * I(1) ::= invert result
+ * S(1) ::= simple payload
+ */
+#define TCF_EM_REL_END 0
+#define TCF_EM_REL_AND (1<<0)
+#define TCF_EM_REL_OR  (1<<1)
+#define TCF_EM_INVERT  (1<<2)
+#define TCF_EM_SIMPLE  (1<<3)
+
+#define TCF_EM_REL_MASK        3
+#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
+
+enum {
+       TCF_LAYER_LINK,
+       TCF_LAYER_NETWORK,
+       TCF_LAYER_TRANSPORT,
+       __TCF_LAYER_MAX
+};
+#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
+
+/* Ematch type assignments
+ *   1..32767          Reserved for ematches inside kernel tree
+ *   32768..65535      Free to use, not reliable
+ */
+#define        TCF_EM_CONTAINER        0
+#define        TCF_EM_CMP              1
+#define        TCF_EM_NBYTE            2
+#define        TCF_EM_U32              3
+#define        TCF_EM_META             4
+#define        TCF_EM_TEXT             5
+#define        TCF_EM_VLAN             6
+#define        TCF_EM_CANID            7
+#define        TCF_EM_IPSET            8
+#define        TCF_EM_IPT              9
+#define        TCF_EM_MAX              9
+
+enum {
+       TCF_EM_PROG_TC
+};
+
+enum {
+       TCF_EM_OPND_EQ,
+       TCF_EM_OPND_GT,
+       TCF_EM_OPND_LT
+};
+
+#endif
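
The TC_ACT_EXT macro block added above encodes an "extended action" as an opcode in the
highest nibble plus a 28-bit value. The following is a minimal standalone sketch, not part
of the commit, showing how those macros compose and decompose a combined opcode; it assumes
a system <linux/pkt_cls.h> with the same definitions, and the chain index 7 is an arbitrary
illustrative value.

/* Standalone sketch (not part of the commit): exercise the TC_ACT_EXT
 * macros defined in the header above.
 */
#include <assert.h>
#include <linux/pkt_cls.h>

int main(void)
{
	/* Highest nibble carries the opcode, low 28 bits carry the value. */
	int combined = TC_ACT_GOTO_CHAIN | 7;	/* "goto chain 7" */

	assert(TC_ACT_EXT_OPCODE(combined) == TC_ACT_GOTO_CHAIN);
	assert((combined & TC_ACT_EXT_VAL_MASK) == 7);
	assert(TC_ACT_EXT_CMP(combined, TC_ACT_GOTO_CHAIN));
	return 0;
}
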
index d431787ebb692d2f2a1dbdb464f6ef71d7182ed8..a37cda1ce136c9e99b11ca3781fba87cfa24890a 100644 (file)
@@ -238,11 +238,11 @@ struct fabricd *fabricd_new(struct isis_area *area)
 
 void fabricd_finish(struct fabricd *f)
 {
-       thread_cancel(&(f->initial_sync_timeout));
+       THREAD_OFF(f->initial_sync_timeout);
 
-       thread_cancel(&(f->tier_calculation_timer));
+       THREAD_OFF(f->tier_calculation_timer);
 
-       thread_cancel(&(f->tier_set_timer));
+       THREAD_OFF(f->tier_set_timer);
 
        isis_spftree_del(f->spftree);
        neighbor_lists_clear(f);
@@ -334,7 +334,7 @@ void fabricd_initial_sync_finish(struct isis_area *area)
                  f->initial_sync_circuit->interface->name);
        f->initial_sync_state = FABRICD_SYNC_COMPLETE;
        f->initial_sync_circuit = NULL;
-       thread_cancel(&(f->initial_sync_timeout));
+       THREAD_OFF(f->initial_sync_timeout);
 }
 
 static void fabricd_bump_tier_calculation_timer(struct fabricd *f);
@@ -427,14 +427,14 @@ static void fabricd_bump_tier_calculation_timer(struct fabricd *f)
 {
        /* Cancel timer if we already know our tier */
        if (f->tier != ISIS_TIER_UNDEFINED || f->tier_set_timer) {
-               thread_cancel(&(f->tier_calculation_timer));
+               THREAD_OFF(f->tier_calculation_timer);
                return;
        }
 
        /* If we need to calculate the tier, wait some
         * time for the topology to settle before running
         * the calculation */
-       thread_cancel(&(f->tier_calculation_timer));
+       THREAD_OFF(f->tier_calculation_timer);
 
        thread_add_timer(master, fabricd_tier_calculation_cb, f,
                         2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
@@ -719,7 +719,7 @@ void fabricd_trigger_csnp(struct isis_area *area, bool circuit_scoped)
                if (!circuit->t_send_csnp[1])
                        continue;
 
-               thread_cancel(&(circuit->t_send_csnp[ISIS_LEVEL2 - 1]));
+               THREAD_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
                thread_add_timer_msec(master, send_l2_csnp, circuit,
                                      isis_jitter(f->csnp_delay, CSNP_JITTER),
                                      &circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
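
Starting with fabricd.c above, the bulk of the remaining hunks are a mechanical conversion
from thread_cancel(&t) to THREAD_OFF(t). The real wrapper lives in FRR's lib/thread.h; the
standalone sketch below uses stubbed types and a stand-in macro (an assumption about the
wrapper's behavior, not FRR's verbatim code) to illustrate the pattern the call sites rely
on: pass the pointer variable itself, skip the call when nothing is scheduled, and let the
cancel routine NULL the pointer so a repeated call is harmless.

#include <stdio.h>
#include <stdlib.h>

struct thread { int id; };

/* Stub with the same shape as thread_cancel(struct thread **): it
 * cancels the scheduled task and NULLs the caller's pointer. */
static void thread_cancel(struct thread **tp)
{
	free(*tp);
	*tp = NULL;
}

#define THREAD_OFF(thread)                                                     \
	do {                                                                   \
		if ((thread) != NULL)                                          \
			thread_cancel(&(thread));                              \
	} while (0)

int main(void)
{
	struct thread *t_expire = calloc(1, sizeof(*t_expire));

	THREAD_OFF(t_expire);	/* cancels and clears t_expire */
	THREAD_OFF(t_expire);	/* no-op: pointer is already NULL */
	printf("t_expire is %s\n", t_expire ? "still set" : "NULL");
	return 0;
}
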
index 11f17ec7bfe72c52164439740486419c56a739c1..86cf10ae1796bcb9744ae1c2c81ae230629af234 100644 (file)
@@ -161,7 +161,7 @@ void isis_delete_adj(void *arg)
        /* Remove self from snmp list without walking the list*/
        list_delete_node(adj->circuit->snmp_adj_list, adj->snmp_list_node);
 
-       thread_cancel(&adj->t_expire);
+       THREAD_OFF(adj->t_expire);
        if (adj->adj_state != ISIS_ADJ_DOWN)
                adj->adj_state = ISIS_ADJ_DOWN;
 
@@ -170,7 +170,7 @@ void isis_delete_adj(void *arg)
        XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->area_addresses);
        XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->ipv4_addresses);
        XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->ll_ipv6_addrs);
-
+       XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->global_ipv6_addrs);
        adj_mt_finish(adj);
        list_delete(&adj->adj_sids);
 
index 28d4b530fc3a631ee9399cae52a0331adbf9c21b..9e97e489373a6c2f75924e0c7a2203c3a89baaa3 100644 (file)
@@ -845,12 +845,12 @@ void isis_circuit_down(struct isis_circuit *circuit)
                memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);
                memset(circuit->u.bc.snpa, 0, ETH_ALEN);
 
-               thread_cancel(&circuit->u.bc.t_send_lan_hello[0]);
-               thread_cancel(&circuit->u.bc.t_send_lan_hello[1]);
-               thread_cancel(&circuit->u.bc.t_run_dr[0]);
-               thread_cancel(&circuit->u.bc.t_run_dr[1]);
-               thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[0]);
-               thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[1]);
+               THREAD_OFF(circuit->u.bc.t_send_lan_hello[0]);
+               THREAD_OFF(circuit->u.bc.t_send_lan_hello[1]);
+               THREAD_OFF(circuit->u.bc.t_run_dr[0]);
+               THREAD_OFF(circuit->u.bc.t_run_dr[1]);
+               THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[0]);
+               THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[1]);
                circuit->lsp_regenerate_pending[0] = 0;
                circuit->lsp_regenerate_pending[1] = 0;
 
@@ -860,7 +860,7 @@ void isis_circuit_down(struct isis_circuit *circuit)
        } else if (circuit->circ_type == CIRCUIT_T_P2P) {
                isis_delete_adj(circuit->u.p2p.neighbor);
                circuit->u.p2p.neighbor = NULL;
-               thread_cancel(&circuit->u.p2p.t_send_p2p_hello);
+               THREAD_OFF(circuit->u.p2p.t_send_p2p_hello);
        }
 
        /*
@@ -873,11 +873,11 @@ void isis_circuit_down(struct isis_circuit *circuit)
        circuit->snmp_adj_idx_gen = 0;
 
        /* Cancel all active threads */
-       thread_cancel(&circuit->t_send_csnp[0]);
-       thread_cancel(&circuit->t_send_csnp[1]);
-       thread_cancel(&circuit->t_send_psnp[0]);
-       thread_cancel(&circuit->t_send_psnp[1]);
-       thread_cancel(&circuit->t_read);
+       THREAD_OFF(circuit->t_send_csnp[0]);
+       THREAD_OFF(circuit->t_send_csnp[1]);
+       THREAD_OFF(circuit->t_send_psnp[0]);
+       THREAD_OFF(circuit->t_send_psnp[1]);
+       THREAD_OFF(circuit->t_read);
 
        if (circuit->tx_queue) {
                isis_tx_queue_free(circuit->tx_queue);
index bbc569ec1ce9b40bb159252de8cbda15fedb4b04..a673cb8c1ed4e4691feca3c2514f20a809b80055 100644 (file)
@@ -1629,8 +1629,10 @@ DEFPY_YANG (isis_sr_prefix_sid,
 
                if (strmatch(lh_behavior, "no-php-flag"))
                        value = "no-php";
-               else
+               else if (strmatch(lh_behavior, "explicit-null"))
                        value = "explicit-null";
+               else
+                       value = "php";
 
                nb_cli_enqueue_change(vty, "./last-hop-behavior", NB_OP_MODIFY,
                                      value);
index 27b73880728153e54c4d3c93b531c3fba108037b..b9bf49867d09568b45baba57ac3b89edcf7b7dd3 100644 (file)
@@ -222,8 +222,8 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)
 
        circuit->u.bc.is_dr[level - 1] = 0;
        circuit->u.bc.run_dr_elect[level - 1] = 0;
-       thread_cancel(&circuit->u.bc.t_run_dr[level - 1]);
-       thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+       THREAD_OFF(circuit->u.bc.t_run_dr[level - 1]);
+       THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
        circuit->lsp_regenerate_pending[level - 1] = 0;
 
        memcpy(id, circuit->isis->sysid, ISIS_SYS_ID_LEN);
@@ -247,7 +247,7 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)
                                 &circuit->t_send_psnp[1]);
        }
 
-       thread_cancel(&circuit->t_send_csnp[level - 1]);
+       THREAD_OFF(circuit->t_send_csnp[level - 1]);
 
        thread_add_timer(master, isis_run_dr,
                         &circuit->level_arg[level - 1],
@@ -285,8 +285,6 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
                        circuit->circuit_id;
 
                assert(circuit->circuit_id); /* must be non-zero */
-               /*    if (circuit->t_send_l1_psnp)
-                  thread_cancel (circuit->t_send_l1_psnp); */
                lsp_generate_pseudo(circuit, 1);
 
                thread_add_timer(master, send_l1_csnp, circuit,
@@ -307,8 +305,6 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
                        circuit->circuit_id;
 
                assert(circuit->circuit_id); /* must be non-zero */
-               /*    if (circuit->t_send_l1_psnp)
-                  thread_cancel (circuit->t_send_l1_psnp); */
                lsp_generate_pseudo(circuit, 2);
 
                thread_add_timer(master, send_l2_csnp, circuit,
index 8d76e819345d54ec6bec4ab03474f3e830da115a..5d6b7bc60a27676a26f61434ea23938dbcaed8a7 100644 (file)
@@ -57,7 +57,7 @@ void dyn_cache_finish(struct isis *isis)
        struct listnode *node, *nnode;
        struct isis_dynhn *dyn;
 
-       thread_cancel(&isis->t_dync_clean);
+       THREAD_OFF(isis->t_dync_clean);
 
        for (ALL_LIST_ELEMENTS(isis->dyn_cache, node, nnode, dyn)) {
                list_delete_node(isis->dyn_cache, node);
index fce48fec97d0174ddbdd16e5aa21767f25e65ad1..42823cf2b3d8e4cc5f79a9fbcd907cc20b5e7deb 100644 (file)
@@ -109,13 +109,13 @@ static void circuit_resign_level(struct isis_circuit *circuit, int level)
                        circuit->area->area_tag, circuit->circuit_id,
                        circuit->interface->name, level);
 
-       thread_cancel(&circuit->t_send_csnp[idx]);
-       thread_cancel(&circuit->t_send_psnp[idx]);
+       THREAD_OFF(circuit->t_send_csnp[idx]);
+       THREAD_OFF(circuit->t_send_psnp[idx]);
 
        if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
-               thread_cancel(&circuit->u.bc.t_send_lan_hello[idx]);
-               thread_cancel(&circuit->u.bc.t_run_dr[idx]);
-               thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[idx]);
+               THREAD_OFF(circuit->u.bc.t_send_lan_hello[idx]);
+               THREAD_OFF(circuit->u.bc.t_run_dr[idx]);
+               THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[idx]);
                circuit->lsp_regenerate_pending[idx] = 0;
                circuit->u.bc.run_dr_elect[idx] = 0;
                circuit->u.bc.is_dr[idx] = 0;
index 800cac8521e99c1094567a72139bc65939f15802..c4fadcba0372fc12d8612b73bc7ea5b2d2016a4e 100644 (file)
@@ -1519,7 +1519,7 @@ int isis_rlfa_activate(struct isis_spftree *spftree, struct rlfa *rlfa,
                          spftree->route_table_backup);
        spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1;
 
-       thread_cancel(&area->t_rlfa_rib_update);
+       THREAD_OFF(area->t_rlfa_rib_update);
        thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
                         &area->t_rlfa_rib_update);
 
@@ -1538,7 +1538,7 @@ void isis_rlfa_deactivate(struct isis_spftree *spftree, struct rlfa *rlfa)
        isis_route_delete(area, rn, spftree->route_table_backup);
        spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1;
 
-       thread_cancel(&area->t_rlfa_rib_update);
+       THREAD_OFF(area->t_rlfa_rib_update);
        thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
                         &area->t_rlfa_rib_update);
 }
index 8dbd41b5d9ab962fc244e9369abf8dc4815603a8..5387f37039016ae8be149b4b7df407da9798ea32 100644 (file)
@@ -1390,7 +1390,7 @@ int lsp_generate(struct isis_area *area, int level)
 
        refresh_time = lsp_refresh_time(newlsp, rem_lifetime);
 
-       thread_cancel(&area->t_lsp_refresh[level - 1]);
+       THREAD_OFF(area->t_lsp_refresh[level - 1]);
        area->lsp_regenerate_pending[level - 1] = 0;
        thread_add_timer(master, lsp_refresh,
                         &area->lsp_refresh_arg[level - 1], refresh_time,
@@ -1601,7 +1601,7 @@ int _lsp_regenerate_schedule(struct isis_area *area, int level,
                        "ISIS (%s): Will schedule regen timer. Last run was: %lld, Now is: %lld",
                        area->area_tag, (long long)lsp->last_generated,
                        (long long)now);
-               thread_cancel(&area->t_lsp_refresh[lvl - 1]);
+               THREAD_OFF(area->t_lsp_refresh[lvl - 1]);
                diff = now - lsp->last_generated;
                if (diff < area->lsp_gen_interval[lvl - 1]
                    && !(area->bfd_signalled_down)) {
@@ -1794,7 +1794,7 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
        lsp_flood(lsp, NULL);
 
        refresh_time = lsp_refresh_time(lsp, rem_lifetime);
-       thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+       THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
        circuit->lsp_regenerate_pending[level - 1] = 0;
        if (level == IS_LEVEL_1)
                thread_add_timer(
@@ -1985,7 +1985,7 @@ int lsp_regenerate_schedule_pseudo(struct isis_circuit *circuit, int level)
                        "ISIS (%s): Will schedule PSN regen timer. Last run was: %lld, Now is: %lld",
                        area->area_tag, (long long)lsp->last_generated,
                        (long long)now);
-               thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
+               THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
                diff = now - lsp->last_generated;
                if (diff < circuit->area->lsp_gen_interval[lvl - 1]) {
                        timeout =
index b1fbfd514037c59969710f3b6d03fea667f101da..47093fdd6bffd32fd5b9028810d310ef768ed29a 100644 (file)
@@ -205,7 +205,7 @@ static int process_p2p_hello(struct iih_info *iih)
                                      adj);
 
        /* lets take care of the expiry */
-       thread_cancel(&adj->t_expire);
+       THREAD_OFF(adj->t_expire);
        thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
                         &adj->t_expire);
 
@@ -497,7 +497,7 @@ static int process_lan_hello(struct iih_info *iih)
                                      adj);
 
        /* lets take care of the expiry */
-       thread_cancel(&adj->t_expire);
+       THREAD_OFF(adj->t_expire);
        thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
                         &adj->t_expire);
 
@@ -2064,7 +2064,7 @@ static void _send_hello_sched(struct isis_circuit *circuit,
                if (thread_timer_remain_msec(*threadp) < (unsigned long)delay)
                        return;
 
-               thread_cancel(threadp);
+               THREAD_OFF(*threadp);
        }
 
        thread_add_timer_msec(master, send_hello_cb,
index 09f92554c020bd7090d3715e0d24ba4cf46184bf..6be7c2d608aa27a98e4383fa8ad2933944f5c1f2 100644 (file)
@@ -1400,20 +1400,21 @@ static void spf_adj_list_parse_tlv(struct isis_spftree *spftree,
                spf_adj_list_parse_lsp(spftree, adj_list, lsp, id, metric);
 }
 
-static void spf_adj_list_parse_lsp_frag(struct isis_spftree *spftree,
-                                       struct list *adj_list,
-                                       struct isis_lsp *lsp,
-                                       const uint8_t *pseudo_nodeid,
-                                       uint32_t pseudo_metric)
+static void spf_adj_list_parse_lsp(struct isis_spftree *spftree,
+                                  struct list *adj_list, struct isis_lsp *lsp,
+                                  const uint8_t *pseudo_nodeid,
+                                  uint32_t pseudo_metric)
 {
        bool pseudo_lsp = LSP_PSEUDO_ID(lsp->hdr.lsp_id);
+       struct isis_lsp *frag;
+       struct listnode *node;
        struct isis_item *head;
        struct isis_item_list *te_neighs;
 
        if (lsp->hdr.seqno == 0 || lsp->hdr.rem_lifetime == 0)
                return;
 
-       /* Parse main LSP. */
+       /* Parse LSP. */
        if (lsp->tlvs) {
                if (pseudo_lsp || spftree->mtid == ISIS_MT_IPV4_UNICAST) {
                        head = lsp->tlvs->oldstyle_reach.head;
@@ -1444,27 +1445,17 @@ static void spf_adj_list_parse_lsp_frag(struct isis_spftree *spftree,
                        }
                }
        }
-}
 
-
-static void spf_adj_list_parse_lsp(struct isis_spftree *spftree,
-                                  struct list *adj_list, struct isis_lsp *lsp,
-                                  const uint8_t *pseudo_nodeid,
-                                  uint32_t pseudo_metric)
-{
-       struct isis_lsp *frag;
-       struct listnode *node;
-
-       spf_adj_list_parse_lsp_frag(spftree, adj_list, lsp, pseudo_nodeid,
-                                   pseudo_metric);
+       if (LSP_FRAGMENT(lsp->hdr.lsp_id))
+               return;
 
        /* Parse LSP fragments. */
        for (ALL_LIST_ELEMENTS_RO(lsp->lspu.frags, node, frag)) {
                if (!frag->tlvs)
                        continue;
 
-               spf_adj_list_parse_lsp_frag(spftree, adj_list, frag,
-                                           pseudo_nodeid, pseudo_metric);
+               spf_adj_list_parse_lsp(spftree, adj_list, frag, pseudo_nodeid,
+                                      pseudo_metric);
        }
 }
 
@@ -1947,7 +1938,7 @@ int _isis_spf_schedule(struct isis_area *area, int level,
                        area->area_tag, level, diff, func, file, line);
        }
 
-       thread_cancel(&area->t_rlfa_rib_update);
+       THREAD_OFF(area->t_rlfa_rib_update);
        if (area->spf_delay_ietf[level - 1]) {
                /* Need to call schedule function also if spf delay is running
                 * to
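
The larger isis_spf.c hunk above folds spf_adj_list_parse_lsp_frag() into
spf_adj_list_parse_lsp() itself: the function now parses its own TLVs, returns early when it
was handed a fragment, and otherwise recurses once over the fragment list. A standalone
sketch of that control flow follows, with hypothetical, simplified types; it is not FRR code.

#include <stdbool.h>
#include <stdio.h>

struct lsp {
	bool is_fragment;
	struct lsp *frags; /* fragment list, present only on LSP #0 */
	struct lsp *next;
	const char *name;
};

static void parse_lsp(struct lsp *lsp)
{
	printf("parsing %s\n", lsp->name); /* stands in for TLV parsing */

	if (lsp->is_fragment)
		return; /* fragments never carry further fragments */

	for (struct lsp *frag = lsp->frags; frag; frag = frag->next)
		parse_lsp(frag);
}

int main(void)
{
	struct lsp f2 = { .is_fragment = true, .name = "frag 2" };
	struct lsp f1 = { .is_fragment = true, .next = &f2, .name = "frag 1" };
	struct lsp main_lsp = { .frags = &f1, .name = "main LSP" };

	parse_lsp(&main_lsp);
	return 0;
}
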
index 107fa71d71ebb0caf990f2fafef21188c827c784..259047ff66eef8588ce42d80e8d7c21880b38a4b 100644 (file)
@@ -1180,7 +1180,7 @@ void isis_sr_stop(struct isis_area *area)
                 area->area_tag);
 
        /* Disable any re-attempt to connect to Label Manager */
-       thread_cancel(&srdb->t_start_lm);
+       THREAD_OFF(srdb->t_start_lm);
 
        /* Uninstall all local Adjacency-SIDs. */
        for (ALL_LIST_ELEMENTS(area->srdb.adj_sids, node, nnode, sra))
index 11be3c3a7159ed7a9793bde1c98070ea2c504518..b3c3fd4b0b4db058ad8ae750e3766235f795ae96 100644 (file)
@@ -3580,9 +3580,9 @@ static int pack_tlv_router_cap(const struct isis_router_cap *router_cap,
 }
 
 static int unpack_tlv_router_cap(enum isis_tlv_context context,
-                                      uint8_t tlv_type, uint8_t tlv_len,
-                                      struct stream *s, struct sbuf *log,
-                                      void *dest, int indent)
+                                uint8_t tlv_type, uint8_t tlv_len,
+                                struct stream *s, struct sbuf *log, void *dest,
+                                int indent)
 {
        struct isis_tlvs *tlvs = dest;
        struct isis_router_cap *rcap;
@@ -3627,7 +3627,7 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context,
                                log, indent,
                                "WARNING: Router Capability subTLV length too large compared to expected size\n");
                        stream_forward_getp(s, STREAM_READABLE(s));
-
+                       XFREE(MTYPE_ISIS_TLV, rcap);
                        return 0;
                }
 
index 078329221a200a7346408f45b44e40fa902d0f5c..06369c6d70758db519a30b0630b3e4c2235ac7fb 100644 (file)
@@ -92,7 +92,7 @@ static void tx_queue_element_free(void *element)
 {
        struct isis_tx_queue_entry *e = element;
 
-       thread_cancel(&(e->retry));
+       THREAD_OFF(e->retry);
 
        XFREE(MTYPE_TX_QUEUE_ENTRY, e);
 }
@@ -161,7 +161,7 @@ void _isis_tx_queue_add(struct isis_tx_queue *queue,
 
        e->type = type;
 
-       thread_cancel(&(e->retry));
+       THREAD_OFF(e->retry);
        thread_add_event(master, tx_queue_send_event, e, 0, &e->retry);
 
        e->is_retry = false;
@@ -184,7 +184,7 @@ void _isis_tx_queue_del(struct isis_tx_queue *queue, struct isis_lsp *lsp,
                           func, file, line);
        }
 
-       thread_cancel(&(e->retry));
+       THREAD_OFF(e->retry);
 
        hash_release(queue->hash, e);
        XFREE(MTYPE_TX_QUEUE_ENTRY, e);
index 996a62f4d55acbee8dfb72cf845a23ea11f90f61..3fd2476ad126b0131a9f2495bdfb39cffde0126f 100644 (file)
@@ -514,10 +514,10 @@ void isis_area_destroy(struct isis_area *area)
 
        if (area->spf_timer[0])
                isis_spf_timer_free(THREAD_ARG(area->spf_timer[0]));
-       thread_cancel(&area->spf_timer[0]);
+       THREAD_OFF(area->spf_timer[0]);
        if (area->spf_timer[1])
                isis_spf_timer_free(THREAD_ARG(area->spf_timer[1]));
-       thread_cancel(&area->spf_timer[1]);
+       THREAD_OFF(area->spf_timer[1]);
 
        spf_backoff_free(area->spf_delay_ietf[0]);
        spf_backoff_free(area->spf_delay_ietf[1]);
@@ -541,10 +541,10 @@ void isis_area_destroy(struct isis_area *area)
        isis_lfa_tiebreakers_clear(area, ISIS_LEVEL1);
        isis_lfa_tiebreakers_clear(area, ISIS_LEVEL2);
 
-       thread_cancel(&area->t_tick);
-       thread_cancel(&area->t_lsp_refresh[0]);
-       thread_cancel(&area->t_lsp_refresh[1]);
-       thread_cancel(&area->t_rlfa_rib_update);
+       THREAD_OFF(area->t_tick);
+       THREAD_OFF(area->t_lsp_refresh[0]);
+       THREAD_OFF(area->t_lsp_refresh[1]);
+       THREAD_OFF(area->t_rlfa_rib_update);
 
        thread_cancel_event(master, area);
 
@@ -3094,12 +3094,12 @@ static void area_resign_level(struct isis_area *area, int level)
        if (area->spf_timer[level - 1])
                isis_spf_timer_free(THREAD_ARG(area->spf_timer[level - 1]));
 
-       thread_cancel(&area->spf_timer[level - 1]);
+       THREAD_OFF(area->spf_timer[level - 1]);
 
        sched_debug(
                "ISIS (%s): Resigned from L%d - canceling LSP regeneration timer.",
                area->area_tag, level);
-       thread_cancel(&area->t_lsp_refresh[level - 1]);
+       THREAD_OFF(area->t_lsp_refresh[level - 1]);
        area->lsp_regenerate_pending[level - 1] = 0;
 }
 
index 2999a35c06710d0facad3ab5bdc37a64554181de..1a04c563c1922761bafec405255c17cc84a6b462 100644 (file)
@@ -72,7 +72,7 @@ accept_del(int fd)
        LIST_FOREACH(av, &accept_queue.queue, entry)
                if (av->fd == fd) {
                        log_debug("%s: %d removed from queue", __func__, fd);
-                       thread_cancel(&av->ev);
+                       THREAD_OFF(av->ev);
                        LIST_REMOVE(av, entry);
                        free(av);
                        return;
@@ -92,7 +92,7 @@ accept_unpause(void)
 {
        if (accept_queue.evt != NULL) {
                log_debug(__func__);
-               thread_cancel(&accept_queue.evt);
+               THREAD_OFF(accept_queue.evt);
                accept_arm();
        }
 }
@@ -111,7 +111,7 @@ accept_unarm(void)
 {
        struct accept_ev        *av;
        LIST_FOREACH(av, &accept_queue.queue, entry)
-               thread_cancel(&av->ev);
+               THREAD_OFF(av->ev);
 }
 
 static void accept_cb(struct thread *thread)
index bbc8a277a624d3434a163ae9189d6ccffcbb0956..8caa3c4e2414306b0cbf23e71f76d8098d858b4c 100644 (file)
@@ -196,7 +196,7 @@ static void adj_itimer(struct thread *thread)
 void
 adj_start_itimer(struct adj *adj)
 {
-       thread_cancel(&adj->inactivity_timer);
+       THREAD_OFF(adj->inactivity_timer);
        adj->inactivity_timer = NULL;
        thread_add_timer(master, adj_itimer, adj, adj->holdtime,
                         &adj->inactivity_timer);
@@ -205,7 +205,7 @@ adj_start_itimer(struct adj *adj)
 void
 adj_stop_itimer(struct adj *adj)
 {
-       thread_cancel(&adj->inactivity_timer);
+       THREAD_OFF(adj->inactivity_timer);
 }
 
 /* targeted neighbors */
@@ -354,7 +354,7 @@ static void tnbr_hello_timer(struct thread *thread)
 static void
 tnbr_start_hello_timer(struct tnbr *tnbr)
 {
-       thread_cancel(&tnbr->hello_timer);
+       THREAD_OFF(tnbr->hello_timer);
        tnbr->hello_timer = NULL;
        thread_add_timer(master, tnbr_hello_timer, tnbr, tnbr_get_hello_interval(tnbr),
                         &tnbr->hello_timer);
@@ -363,7 +363,7 @@ tnbr_start_hello_timer(struct tnbr *tnbr)
 static void
 tnbr_stop_hello_timer(struct tnbr *tnbr)
 {
-       thread_cancel(&tnbr->hello_timer);
+       THREAD_OFF(tnbr->hello_timer);
 }
 
 struct ctl_adj *
@@ -386,7 +386,7 @@ adj_to_ctl(struct adj *adj)
        }
        actl.holdtime = adj->holdtime;
        actl.holdtime_remaining =
-           thread_timer_remain_second(adj->inactivity_timer);
+               thread_timer_remain_second(adj->inactivity_timer);
        actl.trans_addr = adj->trans_addr;
        actl.ds_tlv = adj->ds_tlv;
 
index 376f488bd1212584dd350bf05d153805fddfca2b..09e91217ae3cae18057de16d1d2cd8de6f456021 100644 (file)
@@ -180,8 +180,8 @@ control_close(int fd)
        msgbuf_clear(&c->iev.ibuf.w);
        TAILQ_REMOVE(&ctl_conns, c, entry);
 
-       thread_cancel(&c->iev.ev_read);
-       thread_cancel(&c->iev.ev_write);
+       THREAD_OFF(c->iev.ev_read);
+       THREAD_OFF(c->iev.ev_write);
        close(c->iev.ibuf.fd);
        accept_unpause();
        free(c);
index af6e8fd7ecbf9585799f39b00bcd8ae11d2e0dce..392e25470fbd4fb7c8fe18b9f51a12cd1c2b56ac 100644 (file)
@@ -467,7 +467,7 @@ static void if_hello_timer(struct thread *thread)
 static void
 if_start_hello_timer(struct iface_af *ia)
 {
-       thread_cancel(&ia->hello_timer);
+       THREAD_OFF(ia->hello_timer);
        thread_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
                         &ia->hello_timer);
 }
@@ -475,7 +475,7 @@ if_start_hello_timer(struct iface_af *ia)
 static void
 if_stop_hello_timer(struct iface_af *ia)
 {
-       thread_cancel(&ia->hello_timer);
+       THREAD_OFF(ia->hello_timer);
 }
 
 struct ctl_iface *
@@ -543,11 +543,8 @@ ldp_sync_to_ctl(struct iface *iface)
        ictl.wait_time = if_get_wait_for_sync_interval();
        ictl.timer_running = iface->ldp_sync.wait_for_sync_timer ? true : false;
 
-       if (iface->ldp_sync.wait_for_sync_timer)
-               ictl.wait_time_remaining =
+       ictl.wait_time_remaining =
                thread_timer_remain_second(iface->ldp_sync.wait_for_sync_timer);
-       else
-               ictl.wait_time_remaining = 0;
 
        memset(&ictl.peer_ldp_id, 0, sizeof(ictl.peer_ldp_id));
 
index 9d1daabbe96906dbdcaeb7c273abbdb5a8014fb6..efc07b45470830349b7822fb556efc8c45018aab 100644 (file)
@@ -417,8 +417,8 @@ static void lde_dispatch_imsg(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                lde_shutdown();
        }
 }
@@ -702,8 +702,8 @@ static void lde_dispatch_parent(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                lde_shutdown();
        }
 }
index 1ade3c5378d0973df3b87d94af0323cf17516413..43665605469c2b095856c5dfcc6020bae85ae379 100644 (file)
@@ -1068,7 +1068,7 @@ void lde_gc_timer(struct thread *thread)
 void
 lde_gc_start_timer(void)
 {
-       thread_cancel(&gc_timer);
+       THREAD_OFF(gc_timer);
        thread_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL,
                         &gc_timer);
 }
@@ -1076,5 +1076,5 @@ lde_gc_start_timer(void)
 void
 lde_gc_stop_timer(void)
 {
-       thread_cancel(&gc_timer);
+       THREAD_OFF(gc_timer);
 }
index 796cf11798e11de022672012ea9cb5fb573687fa..87d78afa25b1903dead36c40b8fe5810c1431192 100644 (file)
@@ -616,8 +616,8 @@ static void main_dispatch_ldpe(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                ldpe_pid = 0;
                if (lde_pid == 0)
                        ldpd_shutdown();
@@ -721,8 +721,8 @@ static void main_dispatch_lde(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                lde_pid = 0;
                if (ldpe_pid == 0)
                        ldpd_shutdown();
@@ -744,8 +744,8 @@ void ldp_write_handler(struct thread *thread)
                fatal("msgbuf_write");
        if (n == 0) {
                /* this pipe is dead, so remove the event handlers */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                return;
        }
 
@@ -829,7 +829,7 @@ void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct thread *),
 void
 evbuf_clear(struct evbuf *eb)
 {
-       thread_cancel(&eb->ev);
+       THREAD_OFF(eb->ev);
        msgbuf_clear(&eb->wbuf);
        eb->wbuf.fd = -1;
 }
index 29abd420e59cb14090bcf53245e8788017004fc2..792dcb2f2a8618ab67c5f4c09c054fdba95f0de6 100644 (file)
@@ -212,7 +212,7 @@ ldpe_shutdown(void)
 
 #ifdef __OpenBSD__
        if (sysdep.no_pfkey == 0) {
-               thread_cancel(&pfkey_ev);
+               THREAD_OFF(pfkey_ev);
                close(global.pfkeysock);
        }
 #endif
@@ -626,8 +626,8 @@ static void ldpe_dispatch_main(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                ldpe_shutdown();
        }
 }
@@ -762,8 +762,8 @@ static void ldpe_dispatch_lde(struct thread *thread)
                imsg_event_add(iev);
        else {
                /* this pipe is dead, so remove the event handlers and exit */
-               thread_cancel(&iev->ev_read);
-               thread_cancel(&iev->ev_write);
+               THREAD_OFF(iev->ev_read);
+               THREAD_OFF(iev->ev_write);
                ldpe_shutdown();
        }
 }
@@ -813,14 +813,14 @@ ldpe_close_sockets(int af)
        af_global = ldp_af_global_get(&global, af);
 
        /* discovery socket */
-       thread_cancel(&af_global->disc_ev);
+       THREAD_OFF(af_global->disc_ev);
        if (af_global->ldp_disc_socket != -1) {
                close(af_global->ldp_disc_socket);
                af_global->ldp_disc_socket = -1;
        }
 
        /* extended discovery socket */
-       thread_cancel(&af_global->edisc_ev);
+       THREAD_OFF(af_global->edisc_ev);
        if (af_global->ldp_edisc_socket != -1) {
                close(af_global->ldp_edisc_socket);
                af_global->ldp_edisc_socket = -1;
index 867ad92e474ee8e6eb6764a653f9059508349d62..e1db9e8e1e00f5a28b827c4c3e038eeedd6095f3 100644 (file)
@@ -307,7 +307,7 @@ nbr_del(struct nbr *nbr)
        nbr->auth.method = AUTH_NONE;
 
        if (nbr_pending_connect(nbr))
-               thread_cancel(&nbr->ev_connect);
+               THREAD_OFF(nbr->ev_connect);
        nbr_stop_ktimer(nbr);
        nbr_stop_ktimeout(nbr);
        nbr_stop_itimeout(nbr);
@@ -435,7 +435,7 @@ nbr_start_ktimer(struct nbr *nbr)
 
        /* send three keepalives per period */
        secs = nbr->keepalive / KEEPALIVE_PER_PERIOD;
-       thread_cancel(&nbr->keepalive_timer);
+       THREAD_OFF(nbr->keepalive_timer);
        nbr->keepalive_timer = NULL;
        thread_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
 }
@@ -443,7 +443,7 @@ nbr_start_ktimer(struct nbr *nbr)
 void
 nbr_stop_ktimer(struct nbr *nbr)
 {
-       thread_cancel(&nbr->keepalive_timer);
+       THREAD_OFF(nbr->keepalive_timer);
 }
 
 /* Keepalive timeout: if the nbr hasn't sent keepalive */
@@ -462,7 +462,7 @@ static void nbr_ktimeout(struct thread *thread)
 static void
 nbr_start_ktimeout(struct nbr *nbr)
 {
-       thread_cancel(&nbr->keepalive_timeout);
+       THREAD_OFF(nbr->keepalive_timeout);
        nbr->keepalive_timeout = NULL;
        thread_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
                         &nbr->keepalive_timeout);
@@ -471,7 +471,7 @@ nbr_start_ktimeout(struct nbr *nbr)
 void
 nbr_stop_ktimeout(struct nbr *nbr)
 {
-       thread_cancel(&nbr->keepalive_timeout);
+       THREAD_OFF(nbr->keepalive_timeout);
 }
 
 /* Session initialization timeout: if nbr got stuck in the initialization FSM */
@@ -491,7 +491,7 @@ nbr_start_itimeout(struct nbr *nbr)
        int              secs;
 
        secs = INIT_FSM_TIMEOUT;
-       thread_cancel(&nbr->init_timeout);
+       THREAD_OFF(nbr->init_timeout);
        nbr->init_timeout = NULL;
        thread_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
 }
@@ -499,7 +499,7 @@ nbr_start_itimeout(struct nbr *nbr)
 void
 nbr_stop_itimeout(struct nbr *nbr)
 {
-       thread_cancel(&nbr->init_timeout);
+       THREAD_OFF(nbr->init_timeout);
 }
 
 /* Init delay timer: timer to retry to iniziatize session */
@@ -537,7 +537,7 @@ nbr_start_idtimer(struct nbr *nbr)
                break;
        }
 
-       thread_cancel(&nbr->initdelay_timer);
+       THREAD_OFF(nbr->initdelay_timer);
        nbr->initdelay_timer = NULL;
        thread_add_timer(master, nbr_idtimer, nbr, secs,
                         &nbr->initdelay_timer);
@@ -546,7 +546,7 @@ nbr_start_idtimer(struct nbr *nbr)
 void
 nbr_stop_idtimer(struct nbr *nbr)
 {
-       thread_cancel(&nbr->initdelay_timer);
+       THREAD_OFF(nbr->initdelay_timer);
 }
 
 int
@@ -847,11 +847,8 @@ nbr_to_ctl(struct nbr *nbr)
        nctl.stats = nbr->stats;
        nctl.flags = nbr->flags;
        nctl.max_pdu_len = nbr->max_pdu_len;
-       if (nbr->keepalive_timer)
-               nctl.hold_time_remaining =
-                   thread_timer_remain_second(nbr->keepalive_timer);
-       else
-               nctl.hold_time_remaining = 0;
+       nctl.hold_time_remaining =
+               thread_timer_remain_second(nbr->keepalive_timer);
 
        gettimeofday(&now, NULL);
        if (nbr->state == NBR_STA_OPER) {
index 707878ca9ff53692b34f602a94340edbdb92f069..2cca52461f46a8f2c58297dfc70811a5838ce5c8 100644 (file)
@@ -651,7 +651,7 @@ session_shutdown(struct nbr *nbr, uint32_t status, uint32_t msg_id,
        switch (nbr->state) {
        case NBR_STA_PRESENT:
                if (nbr_pending_connect(nbr))
-                       thread_cancel(&nbr->ev_connect);
+                       THREAD_OFF(nbr->ev_connect);
                break;
        case NBR_STA_INITIAL:
        case NBR_STA_OPENREC:
@@ -756,7 +756,7 @@ tcp_close(struct tcp_conn *tcp)
        evbuf_clear(&tcp->wbuf);
 
        if (tcp->nbr) {
-               thread_cancel(&tcp->rev);
+               THREAD_OFF(tcp->rev);
                free(tcp->rbuf);
                tcp->nbr->tcp = NULL;
        }
@@ -788,7 +788,7 @@ pending_conn_new(int fd, int af, union ldpd_addr *addr)
 void
 pending_conn_del(struct pending_conn *pconn)
 {
-       thread_cancel(&pconn->ev_timeout);
+       THREAD_OFF(pconn->ev_timeout);
        TAILQ_REMOVE(&global.pending_conns, pconn, entry);
        free(pconn);
 }
index 821c573fb279c7505c176580a1df822853660a85..4c087219cb7e331132baefdc13f33a31906e0ebb 100644 (file)
@@ -118,7 +118,6 @@ static void agentx_events_update(void)
        snmp_select_info(&maxfd, &fds, &timeout, &block);
 
        if (!block) {
-               timeout_thr = NULL;
                thread_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
                                    &timeout_thr);
        }
@@ -147,9 +146,9 @@ static void agentx_events_update(void)
                else if (FD_ISSET(fd, &fds)) {
                        struct listnode *newln;
                        thr = XCALLOC(MTYPE_TMP, sizeof(struct thread *));
-                       thread_add_read(agentx_tm, agentx_read, NULL, fd, thr);
+
                        newln = listnode_add_before(events, ln, thr);
-                       (*thr)->arg = newln;
+                       thread_add_read(agentx_tm, agentx_read, newln, fd, thr);
                }
        }
 
index cbecc815741e3e94bb4cd39f3be3407aaddcb813..a23afb1e43863e15ff36c50a1d866ff1e69ca0f5 100644 (file)
@@ -121,6 +121,11 @@ const char *cmd_version_get(void)
        return host.version;
 }
 
+bool cmd_allow_reserved_ranges_get(void)
+{
+       return host.allow_reserved_ranges;
+}
+
 static int root_on_exit(struct vty *vty);
 
 /* Standard command node structures. */
@@ -454,6 +459,9 @@ static int config_write_host(struct vty *vty)
        if (name && name[0] != '\0')
                vty_out(vty, "domainname %s\n", name);
 
+       if (cmd_allow_reserved_ranges_get())
+               vty_out(vty, "allow-reserved-ranges\n");
+
        /* The following are all configuration commands that are not sent to
         * watchfrr.  For instance watchfrr is hardcoded to log to syslog so
         * we would always display 'log syslog informational' in the config
@@ -2294,6 +2302,21 @@ DEFUN (no_banner_motd,
        return CMD_SUCCESS;
 }
 
+DEFUN(allow_reserved_ranges, allow_reserved_ranges_cmd, "allow-reserved-ranges",
+      "Allow using IPv4 (Class E) reserved IP space\n")
+{
+       host.allow_reserved_ranges = true;
+       return CMD_SUCCESS;
+}
+
+DEFUN(no_allow_reserved_ranges, no_allow_reserved_ranges_cmd,
+      "no allow-reserved-ranges",
+      NO_STR "Allow using IPv4 (Class E) reserved IP space\n")
+{
+       host.allow_reserved_ranges = false;
+       return CMD_SUCCESS;
+}
+
 int cmd_find_cmds(struct vty *vty, struct cmd_token **argv, int argc)
 {
        const struct cmd_node *node;
@@ -2483,6 +2506,7 @@ void cmd_init(int terminal)
        host.lines = -1;
        cmd_banner_motd_line(FRR_DEFAULT_MOTD);
        host.motdfile = NULL;
+       host.allow_reserved_ranges = false;
 
        /* Install top nodes. */
        install_node(&view_node);
@@ -2552,6 +2576,8 @@ void cmd_init(int terminal)
                install_element(CONFIG_NODE, &no_banner_motd_cmd);
                install_element(CONFIG_NODE, &service_terminal_length_cmd);
                install_element(CONFIG_NODE, &no_service_terminal_length_cmd);
+               install_element(CONFIG_NODE, &allow_reserved_ranges_cmd);
+               install_element(CONFIG_NODE, &no_allow_reserved_ranges_cmd);
 
                log_cmd_init();
                vrf_install_commands();
index 7363ed84c888c29d95a0a77c5d4c15156415d846..70e52708a741ee0ac5def20e7d8d94af64fe1f12 100644 (file)
@@ -84,6 +84,9 @@ struct host {
        /* Banner configuration. */
        char *motd;
        char *motdfile;
+
+       /* Allow using IPv4 (Class E) reserved IP space */
+       bool allow_reserved_ranges;
 };
 
 /* List of CLI nodes. Please remember to update the name array in command.c. */
@@ -614,6 +617,7 @@ extern const char *cmd_domainname_get(void);
 extern const char *cmd_system_get(void);
 extern const char *cmd_release_get(void);
 extern const char *cmd_version_get(void);
+extern bool cmd_allow_reserved_ranges_get(void);
 
 /* NOT safe for general use; call this only if DEV_BUILD! */
 extern void grammar_sandbox_init(void);
index 9d79f38b7c02509ccd3fb78cf85138ee03335fc3..bef7f3b209131030a8629198cbf7af7dbd4c1269 100644 (file)
@@ -84,7 +84,7 @@ void log_ref_add(struct log_ref *ref)
 {
        uint32_t i = 0;
 
-       frr_with_mutex(&refs_mtx) {
+       frr_with_mutex (&refs_mtx) {
                while (ref[i].code != END_FERR) {
                        (void)hash_get(refs, &ref[i], hash_alloc_intern);
                        i++;
@@ -98,7 +98,7 @@ struct log_ref *log_ref_get(uint32_t code)
        struct log_ref *ref;
 
        holder.code = code;
-       frr_with_mutex(&refs_mtx) {
+       frr_with_mutex (&refs_mtx) {
                ref = hash_lookup(refs, &holder);
        }
 
@@ -115,7 +115,7 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
        if (json)
                top = json_object_new_object();
 
-       frr_with_mutex(&refs_mtx) {
+       frr_with_mutex (&refs_mtx) {
                errlist = code ? list_new() : hash_to_list(refs);
        }
 
@@ -182,7 +182,7 @@ DEFUN_NOSH(show_error_code,
 
 void log_ref_init(void)
 {
-       frr_with_mutex(&refs_mtx) {
+       frr_with_mutex (&refs_mtx) {
                refs = hash_create(ferr_hash_key, ferr_hash_cmp,
                                   "Error Reference Texts");
        }
@@ -190,7 +190,7 @@ void log_ref_init(void)
 
 void log_ref_fini(void)
 {
-       frr_with_mutex(&refs_mtx) {
+       frr_with_mutex (&refs_mtx) {
                hash_clean(refs, NULL);
                hash_free(refs);
                refs = NULL;
index 35b97a9bde8993539083544f22a46f1b4d14ecbd..3ed1f3e03e96dc6737150d4799d6114e3e5e9a88 100644 (file)
@@ -428,7 +428,7 @@ static void plist_dnode_to_prefix(const struct lyd_node *dnode, bool *any,
 static int _plist_is_dup(const struct lyd_node *dnode, void *arg)
 {
        struct plist_dup_args *pda = arg;
-       struct prefix p;
+       struct prefix p = {};
        int ge, le;
        bool any;
 
index 0f56fbac83426f2d9158206b6eecdbec6f780a79..dd675bbb858b6fc3a5d49729aea9a9c6c6200a6e 100644 (file)
@@ -55,7 +55,7 @@ static struct list *frr_pthread_list;
 
 void frr_pthread_init(void)
 {
-       frr_with_mutex(&frr_pthread_list_mtx) {
+       frr_with_mutex (&frr_pthread_list_mtx) {
                frr_pthread_list = list_new();
        }
 }
@@ -64,7 +64,7 @@ void frr_pthread_finish(void)
 {
        frr_pthread_stop_all();
 
-       frr_with_mutex(&frr_pthread_list_mtx) {
+       frr_with_mutex (&frr_pthread_list_mtx) {
                struct listnode *n, *nn;
                struct frr_pthread *fpt;
 
@@ -105,7 +105,7 @@ struct frr_pthread *frr_pthread_new(const struct frr_pthread_attr *attr,
        pthread_mutex_init(fpt->running_cond_mtx, NULL);
        pthread_cond_init(fpt->running_cond, NULL);
 
-       frr_with_mutex(&frr_pthread_list_mtx) {
+       frr_with_mutex (&frr_pthread_list_mtx) {
                listnode_add(frr_pthread_list, fpt);
        }
 
@@ -126,7 +126,7 @@ static void frr_pthread_destroy_nolock(struct frr_pthread *fpt)
 
 void frr_pthread_destroy(struct frr_pthread *fpt)
 {
-       frr_with_mutex(&frr_pthread_list_mtx) {
+       frr_with_mutex (&frr_pthread_list_mtx) {
                listnode_delete(frr_pthread_list, fpt);
        }
 
@@ -193,7 +193,7 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
 
 void frr_pthread_wait_running(struct frr_pthread *fpt)
 {
-       frr_with_mutex(fpt->running_cond_mtx) {
+       frr_with_mutex (fpt->running_cond_mtx) {
                while (!fpt->running)
                        pthread_cond_wait(fpt->running_cond,
                                          fpt->running_cond_mtx);
@@ -202,7 +202,7 @@ void frr_pthread_wait_running(struct frr_pthread *fpt)
 
 void frr_pthread_notify_running(struct frr_pthread *fpt)
 {
-       frr_with_mutex(fpt->running_cond_mtx) {
+       frr_with_mutex (fpt->running_cond_mtx) {
                fpt->running = true;
                pthread_cond_signal(fpt->running_cond);
        }
@@ -219,7 +219,7 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)
 
 void frr_pthread_stop_all(void)
 {
-       frr_with_mutex(&frr_pthread_list_mtx) {
+       frr_with_mutex (&frr_pthread_list_mtx) {
                struct listnode *n;
                struct frr_pthread *fpt;
                for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt)) {
index e9132f790734607a01063964c1853a845c11fb2d..4b371b43abe40b35ffdc1bc3687e2f0863717f70 100644 (file)
@@ -56,7 +56,7 @@ struct hash *hash_create_size(unsigned int size,
        hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
        hash->stats.empty = hash->size;
 
-       frr_with_mutex(&_hashes_mtx) {
+       frr_with_mutex (&_hashes_mtx) {
                if (!_hashes)
                        _hashes = list_new();
 
@@ -329,7 +329,7 @@ struct list *hash_to_list(struct hash *hash)
 
 void hash_free(struct hash *hash)
 {
-       frr_with_mutex(&_hashes_mtx) {
+       frr_with_mutex (&_hashes_mtx) {
                if (_hashes) {
                        listnode_delete(_hashes, hash);
                        if (_hashes->count == 0) {
index 639a1d37d8a152cc69d43db4786434fed8566dfe..ab5a8515b5c6c1dc2d0020a64095442c71d77659 100644 (file)
@@ -1357,7 +1357,6 @@ struct ls_message *ls_parse_msg(struct stream *s)
        /* Read LS Message header */
        STREAM_GETC(s, msg->event);
        STREAM_GETC(s, msg->type);
-       STREAM_GET(&msg->remote_id, s, sizeof(struct ls_node_id));
 
        /* Read Message Payload */
        switch (msg->type) {
@@ -1365,6 +1364,7 @@ struct ls_message *ls_parse_msg(struct stream *s)
                msg->data.node = ls_parse_node(s);
                break;
        case LS_MSG_TYPE_ATTRIBUTES:
+               STREAM_GET(&msg->remote_id, s, sizeof(struct ls_node_id));
                msg->data.attr = ls_parse_attributes(s);
                break;
        case LS_MSG_TYPE_PREFIX:
@@ -1563,13 +1563,14 @@ static int ls_format_msg(struct stream *s, struct ls_message *msg)
        /* Prepare Link State header */
        stream_putc(s, msg->event);
        stream_putc(s, msg->type);
-       stream_put(s, &msg->remote_id, sizeof(struct ls_node_id));
 
        /* Add Message Payload */
        switch (msg->type) {
        case LS_MSG_TYPE_NODE:
                return ls_format_node(s, msg->data.node);
        case LS_MSG_TYPE_ATTRIBUTES:
+               /* Add remote node first */
+               stream_put(s, &msg->remote_id, sizeof(struct ls_node_id));
                return ls_format_attributes(s, msg->data.attr);
        case LS_MSG_TYPE_PREFIX:
                return ls_format_prefix(s, msg->data.prefix);
@@ -1625,7 +1626,6 @@ int ls_send_msg(struct zclient *zclient, struct ls_message *msg,
 
        return zclient_send_message(zclient);
 }
-
 struct ls_message *ls_vertex2msg(struct ls_message *msg,
                                 struct ls_vertex *vertex)
 {
@@ -1791,9 +1791,10 @@ struct ls_edge *ls_msg2edge(struct ls_ted *ted, struct ls_message *msg,
        case LS_MSG_EVENT_DELETE:
                edge = ls_find_edge_by_source(ted, attr);
                if (edge) {
-                       if (delete)
+                       if (delete) {
                                ls_edge_del_all(ted, edge);
-                       else
+                               edge = NULL;
+                       } else
                                edge->status = DELETE;
                }
                break;
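
In the link_state.c hunks above, the remote node ID is now read and written only for LS_MSG_TYPE_ATTRIBUTES messages, keeping ls_parse_msg() and ls_format_msg() symmetric; ls_msg2edge() additionally clears its local edge pointer after ls_edge_del_all(), presumably so a pointer to the freed edge is not handed back to the caller. A standalone sketch of the type-conditional field pattern; the message layout, types and names below are illustrative only, not the actual link-state wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum msg_type { MSG_NODE = 1, MSG_ATTRIBUTES = 2 };

struct msg {
	uint8_t type;
	uint32_t remote_id;      /* meaningful only for MSG_ATTRIBUTES */
	uint32_t payload;
};

/* Encoder and decoder make the same type-dependent decision, so the
 * serialized form stays symmetric. */
static size_t encode(uint8_t *buf, const struct msg *m)
{
	size_t off = 0;

	buf[off++] = m->type;
	if (m->type == MSG_ATTRIBUTES) {
		memcpy(buf + off, &m->remote_id, sizeof(m->remote_id));
		off += sizeof(m->remote_id);
	}
	memcpy(buf + off, &m->payload, sizeof(m->payload));
	return off + sizeof(m->payload);
}

static void decode(const uint8_t *buf, struct msg *m)
{
	size_t off = 0;

	m->type = buf[off++];
	m->remote_id = 0;
	if (m->type == MSG_ATTRIBUTES) {
		memcpy(&m->remote_id, buf + off, sizeof(m->remote_id));
		off += sizeof(m->remote_id);
	}
	memcpy(&m->payload, buf + off, sizeof(m->payload));
}

int main(void)
{
	uint8_t buf[16];
	struct msg in = { .type = MSG_ATTRIBUTES, .remote_id = 7, .payload = 42 };
	struct msg out;

	encode(buf, &in);
	decode(buf, &out);
	printf("type %u remote_id %u payload %u\n", out.type, out.remote_id,
	       out.payload);
	return 0;
}
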
index f46a2068a1d13eec4b196f5905c8bf2294756c41..ed315452da6d9193365259488b26e383ca65e1ac 100644 (file)
@@ -91,7 +91,7 @@ struct ls_node_id {
                        uint8_t level;                  /* ISIS Level */
                        uint8_t padding;
                } iso;
-       } id __attribute__((aligned(8)));
+       } id;
 };
 
 /**
index f01497dead4042b62b9d45a50477369b888f33dd..df74a8c9badcb1d87891a72c0c778a2f69411c5e 100644 (file)
@@ -43,14 +43,14 @@ static int zlog_filter_lookup(const char *lookup)
 
 void zlog_filter_clear(void)
 {
-       frr_with_mutex(&logfilterlock) {
+       frr_with_mutex (&logfilterlock) {
                zlog_filter_count = 0;
        }
 }
 
 int zlog_filter_add(const char *filter)
 {
-       frr_with_mutex(&logfilterlock) {
+       frr_with_mutex (&logfilterlock) {
                if (zlog_filter_count >= ZLOG_FILTERS_MAX)
                        return 1;
 
@@ -74,7 +74,7 @@ int zlog_filter_add(const char *filter)
 
 int zlog_filter_del(const char *filter)
 {
-       frr_with_mutex(&logfilterlock) {
+       frr_with_mutex (&logfilterlock) {
                int found_idx = zlog_filter_lookup(filter);
                int last_idx = zlog_filter_count - 1;
 
@@ -96,7 +96,7 @@ int zlog_filter_dump(char *buf, size_t max_size)
 {
        int len = 0;
 
-       frr_with_mutex(&logfilterlock) {
+       frr_with_mutex (&logfilterlock) {
                for (int i = 0; i < zlog_filter_count; i++) {
                        int ret;
 
@@ -115,7 +115,7 @@ static int search_buf(const char *buf, size_t len)
 {
        char *found = NULL;
 
-       frr_with_mutex(&logfilterlock) {
+       frr_with_mutex (&logfilterlock) {
                for (int i = 0; i < zlog_filter_count; i++) {
                        found = memmem(buf, len, zlog_filters[i],
                                       strlen(zlog_filters[i]));
index 9cb999110b41aab6c65a6851cee9b59f441422de..95721ffc7745444c0cd1505cdffe134b3750169f 100644 (file)
@@ -198,7 +198,7 @@ class RpcStateBase
 
        static void c_callback(struct thread *thread)
        {
-               auto _tag = static_cast<RpcStateBase *>(thread->arg);
+               auto _tag = static_cast<RpcStateBase *>(THREAD_ARG(thread));
                /*
                 * We hold the lock until the callback finishes and has updated
                 * _tag->state, then we signal done and release.
index 1a3efd32b19e6e28829aa511eca2a50c063e0e8e..e64b10bf24787452081c01262c3ff839b0a632ed 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <zebra.h>
 
+#include "command.h"
 #include "prefix.h"
 #include "ipaddr.h"
 #include "vty.h"
@@ -1386,6 +1387,23 @@ char *evpn_es_df_alg2str(uint8_t df_alg, char *buf, int buf_len)
        return buf;
 }
 
+bool ipv4_unicast_valid(const struct in_addr *addr)
+{
+       in_addr_t ip = ntohl(addr->s_addr);
+
+       if (IPV4_CLASS_D(ip))
+               return false;
+
+       if (IPV4_CLASS_E(ip)) {
+               if (cmd_allow_reserved_ranges_get())
+                       return true;
+               else
+                       return false;
+       }
+
+       return true;
+}
+
 printfrr_ext_autoreg_p("EA", printfrr_ea);
 static ssize_t printfrr_ea(struct fbuf *buf, struct printfrr_eargs *ea,
                           const void *ptr)
index f9eef28a0bb429e63d397916b454e2bd4d5b8c5c..7b2f889874982d744b921e1a114d762c8c1dce16 100644 (file)
@@ -352,7 +352,7 @@ union prefixconstptr {
 #define PREFIX_STRLEN 80
 
 /*
- * Longest possible length of a (S,G) string is 36 bytes
+ * Longest possible length of a (S,G) string is 34 bytes
  * 123.123.123.123 = 15 * 2
  * (,) = 3
  * NULL Character at end = 1
@@ -382,6 +382,8 @@ static inline void ipv4_addr_copy(struct in_addr *dst,
 #define IPV4_NET0(a) ((((uint32_t)(a)) & 0xff000000) == 0x00000000)
 #define IPV4_NET127(a) ((((uint32_t)(a)) & 0xff000000) == 0x7f000000)
 #define IPV4_LINKLOCAL(a) ((((uint32_t)(a)) & 0xffff0000) == 0xa9fe0000)
+#define IPV4_CLASS_D(a) ((((uint32_t)(a)) & 0xf0000000) == 0xe0000000)
+#define IPV4_CLASS_E(a) ((((uint32_t)(a)) & 0xf0000000) == 0xf0000000)
 #define IPV4_CLASS_DE(a) ((((uint32_t)(a)) & 0xe0000000) == 0xe0000000)
 #define IPV4_MC_LINKLOCAL(a) ((((uint32_t)(a)) & 0xffffff00) == 0xe0000000)
 
@@ -507,17 +509,7 @@ extern int str_to_esi(const char *str, esi_t *esi);
 extern char *esi_to_str(const esi_t *esi, char *buf, int size);
 extern char *evpn_es_df_alg2str(uint8_t df_alg, char *buf, int buf_len);
 extern void prefix_evpn_hexdump(const struct prefix_evpn *p);
-
-static inline bool ipv4_unicast_valid(const struct in_addr *addr)
-{
-
-       in_addr_t ip = ntohl(addr->s_addr);
-
-       if (IPV4_CLASS_DE(ip))
-               return false;
-
-       return true;
-}
+extern bool ipv4_unicast_valid(const struct in_addr *addr);
 
 static inline int ipv6_martian(const struct in6_addr *addr)
 {
@@ -534,14 +526,14 @@ static inline int ipv6_martian(const struct in6_addr *addr)
 extern int macstr2prefix_evpn(const char *str, struct prefix_evpn *p);
 
 /* NOTE: This routine expects the address argument in network byte order. */
-static inline int ipv4_martian(const struct in_addr *addr)
+static inline bool ipv4_martian(const struct in_addr *addr)
 {
        in_addr_t ip = ntohl(addr->s_addr);
 
        if (IPV4_NET0(ip) || IPV4_NET127(ip) || !ipv4_unicast_valid(addr)) {
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 static inline bool is_default_prefix4(const struct prefix_ipv4 *p)
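
The prefix.h change above splits the old combined Class D/E test: multicast (224.0.0.0/4) is always rejected as unicast, while reserved Class E space (240.0.0.0/4) is admitted only when allow-reserved-ranges is configured. A small standalone sketch of the same bit tests; the helper below is illustrative, not FRR's ipv4_unicast_valid(), and takes the allow flag as a parameter instead of reading the host configuration:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IPV4_CLASS_D(a) ((((uint32_t)(a)) & 0xf0000000) == 0xe0000000) /* 224/4 */
#define IPV4_CLASS_E(a) ((((uint32_t)(a)) & 0xf0000000) == 0xf0000000) /* 240/4 */

static bool unicast_valid(const char *str, bool allow_reserved)
{
	struct in_addr a;

	if (inet_pton(AF_INET, str, &a) != 1)
		return false;

	uint32_t ip = ntohl(a.s_addr);   /* the macros expect host byte order */

	if (IPV4_CLASS_D(ip))
		return false;            /* multicast is never valid unicast */
	if (IPV4_CLASS_E(ip))
		return allow_reserved;   /* gated by allow-reserved-ranges */
	return true;
}

int main(void)
{
	printf("224.0.0.5  -> %d\n", unicast_valid("224.0.0.5", true));   /* 0 */
	printf("240.1.1.1  -> %d\n", unicast_valid("240.1.1.1", false));  /* 0 */
	printf("240.1.1.1  -> %d\n", unicast_valid("240.1.1.1", true));   /* 1 */
	printf("192.0.2.1  -> %d\n", unicast_valid("192.0.2.1", false));  /* 1 */
	return 0;
}
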
index c012178e71b0ad233857526085081897579087e5..71416beebe55b3940662ba6fef874254d9827be1 100644 (file)
@@ -286,9 +286,6 @@ static void zprivs_caps_init(struct zebra_privs_t *zprivs)
                }
        }
 
-       if (!zprivs_state.syscaps_p)
-               return;
-
        if (!(zprivs_state.caps = cap_init())) {
                fprintf(stderr, "privs_init: failed to cap_init, %s\n",
                        safe_strerror(errno));
@@ -301,10 +298,12 @@ static void zprivs_caps_init(struct zebra_privs_t *zprivs)
                exit(1);
        }
 
-       /* set permitted caps */
-       cap_set_flag(zprivs_state.caps, CAP_PERMITTED,
-                    zprivs_state.syscaps_p->num, zprivs_state.syscaps_p->caps,
-                    CAP_SET);
+       /* set permitted caps, if any */
+       if (zprivs_state.syscaps_p && zprivs_state.syscaps_p->num) {
+               cap_set_flag(zprivs_state.caps, CAP_PERMITTED,
+                            zprivs_state.syscaps_p->num,
+                            zprivs_state.syscaps_p->caps, CAP_SET);
+       }
 
        /* set inheritable caps, if any */
        if (zprivs_state.syscaps_i && zprivs_state.syscaps_i->num) {
@@ -364,7 +363,7 @@ static void zprivs_caps_terminate(void)
        }
 
        /* free up private state */
-       if (zprivs_state.syscaps_p->num) {
+       if (zprivs_state.syscaps_p && zprivs_state.syscaps_p->num) {
                XFREE(MTYPE_PRIVS, zprivs_state.syscaps_p->caps);
                XFREE(MTYPE_PRIVS, zprivs_state.syscaps_p);
        }
@@ -489,7 +488,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
         * Serialize 'raise' operations; particularly important for
         * OSes where privs are process-wide.
         */
-       frr_with_mutex(&(privs->mutex)) {
+       frr_with_mutex (&(privs->mutex)) {
                /* Locate ref-counting object to use */
                refs = get_privs_refs(privs);
 
@@ -518,7 +517,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
        /* Serialize 'lower privs' operation - particularly important
         * when OS privs are process-wide.
         */
-       frr_with_mutex(&(*privs)->mutex) {
+       frr_with_mutex (&(*privs)->mutex) {
                refs = get_privs_refs(*privs);
 
                if (--(refs->refcount) == 0) {
old mode 100644 (file)
new mode 100755 (executable)
index 93fa84b..0d64ad8
@@ -245,6 +245,9 @@ void resolver_resolve(struct resolver_query *query, int af, vrf_id_t vrf_id,
 {
        int ret;
 
+       if (hostname == NULL)
+               return;
+
        if (query->callback != NULL) {
                flog_err(
                        EC_LIB_RESOLVER,
index 7fd5a96e5b5f596ac854b4279f6c8ede2f8f4485..9529b79419407e09010aa28be7d957a56312fbcc 100644 (file)
@@ -2540,7 +2540,8 @@ void route_map_notify_pentry_dependencies(const char *affected_name,
 */
 route_map_result_t route_map_apply_ext(struct route_map *map,
                                       const struct prefix *prefix,
-                                      void *match_object, void *set_object)
+                                      void *match_object, void *set_object,
+                                      int *pref)
 {
        static int recursion = 0;
        enum route_map_cmd_result_t match_ret = RMAP_NOMATCH;
@@ -2676,7 +2677,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
                                                ret = route_map_apply_ext(
                                                        nextrm, prefix,
                                                        match_object,
-                                                       set_object);
+                                                       set_object, NULL);
                                                recursion--;
                                        }
 
@@ -2721,6 +2722,13 @@ route_map_apply_end:
                           (map ? map->name : "null"), prefix,
                           route_map_result_str(ret));
 
+       if (pref) {
+               if (index != NULL && ret == RMAP_PERMITMATCH)
+                       *pref = index->pref;
+               else
+                       *pref = 65536;
+       }
+
        return (ret);
 }
 
index 13dafe6849edb6d1b645b65a1ed2b6c69ed7ea13..ad391981e0de5a0fa9f53ae37044ebcd00bf569d 100644 (file)
@@ -482,9 +482,9 @@ struct route_map *route_map_lookup_warn_noexist(struct vty *vty, const char *nam
 extern route_map_result_t route_map_apply_ext(struct route_map *map,
                                              const struct prefix *prefix,
                                              void *match_object,
-                                             void *set_object);
+                                             void *set_object, int *pref);
 #define route_map_apply(map, prefix, object)                                   \
-       route_map_apply_ext(map, prefix, object, object)
+       route_map_apply_ext(map, prefix, object, object, NULL)
 
 extern void route_map_add_hook(void (*func)(const char *));
 extern void route_map_delete_hook(void (*func)(const char *));
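
route_map_apply_ext() gains a fifth argument: when non-NULL it receives the sequence number (index->pref) of the entry that produced a permit match, or 65536 otherwise. A minimal caller sketch, assuming the FRR lib headers are on the include path; matched_sequence and obj are illustrative names, not part of the FRR API:

#include "prefix.h"
#include "routemap.h"

/* Illustrative only: 'obj' is whatever match/set object the daemon
 * normally passes to route-map evaluation. */
static int matched_sequence(struct route_map *map, const struct prefix *p,
			    void *obj)
{
	int seq = 0;

	if (route_map_apply_ext(map, p, obj, obj, &seq) == RMAP_PERMITMATCH)
		return seq;      /* sequence number of the permitting entry */

	return -1;               /* no permit match; seq was set to 65536 */
}
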
index 45f3c2333073143e07c130afc55eb61c356340f6..7a2b8a1c835e1bd8331e6e98d7987a32b9702a40 100644 (file)
@@ -268,12 +268,9 @@ int setsockopt_ipv4_multicast(int sock, int optname, struct in_addr if_addr,
            && (errno == EADDRINUSE)) {
                /* see above: handle possible problem when interface comes back
                 * up */
-               char buf[1][INET_ADDRSTRLEN];
                zlog_info(
-                       "setsockopt_ipv4_multicast attempting to drop and re-add (fd %d, mcast %s, ifindex %u)",
-                       sock, inet_ntop(AF_INET, &mreqn.imr_multiaddr, buf[0],
-                                       sizeof(buf[0])),
-                       ifindex);
+                       "setsockopt_ipv4_multicast attempting to drop and re-add (fd %d, mcast %pI4, ifindex %u)",
+                       sock, &mreqn.imr_multiaddr, ifindex);
                setsockopt(sock, IPPROTO_IP, IP_DROP_MEMBERSHIP, (void *)&mreqn,
                           sizeof(mreqn));
                ret = setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
@@ -309,12 +306,9 @@ int setsockopt_ipv4_multicast(int sock, int optname, struct in_addr if_addr,
            && (errno == EADDRINUSE)) {
                /* see above: handle possible problem when interface comes back
                 * up */
-               char buf[1][INET_ADDRSTRLEN];
                zlog_info(
-                       "setsockopt_ipv4_multicast attempting to drop and re-add (fd %d, mcast %s, ifindex %u)",
-                       sock, inet_ntop(AF_INET, &mreq.imr_multiaddr, buf[0],
-                                       sizeof(buf[0])),
-                       ifindex);
+                       "setsockopt_ipv4_multicast attempting to drop and re-add (fd %d, mcast %pI4, ifindex %u)",
+                       sock, &mreq.imr_multiaddr, ifindex);
                setsockopt(sock, IPPROTO_IP, IP_DROP_MEMBERSHIP, (void *)&mreq,
                           sizeof(mreq));
                ret = setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
index eff38798cc786bde9e578b2dd41e197a5af3053b..36ae21fdd181d40cc4a3a650fa09d2a558cbc822 100644 (file)
@@ -290,8 +290,10 @@ int sockopt_reuseaddr(int sock)
        ret = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
                         sizeof(on));
        if (ret < 0) {
-               flog_err(EC_LIB_SOCKET,
-                        "can't set sockopt SO_REUSEADDR to socket %d", sock);
+               flog_err(
+                       EC_LIB_SOCKET,
+                       "can't set sockopt SO_REUSEADDR to socket %d errno=%d: %s",
+                       sock, errno, safe_strerror(errno));
                return -1;
        }
        return 0;
index 83ed015bc905c0358f658ad197814ee7c6100079..2de3abdf45fc654dc4151e8172de2f22375c10e6 100644 (file)
@@ -1280,7 +1280,7 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
 
 void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
 {
-       frr_with_mutex(&fifo->mtx) {
+       frr_with_mutex (&fifo->mtx) {
                stream_fifo_push(fifo, s);
        }
 }
@@ -1312,7 +1312,7 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
 {
        struct stream *ret;
 
-       frr_with_mutex(&fifo->mtx) {
+       frr_with_mutex (&fifo->mtx) {
                ret = stream_fifo_pop(fifo);
        }
 
@@ -1328,7 +1328,7 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
 {
        struct stream *ret;
 
-       frr_with_mutex(&fifo->mtx) {
+       frr_with_mutex (&fifo->mtx) {
                ret = stream_fifo_head(fifo);
        }
 
@@ -1350,7 +1350,7 @@ void stream_fifo_clean(struct stream_fifo *fifo)
 
 void stream_fifo_clean_safe(struct stream_fifo *fifo)
 {
-       frr_with_mutex(&fifo->mtx) {
+       frr_with_mutex (&fifo->mtx) {
                stream_fifo_clean(fifo);
        }
 }
index b0c311e7f263547cdd775b8ffc24b918025a8328..d6defd7149fd1aa43a09fc2850662dbae16bae4a 100644 (file)
@@ -469,11 +469,7 @@ am__v_XRELFO_ = $(am__v_XRELFO_$(AM_DEFAULT_VERBOSITY))
 am__v_XRELFO_0 = @echo "  XRELFO  " $@;
 am__v_XRELFO_1 =
 
-if DEV_BUILD
 XRELFO_FLAGS = -Wlog-format -Wlog-args
-else
-XRELFO_FLAGS =
-endif
 
 SUFFIXES += .xref
 %.xref: % $(CLIPPY)
index 698cc73465c127ad2e06f170fc7abc6af0f69ce4..e61c7fa30298c121bed3ce87a4c58bc9e6b9b148 100644 (file)
@@ -104,13 +104,6 @@ struct ttable *ttable_new(const struct ttable_style *tts);
  */
 void ttable_del(struct ttable *tt);
 
-/**
- * Deletes an individual cell.
- *
- * @param cell the cell to destroy
- */
-void ttable_cell_del(struct ttable_cell *cell);
-
 /**
  * Inserts a new row at the given index.
  *
index fd79503cc6ac5ee3affd430a7f07411938af0309..c3613b5b0e87b68314ec7cd460f51e705af06a5b 100644 (file)
@@ -217,7 +217,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
        tmp.funcname = "TOTAL";
        tmp.types = filter;
 
-       frr_with_mutex(&masters_mtx) {
+       frr_with_mutex (&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
                        const char *name = m->name ? m->name : "main";
 
@@ -283,9 +283,9 @@ static void cpu_record_clear(uint8_t filter)
        struct thread_master *m;
        struct listnode *ln;
 
-       frr_with_mutex(&masters_mtx) {
+       frr_with_mutex (&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
-                       frr_with_mutex(&m->mtx) {
+                       frr_with_mutex (&m->mtx) {
                                void *args[2] = {tmp, m->cpu_record};
                                hash_iterate(
                                        m->cpu_record,
@@ -463,7 +463,7 @@ DEFUN_NOSH (show_thread_poll,
        struct listnode *node;
        struct thread_master *m;
 
-       frr_with_mutex(&masters_mtx) {
+       frr_with_mutex (&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
                        show_thread_poll_helper(vty, m);
                }
@@ -630,7 +630,7 @@ struct thread_master *thread_master_create(const char *name)
                                   sizeof(struct pollfd) * rv->handler.pfdsize);
 
        /* add to list of threadmasters */
-       frr_with_mutex(&masters_mtx) {
+       frr_with_mutex (&masters_mtx) {
                if (!masters)
                        masters = list_new();
 
@@ -642,7 +642,7 @@ struct thread_master *thread_master_create(const char *name)
 
 void thread_master_set_name(struct thread_master *master, const char *name)
 {
-       frr_with_mutex(&master->mtx) {
+       frr_with_mutex (&master->mtx) {
                XFREE(MTYPE_THREAD_MASTER, master->name);
                master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
        }
@@ -708,7 +708,7 @@ static void thread_array_free(struct thread_master *m,
  */
 void thread_master_free_unused(struct thread_master *m)
 {
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                struct thread *t;
                while ((t = thread_list_pop(&m->unuse)))
                        thread_free(m, t);
@@ -720,7 +720,7 @@ void thread_master_free(struct thread_master *m)
 {
        struct thread *t;
 
-       frr_with_mutex(&masters_mtx) {
+       frr_with_mutex (&masters_mtx) {
                listnode_delete(masters, m);
                if (masters->count == 0) {
                        list_delete(&masters);
@@ -756,7 +756,10 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
 {
        int64_t remain;
 
-       frr_with_mutex(&thread->mtx) {
+       if (!thread_is_scheduled(thread))
+               return 0;
+
+       frr_with_mutex (&thread->mtx) {
                remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
        }
 
@@ -772,7 +775,7 @@ unsigned long thread_timer_remain_second(struct thread *thread)
 struct timeval thread_timer_remain(struct thread *thread)
 {
        struct timeval remain;
-       frr_with_mutex(&thread->mtx) {
+       frr_with_mutex (&thread->mtx) {
                monotime_until(&thread->u.sands, &remain);
        }
        return remain;
@@ -987,7 +990,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
        if (fd >= m->fd_limit)
                assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
 
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                if (t_ptr && *t_ptr)
                        // thread is already scheduled; don't reschedule
                        break;
@@ -1030,7 +1033,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
                        m->handler.pfdcount++;
 
                if (thread) {
-                       frr_with_mutex(&thread->mtx) {
+                       frr_with_mutex (&thread->mtx) {
                                thread->u.fd = fd;
                                thread_array[thread->u.fd] = thread;
                        }
@@ -1066,14 +1069,14 @@ static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
        monotime(&t);
        timeradd(&t, time_relative, &t);
 
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                if (t_ptr && *t_ptr)
                        /* thread is already scheduled; don't reschedule */
                        return;
 
                thread = thread_get(m, THREAD_TIMER, func, arg, xref);
 
-               frr_with_mutex(&thread->mtx) {
+               frr_with_mutex (&thread->mtx) {
                        thread->u.sands = t;
                        thread_timer_list_add(&m->timer, thread);
                        if (t_ptr) {
@@ -1151,13 +1154,13 @@ void _thread_add_event(const struct xref_threadsched *xref,
 
        assert(m != NULL);
 
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                if (t_ptr && *t_ptr)
                        /* thread is already scheduled; don't reschedule */
                        break;
 
                thread = thread_get(m, THREAD_EVENT, func, arg, xref);
-               frr_with_mutex(&thread->mtx) {
+               frr_with_mutex (&thread->mtx) {
                        thread->u.val = val;
                        thread_list_add_tail(&m->event, thread);
                }
@@ -1438,7 +1441,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
 
        cr->flags = flags;
 
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                cr->eventobj = arg;
                listnode_add(m->cancel_req, cr);
                do_thread_cancel(m);
@@ -1496,7 +1499,7 @@ void thread_cancel(struct thread **thread)
 
        assert(master->owner == pthread_self());
 
-       frr_with_mutex(&master->mtx) {
+       frr_with_mutex (&master->mtx) {
                struct cancel_req *cr =
                        XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
                cr->thread = *thread;
@@ -1548,7 +1551,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
 
        assert(master->owner != pthread_self());
 
-       frr_with_mutex(&master->mtx) {
+       frr_with_mutex (&master->mtx) {
                master->canceled = false;
 
                if (thread) {
@@ -1928,7 +1931,7 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
 int thread_should_yield(struct thread *thread)
 {
        int result;
-       frr_with_mutex(&thread->mtx) {
+       frr_with_mutex (&thread->mtx) {
                result = monotime_since(&thread->real, NULL)
                         > (int64_t)thread->yield;
        }
@@ -1937,7 +1940,7 @@ int thread_should_yield(struct thread *thread)
 
 void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
 {
-       frr_with_mutex(&thread->mtx) {
+       frr_with_mutex (&thread->mtx) {
                thread->yield = yield_time;
        }
 }
@@ -2079,11 +2082,11 @@ void _thread_execute(const struct xref_threadsched *xref,
        struct thread *thread;
 
        /* Get or allocate new thread to execute. */
-       frr_with_mutex(&m->mtx) {
+       frr_with_mutex (&m->mtx) {
                thread = thread_get(m, THREAD_EVENT, func, arg, xref);
 
                /* Set its event value. */
-               frr_with_mutex(&thread->mtx) {
+               frr_with_mutex (&thread->mtx) {
                        thread->add_type = THREAD_EXECUTE;
                        thread->u.val = val;
                        thread->ref = &thread;
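
Two patterns recur in the lib/thread.c hunks above: the t_ptr slot check ("thread is already scheduled; don't reschedule") and thread_timer_remain_msec() now returning 0 for a timer that is not scheduled, which is consistent with ldpd's nbr_to_ctl() dropping its own NULL check earlier in this diff. A standalone sketch of those slot semantics, with purely illustrative names and a wall-clock stand-in for the monotonic timer:

#include <stdio.h>
#include <time.h>

struct timer {
	time_t expires;          /* absolute expiry, demo only */
};

/* Schedule only if the slot is empty, mirroring the t_ptr check. */
static void timer_schedule(struct timer **slot, struct timer *storage,
			   unsigned secs)
{
	if (*slot)
		return;                          /* already scheduled */
	storage->expires = time(NULL) + secs;
	*slot = storage;
}

/* Remaining time; an unscheduled timer simply reports 0. */
static long timer_remain_sec(struct timer *t)
{
	if (!t)
		return 0;
	long remain = (long)(t->expires - time(NULL));
	return remain > 0 ? remain : 0;
}

static void timer_off(struct timer **slot)
{
	*slot = NULL;                            /* cancel and clear */
}

int main(void)
{
	struct timer storage, *keepalive = NULL;

	printf("remain before scheduling: %ld\n", timer_remain_sec(keepalive));
	timer_schedule(&keepalive, &storage, 30);
	timer_schedule(&keepalive, &storage, 5); /* no-op: already scheduled */
	printf("remain after scheduling:  %ld\n", timer_remain_sec(keepalive));
	timer_off(&keepalive);
	printf("remain after timer_off:   %ld\n", timer_remain_sec(keepalive));
	return 0;
}
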
index 06fdc52e78ead3da1a2bedf44e050612dda9928d..50c410ad24242944d7d669bcfa6795e178682fcb 100644 (file)
@@ -308,6 +308,25 @@ struct dlist_head {
 static inline void typesafe_dlist_add(struct dlist_head *head,
                struct dlist_item *prev, struct dlist_item *item)
 {
+       /* SA on clang-11 thinks this can happen, but in reality -assuming no
+        * memory corruption- it can't.  DLIST uses a "closed" ring, i.e. the
+        * termination at the end of the list is not NULL but rather a pointer
+        * back to the head.  (This eliminates special-casing the first or last
+        * item.)
+        *
+        * Sadly, can't use assert() here since the libfrr assert / xref code
+        * uses typesafe lists itself...  that said, if an assert tripped here
+        * we'd already be way past some memory corruption, so we might as
+        * well just take the SEGV.  (In the presence of corruption, we'd see
+        * random SEGVs from places that make no sense at all anyway, an
+        * assert might actually be a red herring.)
+        *
+        * ("assume()" tells the compiler to produce code as if the condition
+        * will always hold;  it doesn't have any actual effect here, it'll
+        * just SEGV out on "item->next->prev = item".)
+        */
+       assume(prev->next != NULL);
+
        item->next = prev->next;
        item->next->prev = item;
        item->prev = prev;
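
The comment added above describes the "closed ring" layout: the list head is itself a ring element, so prev->next is never NULL in an intact list. A compact standalone illustration of that invariant; it has the same shape as the insertion above but is not FRR's typesafe implementation:

#include <stdio.h>

struct ditem {
	struct ditem *next, *prev;
};

/* The head participates in the ring: an empty list points at itself. */
static void dlist_init(struct ditem *head)
{
	head->next = head->prev = head;
}

/* Insert 'item' right after 'prev'; no NULL checks are needed because the
 * ring is closed - the "end" of the list is the head, never NULL. */
static void dlist_add_after(struct ditem *prev, struct ditem *item)
{
	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
}

int main(void)
{
	struct ditem head, a, b;
	int count = 0;

	dlist_init(&head);
	dlist_add_after(&head, &a);       /* head <-> a */
	dlist_add_after(&a, &b);          /* head <-> a <-> b */

	for (struct ditem *it = head.next; it != &head; it = it->next)
		count++;
	printf("items in ring: %d\n", count);   /* prints 2 */
	return 0;
}
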
index bee76c6e0f51b797818f3d6ee1a79d345ff486e5..ea21d1324bd35c57e409803896788b2897171b78 100644 (file)
@@ -715,7 +715,7 @@ size_t yang_dnode_get_binary_buf(char *buf, size_t size,
 
        canon = YANG_DNODE_XPATH_GET_CANON(dnode, xpath_fmt);
        cannon_len = strlen(canon);
-       decode_len = cannon_len;
+       decode_len = cannon_len + 1;
        value_str = (char *)malloc(decode_len);
        base64_init_decodestate(&s);
        cnt = base64_decode_block(canon, cannon_len, value_str, &s);
index 48785ad298568aae6398ab25e474701216140c7c..31bd8e16eb0278a8652b71279c08dc0c7e917000 100644 (file)
@@ -242,14 +242,14 @@ static bool zlog_file_cycle(struct zlog_cfg_file *zcf)
 
 void zlog_file_set_other(struct zlog_cfg_file *zcf)
 {
-       frr_with_mutex(&zcf->cfg_mtx) {
+       frr_with_mutex (&zcf->cfg_mtx) {
                zlog_file_cycle(zcf);
        }
 }
 
 bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)
 {
-       frr_with_mutex(&zcf->cfg_mtx) {
+       frr_with_mutex (&zcf->cfg_mtx) {
                XFREE(MTYPE_LOG_FD_NAME, zcf->filename);
                zcf->filename = XSTRDUP(MTYPE_LOG_FD_NAME, filename);
                zcf->fd = -1;
@@ -261,7 +261,7 @@ bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)
 
 bool zlog_file_set_fd(struct zlog_cfg_file *zcf, int fd)
 {
-       frr_with_mutex(&zcf->cfg_mtx) {
+       frr_with_mutex (&zcf->cfg_mtx) {
                if (zcf->fd == fd)
                        return true;
 
@@ -283,7 +283,7 @@ bool zlog_file_rotate(struct zlog_cfg_file *zcf)
        struct rcu_close_rotate *rcr;
        int fd;
 
-       frr_with_mutex(&zcf->cfg_mtx) {
+       frr_with_mutex (&zcf->cfg_mtx) {
                if (!zcf->active || !zcf->filename)
                        return true;
 
@@ -517,7 +517,7 @@ void zlog_syslog_set_facility(int facility)
        struct zlog_target *newztc;
        struct zlt_syslog *newzt;
 
-       frr_with_mutex(&syslog_cfg_mutex) {
+       frr_with_mutex (&syslog_cfg_mutex) {
                if (facility == syslog_facility)
                        return;
                syslog_facility = facility;
@@ -540,7 +540,7 @@ void zlog_syslog_set_facility(int facility)
 
 int zlog_syslog_get_facility(void)
 {
-       frr_with_mutex(&syslog_cfg_mutex) {
+       frr_with_mutex (&syslog_cfg_mutex) {
                return syslog_facility;
        }
        assert(0);
@@ -551,7 +551,7 @@ void zlog_syslog_set_prio_min(int prio_min)
        struct zlog_target *newztc;
        struct zlt_syslog *newzt = NULL;
 
-       frr_with_mutex(&syslog_cfg_mutex) {
+       frr_with_mutex (&syslog_cfg_mutex) {
                if (prio_min == syslog_prio_min)
                        return;
                syslog_prio_min = prio_min;
@@ -577,7 +577,7 @@ void zlog_syslog_set_prio_min(int prio_min)
 
 int zlog_syslog_get_prio_min(void)
 {
-       frr_with_mutex(&syslog_cfg_mutex) {
+       frr_with_mutex (&syslog_cfg_mutex) {
                return syslog_prio_min;
        }
        assert(0);
index 4b030325663fb8a17d68d482449360750c4c8983..e7f2eaf5a7e165a2c572804b170c31f24ae60ea9 100644 (file)
@@ -17,6 +17,7 @@
 #include "memory.h"
 #include "thread.h"
 #include "hash.h"
+#include "network.h"
 
 #include "nhrpd.h"
 #include "nhrp_protocol.h"
@@ -334,7 +335,7 @@ int nhrp_peer_check(struct nhrp_peer *p, int establish)
                        &p->t_fallback);
        } else {
                /* Maximum timeout is 1 second */
-               int r_time_ms = rand() % 1000;
+               int r_time_ms = frr_weak_random() % 1000;
 
                debugf(NHRP_DEBUG_COMMON,
                       "Initiating IPsec connection request to %pSU after %d ms:",
index 33a7a57c0fc999c7a022ae381b45afd6e529e065..f8b37d803f1b1514f4ab5ca7fc41bcae20169213 100644 (file)
@@ -340,7 +340,7 @@ int ospf6_process_grace_lsa(struct ospf6 *ospf6, struct ospf6_lsa *lsa,
            && !OSPF6_GR_IS_PLANNED_RESTART(restart_reason)) {
                if (IS_DEBUG_OSPF6_GR)
                        zlog_debug(
-                               "%s, Router supports only planned restarts but received the GRACE LSA due a unplanned restart",
+                               "%s, Router supports only planned restarts but received the GRACE LSA due to an unplanned restart",
                                __func__);
                restarter->gr_helper_info.rejected_reason =
                        OSPF6_HELPER_PLANNED_ONLY_RESTART;
index efa5d2b7ab91392ea02a4300d48a36594724efef..155374d3f0b35f224d4ef84dc6e98c6976d823bd 100644 (file)
@@ -966,10 +966,6 @@ static const char *ospf6_iftype_str(uint8_t iftype)
        return "UNKNOWN";
 }
 
-#if CONFDATE > 20220709
-CPP_NOTICE("Time to remove ospf6Enabled from JSON output")
-#endif
-
 /* show specified interface structure */
 static int ospf6_interface_show(struct vty *vty, struct interface *ifp,
                                json_object *json_obj, bool use_json)
@@ -996,11 +992,8 @@ static int ospf6_interface_show(struct vty *vty, struct interface *ifp,
                                       ospf6_iftype_str(default_iftype));
                json_object_int_add(json_obj, "interfaceId", ifp->ifindex);
 
-               if (ifp->info == NULL) {
-                       json_object_boolean_false_add(json_obj, "ospf6Enabled");
+               if (ifp->info == NULL)
                        return 0;
-               }
-               json_object_boolean_true_add(json_obj, "ospf6Enabled");
 
                oi = (struct ospf6_interface *)ifp->info;
 
@@ -1719,8 +1712,11 @@ void ospf6_interface_start(struct ospf6_interface *oi)
        if (oi->area_id_format == OSPF6_AREA_FMT_UNSET)
                return;
 
-       if (oi->area)
+       if (oi->area) {
+               /* Recompute cost */
+               ospf6_interface_recalculate_cost(oi);
                return;
+       }
 
        ospf6 = oi->interface->vrf->info;
        if (!ospf6)
index b5ea3ada36337610437867334f180028d59f54f3..52bb745d74cfe6f1649fef8236065bd2dd86f289 100644 (file)
@@ -1765,10 +1765,8 @@ void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa)
        intra_prefix_lsa =
                (struct ospf6_intra_prefix_lsa *)OSPF6_LSA_HEADER_END(
                        lsa->header);
-       if (intra_prefix_lsa->ref_type == htons(OSPF6_LSTYPE_ROUTER))
-               ospf6_linkstate_prefix(intra_prefix_lsa->ref_adv_router,
-                                      intra_prefix_lsa->ref_id, &ls_prefix);
-       else if (intra_prefix_lsa->ref_type == htons(OSPF6_LSTYPE_NETWORK))
+       if (intra_prefix_lsa->ref_type == htons(OSPF6_LSTYPE_ROUTER) ||
+           intra_prefix_lsa->ref_type == htons(OSPF6_LSTYPE_NETWORK))
                ospf6_linkstate_prefix(intra_prefix_lsa->ref_adv_router,
                                       intra_prefix_lsa->ref_id, &ls_prefix);
        else {
index 93a062b215f5f1f9d81d026d8d95019dff42eae6..360a9db0d6e05a983339aec4b0fa3e76249375e4 100644 (file)
@@ -2086,7 +2086,6 @@ static void ospf6_write(struct thread *thread)
 {
        struct ospf6 *ospf6 = THREAD_ARG(thread);
        struct ospf6_interface *oi;
-       struct ospf6_interface *last_serviced_oi = NULL;
        struct ospf6_header *oh;
        struct ospf6_packet *op;
        struct listnode *node;
@@ -2106,9 +2105,7 @@ static void ospf6_write(struct thread *thread)
        assert(node);
        oi = listgetdata(node);
 
-       while ((pkt_count < ospf6->write_oi_count) && oi
-              && (last_serviced_oi != oi)) {
-
+       while ((pkt_count < ospf6->write_oi_count) && oi) {
                op = ospf6_fifo_head(oi->obuf);
                assert(op);
                assert(op->length >= OSPF6_HEADER_SIZE);
@@ -2221,7 +2218,6 @@ static void ospf6_write(struct thread *thread)
                list_delete_node(ospf6->oi_write_q, node);
                if (ospf6_fifo_head(oi->obuf) == NULL) {
                        oi->on_write_q = 0;
-                       last_serviced_oi = NULL;
                        oi = NULL;
                } else {
                        listnode_add(ospf6->oi_write_q, oi);
index e84c6f5b3c975f51187a812dfd7b75ee9323c049..b5e6389d4c9b0cee3690537ed72bafe56753a36e 100644 (file)
@@ -439,6 +439,12 @@ int ospf_apiclient_lsa_originate(struct ospf_apiclient *oclient,
        struct lsa_header *lsah;
        uint32_t tmp;
 
+       /* Validate opaque LSA length */
+       if ((size_t)opaquelen > sizeof(buf) - sizeof(struct lsa_header)) {
+               fprintf(stderr, "opaquelen(%d) is larger than buf size %zu\n",
+                       opaquelen, sizeof(buf));
+               return OSPF_API_NOMEMORY;
+       }
 
        /* We can only originate opaque LSAs */
        if (!IS_OPAQUE_LSA(lsa_type)) {
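
The new check above rejects opaque payloads that would not fit in the fixed-size message buffer next to the LSA header. The same guard pattern in standalone form; the buffer size, struct and function names below are illustrative, not the OSPF API client's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSG_BUF_SIZE 1500

struct hdr {
	uint16_t type;
	uint16_t length;
};

/* Copy a variable-length payload after a fixed header, refusing anything
 * that would overflow the destination buffer. */
static int build_msg(uint8_t *buf, size_t bufsize, const struct hdr *h,
		     const void *payload, size_t payload_len)
{
	if (bufsize < sizeof(struct hdr) ||
	    payload_len > bufsize - sizeof(struct hdr)) {
		fprintf(stderr, "payload %zu too large for %zu-byte buffer\n",
			payload_len, bufsize);
		return -1;
	}
	memcpy(buf, h, sizeof(*h));
	memcpy(buf + sizeof(*h), payload, payload_len);
	return (int)(sizeof(*h) + payload_len);
}

int main(void)
{
	uint8_t buf[MSG_BUF_SIZE];
	struct hdr h = { .type = 1, .length = 0 };
	uint8_t payload[64] = { 0 };

	printf("built %d bytes\n",
	       build_msg(buf, sizeof(buf), &h, payload, sizeof(payload)));
	return 0;
}
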
index 7aa3a27c604d29d161de0469d87ce58c19f34fc9..b7db1a6a83fb57807c70c3a6fd6f3e9254f0b0d6 100644 (file)
@@ -350,7 +350,6 @@ static int ospf_abr_nssa_am_elected(struct ospf_area *area)
        struct ospf_lsa *lsa;
        struct router_lsa *rlsa;
        struct in_addr *best = NULL;
-       char buf[PREFIX_STRLEN];
 
        LSDB_LOOP (ROUTER_LSDB(area), rn, lsa) {
                /* sanity checks */
@@ -382,9 +381,8 @@ static int ospf_abr_nssa_am_elected(struct ospf_area *area)
 
        if (IS_DEBUG_OSPF_NSSA)
                zlog_debug(
-                       "ospf_abr_nssa_am_elected: best electable ABR is: %s",
-                       (best) ? inet_ntop(AF_INET, best, buf, sizeof(buf)) :
-                       "<none>");
+                       "ospf_abr_nssa_am_elected: best electable ABR is: %pI4",
+                       best);
 
        if (best == NULL)
                return 1;
@@ -1069,7 +1067,7 @@ static void ospf_abr_process_network_rt(struct ospf *ospf,
                    && !OSPF_IS_AREA_ID_BACKBONE(or->u.std.area_id)) {
                        if (IS_DEBUG_OSPF_EVENT)
                                zlog_debug(
-                                       "ospf_abr_process_network_rt(): this is route is not backbone one, skipping");
+                                       "ospf_abr_process_network_rt(): this route is not backbone one, skipping");
                        continue;
                }
 
index 9d73c3dfe63ba8921c2d3d74e0b29b55da25e84f..b8567830b1d6edc04570a2457b8e2cb9a5d49cbb 100644 (file)
@@ -318,12 +318,12 @@ void ospf_apiserver_free(struct ospf_apiserver *apiserv)
        struct listnode *node;
 
        /* Cancel read and write threads. */
-       thread_cancel(&apiserv->t_sync_read);
+       THREAD_OFF(apiserv->t_sync_read);
 #ifdef USE_ASYNC_READ
-       thread_cancel(&apiserv->t_async_read);
+       THREAD_OFF(apiserv->t_async_read);
 #endif /* USE_ASYNC_READ */
-       thread_cancel(&apiserv->t_sync_write);
-       thread_cancel(&apiserv->t_async_write);
+       THREAD_OFF(apiserv->t_sync_write);
+       THREAD_OFF(apiserv->t_async_write);
 
        /* Unregister all opaque types that application registered
           and flush opaque LSAs if still in LSDB. */
index 56116cd28d1988d4f3e5c9134135ae126278323f..fb117ecfc21b0a21926b01856555ebb811983925 100644 (file)
@@ -266,8 +266,12 @@ DEFUN (ip_ospf_bfd_prof,
        struct ospf_if_params *params;
        int idx_prof = 4;
 
-       ospf_interface_enable_bfd(ifp);
        params = IF_DEF_PARAMS(ifp);
+       if (!params->bfd_config) {
+               vty_out(vty, "ip ospf bfd has not been set\n");
+               return CMD_WARNING;
+       }
+
        strlcpy(params->bfd_config->profile, argv[idx_prof]->arg,
                sizeof(params->bfd_config->profile));
        ospf_interface_bfd_apply(ifp);
@@ -288,8 +292,10 @@ DEFUN (no_ip_ospf_bfd_prof,
        VTY_DECLVAR_CONTEXT(interface, ifp);
        struct ospf_if_params *params;
 
-       ospf_interface_enable_bfd(ifp);
        params = IF_DEF_PARAMS(ifp);
+       if (!params->bfd_config)
+               return CMD_SUCCESS;
+
        params->bfd_config->profile[0] = 0;
        ospf_interface_bfd_apply(ifp);
 
index 8f9153d766ae64d49c6219d8e8b20f6f323a684c..fb3fb21e080209b04910a6ca3f9efc6cd86e3aa4 100644 (file)
@@ -454,16 +454,13 @@ int ospf_flood_through_interface(struct ospf_interface *oi,
        struct ospf_neighbor *onbr;
        struct route_node *rn;
        int retx_flag;
-       char buf[PREFIX_STRLEN];
 
        if (IS_DEBUG_OSPF_EVENT)
                zlog_debug(
-                       "%s: considering int %s (%s), INBR(%s), LSA[%s] AGE %u",
+                       "%s: considering int %s (%s), INBR(%pI4), LSA[%s] AGE %u",
                        __func__, IF_NAME(oi), ospf_get_name(oi->ospf),
-                       inbr ? inet_ntop(AF_INET, &inbr->router_id, buf,
-                                        sizeof(buf))
-                            : "NULL",
-                       dump_lsa_key(lsa), ntohs(lsa->data->ls_age));
+                       inbr ? &inbr->router_id : NULL, dump_lsa_key(lsa),
+                       ntohs(lsa->data->ls_age));
 
        if (!ospf_if_is_enable(oi))
                return 0;
@@ -641,13 +638,15 @@ int ospf_flood_through_interface(struct ospf_interface *oi,
        if (oi->type == OSPF_IFTYPE_NBMA) {
                struct ospf_neighbor *nbr;
 
-               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
-                       if ((nbr = rn->info) != NULL)
-                               if (nbr != oi->nbr_self
-                                   && nbr->state >= NSM_Exchange)
-                                       ospf_ls_upd_send_lsa(
-                                               nbr, lsa,
-                                               OSPF_SEND_PACKET_DIRECT);
+               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+                       nbr = rn->info;
+
+                       if (!nbr)
+                               continue;
+                       if (nbr != oi->nbr_self && nbr->state >= NSM_Exchange)
+                               ospf_ls_upd_send_lsa(nbr, lsa,
+                                                    OSPF_SEND_PACKET_DIRECT);
+               }
        } else
                ospf_ls_upd_send_lsa(oi->nbr_self, lsa,
                                     OSPF_SEND_PACKET_INDIRECT);
@@ -994,18 +993,20 @@ static void ospf_ls_retransmit_delete_nbr_if(struct ospf_interface *oi,
        struct ospf_lsa *lsr;
 
        if (ospf_if_is_enable(oi))
-               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
+               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
                        /* If LSA find in LS-retransmit list, then remove it. */
-                       if ((nbr = rn->info) != NULL) {
-                               lsr = ospf_ls_retransmit_lookup(nbr, lsa);
+                       nbr = rn->info;
 
-                               /* If LSA find in ls-retransmit list, remove it.
-                                */
-                               if (lsr != NULL
-                                   && lsr->data->ls_seqnum
-                                              == lsa->data->ls_seqnum)
-                                       ospf_ls_retransmit_delete(nbr, lsr);
-                       }
+                       if (!nbr)
+                               continue;
+
+                       lsr = ospf_ls_retransmit_lookup(nbr, lsa);
+
+                       /* If LSA find in ls-retransmit list, remove it. */
+                       if (lsr != NULL &&
+                           lsr->data->ls_seqnum == lsa->data->ls_seqnum)
+                               ospf_ls_retransmit_delete(nbr, lsr);
+               }
 }
 
 void ospf_ls_retransmit_delete_nbr_area(struct ospf_area *area,
index 2521f2fce0badda48569d4d87f78c7ffc8f33cf1..66ef1d6564d967e5855af6cf9986860064704dae 100644 (file)
@@ -216,7 +216,7 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
                zlog_debug("GR: exiting graceful restart: %s", reason);
 
        ospf->gr_info.restart_in_progress = false;
-       OSPF_TIMER_OFF(ospf->gr_info.t_grace_period);
+       THREAD_OFF(ospf->gr_info.t_grace_period);
 
        /* Record in non-volatile memory that the restart is complete. */
        ospf_gr_nvm_delete(ospf);
index 5df2ecf070dccf282392151c523490c7da86a673..2a758d55838e63f9dcf59b00779c216baaabe068 100644 (file)
@@ -66,15 +66,16 @@ int ospf_interface_neighbor_count(struct ospf_interface *oi)
 
        for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
                nbr = rn->info;
-               if (nbr) {
-                       /* Do not show myself. */
-                       if (nbr == oi->nbr_self)
-                               continue;
-                       /* Down state is not shown. */
-                       if (nbr->state == NSM_Down)
-                               continue;
-                       count++;
-               }
+               if (!nbr)
+                       continue;
+
+               /* Do not show myself. */
+               if (nbr == oi->nbr_self)
+                       continue;
+               /* Down state is not shown. */
+               if (nbr->state == NSM_Down)
+                       continue;
+               count++;
        }
 
        return count;
@@ -302,7 +303,7 @@ void ospf_if_cleanup(struct ospf_interface *oi)
        /* oi->nbrs and oi->nbr_nbma should be deleted on InterfaceDown event */
        /* delete all static neighbors attached to this interface */
        for (ALL_LIST_ELEMENTS(oi->nbr_nbma, node, nnode, nbr_nbma)) {
-               OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+               THREAD_OFF(nbr_nbma->t_poll);
 
                if (nbr_nbma->nbr) {
                        nbr_nbma->nbr->nbr_nbma = NULL;
@@ -315,10 +316,11 @@ void ospf_if_cleanup(struct ospf_interface *oi)
        }
 
        /* send Neighbor event KillNbr to all associated neighbors. */
-       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
+       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
                if ((nbr = rn->info) != NULL)
                        if (nbr != oi->nbr_self)
                                OSPF_NSM_EVENT_EXECUTE(nbr, NSM_KillNbr);
+       }
 
        /* Cleanup Link State Acknowlegdment list. */
        for (ALL_LIST_ELEMENTS(oi->ls_ack, node, nnode, lsa))
@@ -492,6 +494,20 @@ struct ospf_interface *ospf_if_lookup_recv_if(struct ospf *ospf,
        return match;
 }
 
+void ospf_interface_fifo_flush(struct ospf_interface *oi)
+{
+       struct ospf *ospf = oi->ospf;
+
+       ospf_fifo_flush(oi->obuf);
+
+       if (oi->on_write_q) {
+               listnode_delete(ospf->oi_write_q, oi);
+               if (list_isempty(ospf->oi_write_q))
+                       THREAD_OFF(ospf->t_write);
+               oi->on_write_q = 0;
+       }
+}
+
 static void ospf_if_reset_stats(struct ospf_interface *oi)
 {
        oi->hello_in = oi->hello_out = 0;
@@ -503,19 +519,10 @@ static void ospf_if_reset_stats(struct ospf_interface *oi)
 
 void ospf_if_stream_unset(struct ospf_interface *oi)
 {
-       struct ospf *ospf = oi->ospf;
-
        /* flush the interface packet queue */
-       ospf_fifo_flush(oi->obuf);
+       ospf_interface_fifo_flush(oi);
        /*reset protocol stats */
        ospf_if_reset_stats(oi);
-
-       if (oi->on_write_q) {
-               listnode_delete(ospf->oi_write_q, oi);
-               if (list_isempty(ospf->oi_write_q))
-                       OSPF_TIMER_OFF(ospf->t_write);
-               oi->on_write_q = 0;
-       }
 }
 
 
@@ -1474,7 +1481,7 @@ void ospf_reset_hello_timer(struct interface *ifp, struct in_addr addr,
                        ospf_hello_send(oi);
 
                        /* Restart hello timer for this interface */
-                       OSPF_ISM_TIMER_OFF(oi->t_hello);
+                       THREAD_OFF(oi->t_hello);
                        OSPF_HELLO_TIMER_ON(oi);
                }
 
@@ -1498,7 +1505,7 @@ void ospf_reset_hello_timer(struct interface *ifp, struct in_addr addr,
                ospf_hello_send(oi);
 
                /* Restart the hello timer. */
-               OSPF_ISM_TIMER_OFF(oi->t_hello);
+               THREAD_OFF(oi->t_hello);
                OSPF_HELLO_TIMER_ON(oi);
        }
 }
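
Two things happen in the ospf_interface.c hunks above: the OSPF_*_TIMER_OFF wrappers give way to the generic THREAD_OFF(), and the fifo flush plus write-queue removal that used to live only inside ospf_if_stream_unset() is pulled out into the new ospf_interface_fifo_flush() helper so other code (see nsm_kill_nbr() later in this diff) can reuse it. A rough standalone sketch of that extraction, with toy queue primitives standing in for ospf_fifo_flush(), listnode_delete() and the write job:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real FIFO / write-queue primitives. */
struct iface {
        int queued_pkts;      /* pretend packet FIFO */
        bool on_write_q;
};

#define MAX_IFACES 8
static struct iface *write_q[MAX_IFACES];
static int write_q_len;
static bool write_job_scheduled = true;

static void write_q_remove(struct iface *ifc)
{
        for (int i = 0; i < write_q_len; i++) {
                if (write_q[i] != ifc)
                        continue;
                write_q[i] = write_q[--write_q_len];
                return;
        }
}

/* One helper owns the whole "stop writing on this interface" sequence:
 * drop queued packets, leave the write queue, and cancel the write job
 * once nothing is left to service. */
static void iface_fifo_flush(struct iface *ifc)
{
        ifc->queued_pkts = 0;

        if (ifc->on_write_q) {
                write_q_remove(ifc);
                if (write_q_len == 0)
                        write_job_scheduled = false;
                ifc->on_write_q = false;
        }
}

int main(void)
{
        struct iface a = {3, true};

        write_q[write_q_len++] = &a;
        iface_fifo_flush(&a);
        printf("%d %d %d\n", a.queued_pkts, write_q_len, write_job_scheduled);
        return 0;
}

Keeping the stop-writing steps in one helper means every call site cancels the shared write job the same way once the queue drains.
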
index e4410164067a75a8ec085eed26d961a40dde7757..51fc1bf3c3108c10f4fc426de984c485db4c533f 100644 (file)
@@ -351,6 +351,8 @@ extern void ospf_if_interface(struct interface *ifp);
 extern uint32_t ospf_if_count_area_params(struct interface *ifp);
 extern void ospf_reset_hello_timer(struct interface *ifp, struct in_addr addr,
                                   bool is_addr);
+
+extern void ospf_interface_fifo_flush(struct ospf_interface *oi);
 DECLARE_HOOK(ospf_vl_add, (struct ospf_vl_data * vd), (vd));
 DECLARE_HOOK(ospf_vl_delete, (struct ospf_vl_data * vd), (vd));
 
index 97da61034f1e65f12130b41d90f3cb8b26e6d5ea..ab75ab9a1a336fae98a389f349b145f75921780b 100644 (file)
@@ -290,16 +290,16 @@ static void ism_timer_set(struct ospf_interface *oi)
                   interface parameters must be set to initial values, and
                   timers are
                   reset also. */
-               OSPF_ISM_TIMER_OFF(oi->t_hello);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
-               OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+               THREAD_OFF(oi->t_hello);
+               THREAD_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_ls_ack);
                break;
        case ISM_Loopback:
                /* In this state, the interface may be looped back and will be
                   unavailable for regular data traffic. */
-               OSPF_ISM_TIMER_OFF(oi->t_hello);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
-               OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+               THREAD_OFF(oi->t_hello);
+               THREAD_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_ls_ack);
                break;
        case ISM_Waiting:
                /* The router is trying to determine the identity of DRouter and
@@ -309,7 +309,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
                OSPF_ISM_TIMER_ON(oi->t_wait, ospf_wait_timer,
                                  OSPF_IF_PARAM(oi, v_wait));
-               OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+               THREAD_OFF(oi->t_ls_ack);
                break;
        case ISM_PointToPoint:
                /* The interface connects to a physical Point-to-point network
@@ -318,7 +318,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                   neighboring router. Hello packets are also sent. */
                /* send first hello immediately */
                OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_wait);
                OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
                                  oi->v_ls_ack);
                break;
@@ -328,7 +328,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                   and the router itself is neither Designated Router nor
                   Backup Designated Router. */
                OSPF_HELLO_TIMER_ON(oi);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_wait);
                OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
                                  oi->v_ls_ack);
                break;
@@ -337,7 +337,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                   network,
                   and the router is Backup Designated Router. */
                OSPF_HELLO_TIMER_ON(oi);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_wait);
                OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
                                  oi->v_ls_ack);
                break;
@@ -346,7 +346,7 @@ static void ism_timer_set(struct ospf_interface *oi)
                   network,
                   and the router is Designated Router. */
                OSPF_HELLO_TIMER_ON(oi);
-               OSPF_ISM_TIMER_OFF(oi->t_wait);
+               THREAD_OFF(oi->t_wait);
                OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
                                  oi->v_ls_ack);
                break;
index 35fbd15d002f6a0b183527b0defbc1ce557cc546..92329bc55ee529c3111f311aea26cfb618707a2f 100644 (file)
@@ -78,9 +78,6 @@
                                          OSPF_IF_PARAM((O), v_hello));        \
        } while (0)
 
-/* Macro for OSPF ISM timer turn off. */
-#define OSPF_ISM_TIMER_OFF(X) thread_cancel(&(X))
-
 /* Macro for OSPF schedule event. */
 #define OSPF_ISM_EVENT_SCHEDULE(I, E)                                          \
        thread_add_event(master, ospf_ism_event, (I), (E), NULL)
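
Most of the churn in the ospfd files here is mechanical: OSPF_ISM_TIMER_OFF, OSPF_NSM_TIMER_OFF, OSPF_POLL_TIMER_OFF and OSPF_TIMER_OFF were one-line aliases for the same cancel operation, so callers now use the library macro THREAD_OFF() directly and the aliases are deleted, as in the hunks above. The sketch below shows the general shape of such a cancel helper that also clears the caller's handle so a second cancel is a harmless no-op; the names and implementation are illustrative, not FRR's:

#include <stdio.h>
#include <stdlib.h>

struct timer {
        int id;
};

static void timer_cancel(struct timer **tp)
{
        if (*tp == NULL)
                return;          /* already off: cancelling twice is harmless */
        free(*tp);
        *tp = NULL;              /* clear the caller's handle */
}

/* One generic macro instead of per-subsystem aliases that all expand
 * to the same thing. */
#define TIMER_OFF(t) timer_cancel(&(t))

int main(void)
{
        struct timer *t_hello = malloc(sizeof(*t_hello));

        if (!t_hello)
                return 1;
        t_hello->id = 1;
        TIMER_OFF(t_hello);
        TIMER_OFF(t_hello);      /* safe no-op, handle is already NULL */
        printf("%p\n", (void *)t_hello);
        return 0;
}
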
index 2be81042a682f4ad3eaf6dbf1c5b8acd6fbb75a3..af05f5b59e9fa78c80eb20356a0c7b3e90aff13f 100644 (file)
@@ -1761,7 +1761,7 @@ static struct ospf_lsa *ospf_handle_exnl_lsa_lsId_chg(struct ospf *ospf,
        struct as_external_lsa *al;
        struct in_addr mask;
        struct ospf_lsa *new;
-       struct external_info ei_summary;
+       struct external_info ei_summary = {};
        struct external_info *ei_old;
 
        lsa = ospf_lsdb_lookup_by_id(ospf->lsdb, OSPF_AS_EXTERNAL_LSA,
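
In ospf_handle_exnl_lsa_lsId_chg() the local ei_summary is now zero-initialized with "= {}", so any field the function never writes reads as zero instead of stack garbage. (Strictly, empty braces are a GCC/Clang extension that C23 standardizes; "= {0}" is the portable C spelling.) A tiny illustration:

#include <stdio.h>

struct external_route {
        unsigned int tag;
        int metric;
        char ifname[16];
};

int main(void)
{
        /* "= {0}" zeroes every member, including the char array, so later
         * code can safely read fields it never wrote. */
        struct external_route summary = {0};

        summary.metric = 20;
        printf("tag=%u metric=%d ifname=\"%s\"\n",
               summary.tag, summary.metric, summary.ifname);
        return 0;
}
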
@@ -3625,7 +3625,7 @@ void ospf_flush_self_originated_lsas_now(struct ospf *ospf)
         * without conflicting to other threads.
         */
        if (ospf->t_maxage != NULL) {
-               OSPF_TIMER_OFF(ospf->t_maxage);
+               THREAD_OFF(ospf->t_maxage);
                thread_execute(master, ospf_maxage_lsa_remover, ospf, 0);
        }
 
@@ -3935,9 +3935,9 @@ void ospf_refresher_register_lsa(struct ospf *ospf, struct ospf_lsa *lsa)
 
                if (IS_DEBUG_OSPF(lsa, LSA_REFRESH))
                        zlog_debug(
-                               "LSA[Refresh:Type%d:%pI4]: ospf_refresher_register_lsa(): setting refresh_list on lsa %p (slod %d)",
-                               lsa->data->type, &lsa->data->id,
-                               (void *)lsa, index);
+                               "LSA[Refresh:Type%d:%pI4]: ospf_refresher_register_lsa(): setting refresh_list on lsa %p (slot %d)",
+                               lsa->data->type, &lsa->data->id, (void *)lsa,
+                               index);
        }
 }
 
index c59734b9f3140d586332c650103bd3967a6de244..02c44dcdc19ca4c0f3de9d8391d161bda807ccc2 100644 (file)
@@ -140,17 +140,17 @@ void ospf_nbr_free(struct ospf_neighbor *nbr)
        }
 
        /* Cancel all timers. */
-       OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
-       OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
-       OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
-       OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
+       THREAD_OFF(nbr->t_inactivity);
+       THREAD_OFF(nbr->t_db_desc);
+       THREAD_OFF(nbr->t_ls_req);
+       THREAD_OFF(nbr->t_ls_upd);
 
        /* Cancel all events. */ /* Thread lookup cost would be negligible. */
        thread_cancel_event(master, nbr);
 
        bfd_sess_free(&nbr->bfd_session);
 
-       OSPF_NSM_TIMER_OFF(nbr->gr_helper_info.t_grace_timer);
+       THREAD_OFF(nbr->gr_helper_info.t_grace_timer);
 
        nbr->oi = NULL;
        XFREE(MTYPE_OSPF_NEIGHBOR, nbr);
@@ -456,7 +456,7 @@ static struct ospf_neighbor *ospf_nbr_add(struct ospf_interface *oi,
                                nbr->nbr_nbma = nbr_nbma;
 
                                if (nbr_nbma->t_poll)
-                                       OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+                                       THREAD_OFF(nbr_nbma->t_poll);
 
                                nbr->state_change = nbr_nbma->state_change + 1;
                        }
index c538d1a09a4b10e9c37778831656411bab89ee95..a27550853b0004f7ea1b556cc7e1c75df95cebc6 100644 (file)
@@ -120,32 +120,32 @@ static void nsm_timer_set(struct ospf_neighbor *nbr)
        switch (nbr->state) {
        case NSM_Deleted:
        case NSM_Down:
-               OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
-               OSPF_NSM_TIMER_OFF(nbr->t_hello_reply);
+               THREAD_OFF(nbr->t_inactivity);
+               THREAD_OFF(nbr->t_hello_reply);
        /* fallthru */
        case NSM_Attempt:
        case NSM_Init:
        case NSM_TwoWay:
-               OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
-               OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
-               OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
+               THREAD_OFF(nbr->t_db_desc);
+               THREAD_OFF(nbr->t_ls_upd);
+               THREAD_OFF(nbr->t_ls_req);
                break;
        case NSM_ExStart:
                OSPF_NSM_TIMER_ON(nbr->t_db_desc, ospf_db_desc_timer,
                                  nbr->v_db_desc);
-               OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
-               OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
+               THREAD_OFF(nbr->t_ls_upd);
+               THREAD_OFF(nbr->t_ls_req);
                break;
        case NSM_Exchange:
                OSPF_NSM_TIMER_ON(nbr->t_ls_upd, ospf_ls_upd_timer,
                                  nbr->v_ls_upd);
                if (!IS_SET_DD_MS(nbr->dd_flags))
-                       OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
+                       THREAD_OFF(nbr->t_db_desc);
                break;
        case NSM_Loading:
        case NSM_Full:
        default:
-               OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
+               THREAD_OFF(nbr->t_db_desc);
                break;
        }
 }
@@ -176,13 +176,13 @@ int nsm_should_adj(struct ospf_neighbor *nbr)
 static int nsm_hello_received(struct ospf_neighbor *nbr)
 {
        /* Start or Restart Inactivity Timer. */
-       OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
+       THREAD_OFF(nbr->t_inactivity);
 
        OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
                          nbr->v_inactivity);
 
        if (nbr->oi->type == OSPF_IFTYPE_NBMA && nbr->nbr_nbma)
-               OSPF_POLL_TIMER_OFF(nbr->nbr_nbma->t_poll);
+               THREAD_OFF(nbr->nbr_nbma->t_poll);
 
        /* Send proactive ARP requests */
        if (nbr->state < NSM_Exchange)
@@ -194,9 +194,9 @@ static int nsm_hello_received(struct ospf_neighbor *nbr)
 static int nsm_start(struct ospf_neighbor *nbr)
 {
        if (nbr->nbr_nbma)
-               OSPF_POLL_TIMER_OFF(nbr->nbr_nbma->t_poll);
+               THREAD_OFF(nbr->nbr_nbma->t_poll);
 
-       OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
+       THREAD_OFF(nbr->t_inactivity);
 
        OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
                          nbr->v_inactivity);
@@ -382,6 +382,10 @@ static void nsm_clear_adj(struct ospf_neighbor *nbr)
 
 static int nsm_kill_nbr(struct ospf_neighbor *nbr)
 {
+       struct ospf_interface *oi = nbr->oi;
+       struct ospf_neighbor *on;
+       struct route_node *rn;
+
        /* killing nbr_self is invalid */
        if (nbr == nbr->oi->nbr_self) {
                assert(nbr != nbr->oi->nbr_self);
@@ -407,6 +411,35 @@ static int nsm_kill_nbr(struct ospf_neighbor *nbr)
                                ospf_get_name(nbr->oi->ospf));
        }
 
+       /*
+        * Do we have any neighbors that are also operating
+        * on this interface?
+        */
+       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+               on = rn->info;
+
+               if (!on)
+                       continue;
+
+               if (on == nbr || on == oi->nbr_self)
+                       continue;
+
+               /*
+                * on is in some state where we might be
+                * sending packets on this interface
+                */
+               if (on->state > NSM_Down) {
+                       route_unlock_node(rn);
+                       return 0;
+               }
+       }
+       /*
+        * If we get here we know that this interface
+        * has no neighbors in a state where we could
+        * be sending packets.  Let's flush anything
+        * we got.
+        */
+       ospf_interface_fifo_flush(oi);
        return 0;
 }
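
The nsm_kill_nbr() addition above scans the interface's neighbor table and returns as soon as it finds any other neighbor whose state is above NSM_Down; only when none remain does it flush the interface with the new ospf_interface_fifo_flush(). Note the route_unlock_node(rn) before the early return: the route_top()/route_next() walk keeps a reference on the node currently being visited, so leaving the loop early has to drop that reference by hand. A generic, non-FRR sketch of the same any-survivor scan with an explicit release on early exit:

#include <stdbool.h>
#include <stdio.h>

struct node {
        int refcnt;
        int state;               /* 0 == down */
        struct node *next;
};

static struct node *walk_first(struct node *head)
{
        if (head)
                head->refcnt++;  /* walker holds a reference on the node */
        return head;
}

static struct node *walk_next(struct node *n)
{
        struct node *nxt = n->next;

        n->refcnt--;             /* release current ... */
        if (nxt)
                nxt->refcnt++;   /* ... and hold the next one */
        return nxt;
}

/* Returns true if any node other than "skip" is still active.  On the
 * early return the current node's reference must be dropped by hand,
 * mirroring route_unlock_node() in the hunk above. */
static bool any_other_active(struct node *head, struct node *skip)
{
        for (struct node *n = walk_first(head); n; n = walk_next(n)) {
                if (n == skip || n->state == 0)
                        continue;
                n->refcnt--;     /* early exit: release before returning */
                return true;
        }
        return false;
}

int main(void)
{
        struct node c = {0, 0, NULL}, b = {0, 2, &c}, a = {0, 1, &b};

        printf("%d\n", any_other_active(&a, &a));           /* 1: b is active */
        printf("%d %d %d\n", a.refcnt, b.refcnt, c.refcnt); /* all back to 0 */
        return 0;
}
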
 
index 0b40b1f424b4eed7ae09b6c9aa5e43e7f493a732..c526c4c3edd72edc8c47457175e0c86906c2d2ef 100644 (file)
@@ -58,9 +58,6 @@
 /* Macro for OSPF NSM timer turn on. */
 #define OSPF_NSM_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr, (V), &(T))
 
-/* Macro for OSPF NSM timer turn off. */
-#define OSPF_NSM_TIMER_OFF(X) thread_cancel(&(X))
-
 /* Macro for OSPF NSM schedule event. */
 #define OSPF_NSM_EVENT_SCHEDULE(N, E)                                          \
        thread_add_event(master, ospf_nsm_event, (N), (E), NULL)
index 7e95cb591a7c75df087c1a73fd19ac7b6570b2fc..c7ac81d9616a9c35a3246c70aed61b8a75c5feb5 100644 (file)
@@ -147,7 +147,7 @@ int ospf_opaque_type9_lsa_init(struct ospf_interface *oi)
 
 void ospf_opaque_type9_lsa_term(struct ospf_interface *oi)
 {
-       OSPF_TIMER_OFF(oi->t_opaque_lsa_self);
+       THREAD_OFF(oi->t_opaque_lsa_self);
        if (oi->opaque_lsa_self != NULL)
                list_delete(&oi->opaque_lsa_self);
        oi->opaque_lsa_self = NULL;
@@ -176,7 +176,7 @@ void ospf_opaque_type10_lsa_term(struct ospf_area *area)
        area->lsdb->new_lsa_hook = area->lsdb->del_lsa_hook = NULL;
 #endif /* MONITOR_LSDB_CHANGE */
 
-       OSPF_TIMER_OFF(area->t_opaque_lsa_self);
+       THREAD_OFF(area->t_opaque_lsa_self);
        if (area->opaque_lsa_self != NULL)
                list_delete(&area->opaque_lsa_self);
        return;
@@ -204,7 +204,7 @@ void ospf_opaque_type11_lsa_term(struct ospf *top)
        top->lsdb->new_lsa_hook = top->lsdb->del_lsa_hook = NULL;
 #endif /* MONITOR_LSDB_CHANGE */
 
-       OSPF_TIMER_OFF(top->t_opaque_lsa_self);
+       THREAD_OFF(top->t_opaque_lsa_self);
        if (top->opaque_lsa_self != NULL)
                list_delete(&top->opaque_lsa_self);
        return;
@@ -603,7 +603,7 @@ static void free_opaque_info_per_type(struct opaque_info_per_type *oipt,
                ospf_opaque_lsa_flush_schedule(lsa);
        }
 
-       OSPF_TIMER_OFF(oipt->t_opaque_lsa_self);
+       THREAD_OFF(oipt->t_opaque_lsa_self);
        list_delete(&oipt->id_list);
        if (cleanup_owner) {
                /* Remove from its owner's self-originated LSA list. */
@@ -711,7 +711,7 @@ static void free_opaque_info_per_id(void *val)
 {
        struct opaque_info_per_id *oipi = (struct opaque_info_per_id *)val;
 
-       OSPF_TIMER_OFF(oipi->t_opaque_lsa_self);
+       THREAD_OFF(oipi->t_opaque_lsa_self);
        if (oipi->lsa != NULL)
                ospf_lsa_unlock(&oipi->lsa);
        XFREE(MTYPE_OPAQUE_INFO_PER_ID, oipi);
index c319f8068a06bd30b5a23097b06995f5167dae66..c5e26fa371f38c6baf947c39d58a1777f276c471 100644 (file)
@@ -466,7 +466,7 @@ static void ospf_ls_req_timer(struct thread *thread)
 
 void ospf_ls_req_event(struct ospf_neighbor *nbr)
 {
-       thread_cancel(&nbr->t_ls_req);
+       THREAD_OFF(nbr->t_ls_req);
        thread_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
 }
 
@@ -623,7 +623,6 @@ static void ospf_write(struct thread *thread)
 {
        struct ospf *ospf = THREAD_ARG(thread);
        struct ospf_interface *oi;
-       struct ospf_interface *last_serviced_oi = NULL;
        struct ospf_packet *op;
        struct sockaddr_in sa_dst;
        struct ip iph;
@@ -664,13 +663,7 @@ static void ospf_write(struct thread *thread)
                ipid = (time(NULL) & 0xffff);
 #endif /* WANT_OSPF_WRITE_FRAGMENT */
 
-       while ((pkt_count < ospf->write_oi_count) && oi
-              && (last_serviced_oi != oi)) {
-               /* If there is only packet in the queue, the oi is removed from
-                  write-q, so fix up the last interface that was serviced */
-               if (last_serviced_oi == NULL) {
-                       last_serviced_oi = oi;
-               }
+       while ((pkt_count < ospf->write_oi_count) && oi) {
                pkt_count++;
 #ifdef WANT_OSPF_WRITE_FRAGMENT
                /* convenience - max OSPF data per packet */
@@ -853,11 +846,9 @@ static void ospf_write(struct thread *thread)
                list_delete_node(ospf->oi_write_q, node);
                if (ospf_fifo_head(oi->obuf) == NULL) {
                        oi->on_write_q = 0;
-                       last_serviced_oi = NULL;
                        oi = NULL;
-               } else {
+               } else
                        listnode_add(ospf->oi_write_q, oi);
-               }
 
                /* Setup to service from the head of the queue again */
                if (!list_isempty(ospf->oi_write_q)) {
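
ospf_write() drops the last_serviced_oi bookkeeping: the loop now simply services up to write_oi_count packets, and an interface that still has queued packets is re-appended to the tail of oi_write_q so the next pass continues round-robin from the head. A toy model of that take-from-head, re-queue-if-not-empty loop (invented queue helpers, not the FRR list API):

#include <stdio.h>

#define QMAX 8

struct iface {
        const char *name;
        int queued;              /* packets waiting on this interface */
};

static struct iface *q[QMAX];
static int qlen;

static void q_push(struct iface *i) { q[qlen++] = i; }

static struct iface *q_pop(void)
{
        struct iface *head = q[0];

        for (int k = 1; k < qlen; k++)
                q[k - 1] = q[k];
        qlen--;
        return head;
}

/* Send at most "budget" packets per pass; an interface with leftovers
 * goes back to the tail so every interface keeps making progress. */
static void write_pass(int budget)
{
        while (budget-- > 0 && qlen > 0) {
                struct iface *oi = q_pop();

                oi->queued--;
                printf("sent one packet on %s (%d left)\n", oi->name,
                       oi->queued);
                if (oi->queued > 0)
                        q_push(oi);
        }
}

int main(void)
{
        struct iface a = {"eth0", 2}, b = {"eth1", 1};

        q_push(&a);
        q_push(&b);
        write_pass(3);           /* services eth0, eth1, then eth0 again */
        return 0;
}
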
@@ -3358,49 +3349,44 @@ static int ospf_make_hello(struct ospf_interface *oi, struct stream *s)
        stream_put_ipv4(s, BDR(oi).s_addr);
 
        /* Add neighbor seen. */
-       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
-               if ((nbr = rn->info))
-                       if (nbr->router_id.s_addr
-                           != INADDR_ANY) /* Ignore 0.0.0.0 node. */
-                               if (nbr->state
-                                   != NSM_Attempt) /* Ignore Down neighbor. */
-                                       if (nbr->state
-                                           != NSM_Down) /* This is myself for
-                                                           DR election. */
-                                               if (!IPV4_ADDR_SAME(
-                                                           &nbr->router_id,
-                                                           &oi->ospf->router_id)) {
-                                                       /* Check neighbor is
-                                                        * sane? */
-                                                       if (nbr->d_router.s_addr
-                                                                   != INADDR_ANY
-                                                           && IPV4_ADDR_SAME(
-                                                                   &nbr->d_router,
-                                                                   &oi->address
-                                                                            ->u
-                                                                            .prefix4)
-                                                           && IPV4_ADDR_SAME(
-                                                                   &nbr->bd_router,
-                                                                   &oi->address
-                                                                            ->u
-                                                                            .prefix4))
-                                                               flag = 1;
-
-                                                       /* Hello packet overflows interface MTU. */
-                                                       if (length + sizeof(uint32_t)
-                                                               > ospf_packet_max(oi)) {
-                                                               flog_err(
-                                                                       EC_OSPF_LARGE_HELLO,
-                                                                       "Oversized Hello packet! Larger than MTU. Not sending it out");
-                                                               return 0;
-                                                       }
-
-                                                       stream_put_ipv4(
-                                                               s,
-                                                               nbr->router_id
-                                                                       .s_addr);
-                                                       length += 4;
-                                               }
+       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+               nbr = rn->info;
+
+               if (!nbr)
+                       continue;
+
+               /* Ignore the 0.0.0.0 node */
+               if (nbr->router_id.s_addr == INADDR_ANY)
+                       continue;
+
+               /* Ignore Down neighbor */
+               if (nbr->state == NSM_Attempt)
+                       continue;
+
+               /* This is myself for DR election */
+               if (nbr->state == NSM_Down)
+                       continue;
+
+               if (IPV4_ADDR_SAME(&nbr->router_id, &oi->ospf->router_id))
+                       continue;
+               /* Check neighbor is  sane? */
+               if (nbr->d_router.s_addr != INADDR_ANY &&
+                   IPV4_ADDR_SAME(&nbr->d_router, &oi->address->u.prefix4) &&
+                   IPV4_ADDR_SAME(&nbr->bd_router, &oi->address->u.prefix4))
+                       flag = 1;
+
+               /* Hello packet overflows interface MTU.
+                */
+               if (length + sizeof(uint32_t) > ospf_packet_max(oi)) {
+                       flog_err(
+                               EC_OSPF_LARGE_HELLO,
+                               "Oversized Hello packet! Larger than MTU. Not sending it out");
+                       return 0;
+               }
+
+               stream_put_ipv4(s, nbr->router_id.s_addr);
+               length += 4;
+       }
 
        /* Let neighbor generate BackupSeen. */
        if (flag == 1)
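
Besides the de-nesting, the rewritten neighbor loop in ospf_make_hello() keeps the MTU guard: before appending another 4-byte router ID it checks that length + sizeof(uint32_t) still fits, and aborts the build with an error if it does not. A small self-contained version of that check-before-append pattern:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append a 4-byte ID to buf, refusing to overflow the given maximum
 * packet size.  Returns the new length, or 0 on overflow. */
static size_t append_id(uint8_t *buf, size_t length, size_t max_len,
                        uint32_t id)
{
        if (length + sizeof(uint32_t) > max_len) {
                fprintf(stderr, "oversized packet, not appending\n");
                return 0;
        }
        memcpy(buf + length, &id, sizeof(id));
        return length + sizeof(id);
}

int main(void)
{
        uint8_t pkt[8];
        size_t len = 0, n;

        len = append_id(pkt, len, sizeof(pkt), 0x01010101u);
        len = append_id(pkt, len, sizeof(pkt), 0x02020202u);
        n = append_id(pkt, len, sizeof(pkt), 0x03030303u);
        if (n == 0)
                printf("third append rejected, length stays %zu\n", len);
        return 0;
}
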
@@ -3781,54 +3767,44 @@ void ospf_hello_send(struct ospf_interface *oi)
                struct ospf_neighbor *nbr;
                struct route_node *rn;
 
-               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
-                       if ((nbr = rn->info))
-                               if (nbr != oi->nbr_self)
-                                       if (nbr->state != NSM_Down) {
-                                               /*  RFC 2328  Section 9.5.1
-                                                   If the router is not
-                                                  eligible to become Designated
-                                                  Router,
-                                                   it must periodically send
-                                                  Hello Packets to both the
-                                                   Designated Router and the
-                                                  Backup Designated Router (if
-                                                  they
-                                                   exist).  */
-                                               if (PRIORITY(oi) == 0
-                                                   && IPV4_ADDR_CMP(
-                                                              &DR(oi),
-                                                              &nbr->address.u
-                                                                       .prefix4)
-                                                   && IPV4_ADDR_CMP(
-                                                              &BDR(oi),
-                                                              &nbr->address.u
-                                                                       .prefix4))
-                                                       continue;
-
-                                               /*  If the router is eligible to
-                                                  become Designated Router, it
-                                                   must periodically send Hello
-                                                  Packets to all neighbors that
-                                                   are also eligible. In
-                                                  addition, if the router is
-                                                  itself the
-                                                   Designated Router or Backup
-                                                  Designated Router, it must
-                                                  also
-                                                   send periodic Hello Packets
-                                                  to all other neighbors. */
-
-                                               if (nbr->priority == 0
-                                                   && oi->state == ISM_DROther)
-                                                       continue;
-                                               /* if oi->state == Waiting, send
-                                                * hello to all neighbors */
-                                               ospf_hello_send_sub(
-                                                       oi,
-                                                       nbr->address.u.prefix4
-                                                               .s_addr);
-                                       }
+               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+                       nbr = rn->info;
+                       if (!nbr)
+                               continue;
+
+                       if (nbr == oi->nbr_self)
+                               continue;
+
+                       if (nbr->state == NSM_Down)
+                               continue;
+
+                       /*
+                        * RFC 2328  Section 9.5.1
+                        * If the router is not eligible to become Designated
+                        * Router, it must periodically send Hello Packets to
+                        * both the Designated Router and the Backup
+                        * Designated Router (if they exist).
+                        */
+                       if (PRIORITY(oi) == 0 &&
+                           IPV4_ADDR_CMP(&DR(oi), &nbr->address.u.prefix4) &&
+                           IPV4_ADDR_CMP(&BDR(oi), &nbr->address.u.prefix4))
+                               continue;
+
+                       /*
+                        * If the router is eligible to become Designated
+                        * Router, it must periodically send Hello Packets to
+                        * all neighbors that are also eligible. In addition,
+                        * if the router is itself the Designated Router or
+                        * Backup Designated Router, it must also send periodic
+                        * Hello Packets to all other neighbors.
+                        */
+                       if (nbr->priority == 0 && oi->state == ISM_DROther)
+                               continue;
+
+                       /* if oi->state == Waiting, send
+                        * hello to all neighbors */
+                       ospf_hello_send_sub(oi, nbr->address.u.prefix4.s_addr);
+               }
        } else {
                /* Decide destination address. */
                if (oi->type == OSPF_IFTYPE_VIRTUALLINK)
@@ -4103,7 +4079,7 @@ static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
                 * is actually turned off.
                 */
                if (list_isempty(oi->ospf->oi_write_q))
-                       OSPF_TIMER_OFF(oi->ospf->t_write);
+                       THREAD_OFF(oi->ospf->t_write);
        } else {
                /* Hook thread to write packet. */
                OSPF_ISM_WRITE_ON(oi->ospf);
@@ -4300,14 +4276,18 @@ void ospf_ls_ack_send_delayed(struct ospf_interface *oi)
                struct ospf_neighbor *nbr;
                struct route_node *rn;
 
-               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
-                       if ((nbr = rn->info) != NULL)
-                               if (nbr != oi->nbr_self
-                                   && nbr->state >= NSM_Exchange)
-                                       while (listcount(oi->ls_ack))
-                                               ospf_ls_ack_send_list(
-                                                       oi, oi->ls_ack,
-                                                       nbr->address.u.prefix4);
+               for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+                       nbr = rn->info;
+
+                       if (!nbr)
+                               continue;
+
+                       if (nbr != oi->nbr_self && nbr->state >= NSM_Exchange)
+                               while (listcount(oi->ls_ack))
+                                       ospf_ls_ack_send_list(
+                                               oi, oi->ls_ack,
+                                               nbr->address.u.prefix4);
+               }
                return;
        }
        if (oi->type == OSPF_IFTYPE_VIRTUALLINK)
index 44549b980cacc92227bd9f6fa2fb16f5fc719d0f..74a567427350f3e1469f9627c2fcc98ed8f0cc5b 100644 (file)
@@ -1464,8 +1464,13 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area,
                        if (ospf_nexthop_calculation(area, v, w, l, distance,
                                                     lsa_pos))
                                vertex_pqueue_add(candidate, w);
-                       else if (IS_DEBUG_OSPF_EVENT)
-                               zlog_debug("Nexthop Calc failed");
+                       else {
+                               listnode_delete(area->spf_vertex_list, w);
+                               ospf_vertex_free(w);
+                               w_lsa->stat = LSA_SPF_NOT_EXPLORED;
+                               if (IS_DEBUG_OSPF_EVENT)
+                                       zlog_debug("Nexthop Calc failed");
+                       }
                } else if (w_lsa->stat != LSA_SPF_IN_SPFTREE) {
                        w = w_lsa->stat;
                        if (w->distance < distance) {
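
The SPF hunk above turns a debug-only failure path into a real rollback: when ospf_nexthop_calculation() fails, the candidate vertex w is removed from the area's vertex list and freed, and the LSA's SPF state is reset to LSA_SPF_NOT_EXPLORED so a later pass can consider it again, instead of leaving a half-registered vertex behind. In generic terms the rule is to undo the partial insertion on the failure path, roughly:

#include <stdio.h>
#include <stdlib.h>

enum spf_stat { NOT_EXPLORED, CANDIDATE };

struct vertex {
        int id;
        enum spf_stat *stat;     /* back-pointer to the LSA's SPF state */
};

static int nexthop_calculation(struct vertex *v)
{
        return v->id % 2;        /* pretend even ids have no usable nexthop */
}

/* Create a vertex and either promote it to candidate or roll back
 * everything the failed attempt allocated and marked. */
static struct vertex *explore(int id, enum spf_stat *stat)
{
        struct vertex *w = calloc(1, sizeof(*w));

        if (!w)
                return NULL;
        w->id = id;
        w->stat = stat;
        *stat = CANDIDATE;

        if (!nexthop_calculation(w)) {
                *w->stat = NOT_EXPLORED; /* let a later pass retry */
                free(w);                 /* do not leak the vertex */
                return NULL;
        }
        return w;
}

int main(void)
{
        enum spf_stat s1 = NOT_EXPLORED, s2 = NOT_EXPLORED;
        struct vertex *a = explore(1, &s1); /* succeeds */
        struct vertex *b = explore(2, &s2); /* fails and is rolled back */

        printf("a=%p s1=%d  b=%p s2=%d\n", (void *)a, s1, (void *)b, s2);
        free(a);
        return 0;
}
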
index 2c7c80686cfd6bacf1b21627616f3a7490544648..3a71e55710823d87079ec374b1066617976a8572 100644 (file)
@@ -756,13 +756,14 @@ static struct ospf_neighbor *get_neighbor_by_addr(struct ospf *top,
        for (ALL_LIST_ELEMENTS_RO(top->oiflist, node, oi))
                for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
                        nbr = rn->info;
-                       if (nbr)
-                               if (IPV4_ADDR_SAME(&nbr->address.u.prefix4,
-                                                  &addr)
-                                   || IPV4_ADDR_SAME(&nbr->router_id, &addr)) {
-                                       route_unlock_node(rn);
-                                       return nbr;
-                               }
+                       if (!nbr)
+                               continue;
+
+                       if (IPV4_ADDR_SAME(&nbr->address.u.prefix4, &addr) ||
+                           IPV4_ADDR_SAME(&nbr->router_id, &addr)) {
+                               route_unlock_node(rn);
+                               return nbr;
+                       }
                }
        return NULL;
 }
index 5bb65b6abc35d8caa978aedc174f994b24dd315a..a6572794aac801d318754bdebd62dccf391fb15e 100644 (file)
@@ -4008,13 +4008,15 @@ static void show_ip_ospf_interface_traffic_sub(struct vty *vty,
                                    oi->ls_ack_in);
                json_object_int_add(json_interface_sub, "lsAckOut",
                                    oi->ls_ack_out);
+               json_object_int_add(json_interface_sub, "packetsQueued",
+                                   listcount(oi->obuf));
        } else {
                vty_out(vty,
-                       "%-10s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u\n",
+                       "%-10s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %12lu\n",
                        oi->ifp->name, oi->hello_in, oi->hello_out,
                        oi->db_desc_in, oi->db_desc_out, oi->ls_req_in,
                        oi->ls_req_out, oi->ls_upd_in, oi->ls_upd_out,
-                       oi->ls_ack_in, oi->ls_ack_out);
+                       oi->ls_ack_in, oi->ls_ack_out, listcount(oi->obuf));
        }
 }
 
@@ -4030,14 +4032,14 @@ static int show_ip_ospf_interface_traffic_common(
 
        if (!use_json && !display_once) {
                vty_out(vty, "\n");
-               vty_out(vty, "%-12s%-17s%-17s%-17s%-17s%-17s\n", "Interface",
-                       "    HELLO", "    DB-Desc", "   LS-Req", "   LS-Update",
-                       "   LS-Ack");
-               vty_out(vty, "%-10s%-18s%-18s%-17s%-17s%-17s\n", "",
+               vty_out(vty, "%-12s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+                       "Interface", "    HELLO", "    DB-Desc", "   LS-Req",
+                       "   LS-Update", "   LS-Ack", "    Packets");
+               vty_out(vty, "%-10s%-18s%-18s%-17s%-17s%-17s%-17s\n", "",
                        "      Rx/Tx", "     Rx/Tx", "    Rx/Tx", "    Rx/Tx",
-                       "    Rx/Tx");
+                       "    Rx/Tx", "    Queued");
                vty_out(vty,
-                       "--------------------------------------------------------------------------------------------\n");
+                       "-------------------------------------------------------------------------------------------------------------\n");
        } else if (use_json) {
                if (use_vrf)
                        json_vrf = json_object_new_object();
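
The interface-traffic display gains a queued-packet counter in both output forms: a packetsQueued field in the JSON object and a Packets/Queued column, fed by listcount(oi->obuf), in the plain-text table, whose header and separator line are widened to match. A trimmed-down illustration of keeping the header and data-row format widths in step when adding such a column:

#include <stdio.h>

struct if_stats {
        const char *name;
        unsigned hello_in, hello_out;
        unsigned long queued;
};

int main(void)
{
        struct if_stats rows[] = {
                { "eth0", 10, 12, 0 },
                { "eth1", 7, 7, 3 },
        };

        /* Header and data rows use matching field widths, so the added
         * "Queued" column stays aligned with its values. */
        printf("%-12s%-17s%12s\n", "Interface", "     HELLO", "Packets");
        printf("%-12s%-17s%12s\n", "", "     Rx/Tx", "Queued");
        printf("-----------------------------------------\n");
        for (size_t i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
                printf("%-12s%8u/%-8u%12lu\n", rows[i].name,
                       rows[i].hello_in, rows[i].hello_out, rows[i].queued);
        return 0;
}
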
@@ -4476,19 +4478,22 @@ static void show_ip_ospf_neighbor_sub(struct vty *vty,
        struct ospf_neighbor *nbr, *prev_nbr = NULL;
 
        for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
-               if ((nbr = rn->info)) {
-                       /* Do not show myself. */
-                       if (nbr == oi->nbr_self)
-                               continue;
-                       /* Down state is not shown. */
-                       if (nbr->state == NSM_Down)
-                               continue;
+               nbr = rn->info;
 
-                       prev_nbr = nbr;
+               if (!nbr)
+                       continue;
 
-                       show_ip_ospf_neighbour_brief(vty, nbr, prev_nbr, json,
-                                                    use_json);
-               }
+               /* Do not show myself. */
+               if (nbr == oi->nbr_self)
+                       continue;
+               /* Down state is not shown. */
+               if (nbr->state == NSM_Down)
+                       continue;
+
+               prev_nbr = nbr;
+
+               show_ip_ospf_neighbour_brief(vty, nbr, prev_nbr, json,
+                                            use_json);
        }
 }
 
@@ -5402,14 +5407,17 @@ static int show_ip_ospf_neighbor_id_common(struct vty *vty, struct ospf *ospf,
        ospf_show_vrf_name(ospf, vty, json, use_vrf);
 
        for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi)) {
-               if ((nbr = ospf_nbr_lookup_by_routerid(oi->nbrs, router_id))) {
-                       if (is_detail)
-                               show_ip_ospf_neighbor_detail_sub(
-                                       vty, oi, nbr, NULL, json, use_json);
-                       else
-                               show_ip_ospf_neighbour_brief(vty, nbr, NULL,
-                                                            json, use_json);
-               }
+               nbr = ospf_nbr_lookup_by_routerid(oi->nbrs, router_id);
+
+               if (!nbr)
+                       continue;
+
+               if (is_detail)
+                       show_ip_ospf_neighbor_detail_sub(vty, oi, nbr, NULL,
+                                                        json, use_json);
+               else
+                       show_ip_ospf_neighbour_brief(vty, nbr, NULL, json,
+                                                    use_json);
        }
 
        if (use_json)
@@ -5498,16 +5506,19 @@ static int show_ip_ospf_neighbor_detail_common(struct vty *vty,
                struct ospf_neighbor *nbr, *prev_nbr = NULL;
 
                for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
-                       if ((nbr = rn->info)) {
-                               if (nbr != oi->nbr_self) {
-                                       if (nbr->state != NSM_Down) {
-                                               show_ip_ospf_neighbor_detail_sub(
-                                                       vty, oi, nbr, prev_nbr,
-                                                       json_nbr_sub, use_json);
-                                       }
+                       nbr = rn->info;
+
+                       if (!nbr)
+                               continue;
+
+                       if (nbr != oi->nbr_self) {
+                               if (nbr->state != NSM_Down) {
+                                       show_ip_ospf_neighbor_detail_sub(
+                                               vty, oi, nbr, prev_nbr,
+                                               json_nbr_sub, use_json);
                                }
-                               prev_nbr = nbr;
                        }
+                       prev_nbr = nbr;
                }
        }
 
@@ -5668,27 +5679,29 @@ static int show_ip_ospf_neighbor_detail_all_common(struct vty *vty,
                struct ospf_nbr_nbma *nbr_nbma;
 
                for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
-                       if ((nbr = rn->info)) {
-                               if (nbr != oi->nbr_self)
-                                       if (nbr->state != NSM_Down)
-                                               show_ip_ospf_neighbor_detail_sub(
-                                                       vty, oi, rn->info,
-                                                       prev_nbr,
-                                                       json_vrf, use_json);
-                               prev_nbr = nbr;
-                       }
+                       nbr = rn->info;
+
+                       if (!nbr)
+                               continue;
+
+                       if (nbr != oi->nbr_self)
+                               if (nbr->state != NSM_Down)
+                                       show_ip_ospf_neighbor_detail_sub(
+                                               vty, oi, rn->info, prev_nbr,
+                                               json_vrf, use_json);
+                       prev_nbr = nbr;
                }
 
-               if (oi->type == OSPF_IFTYPE_NBMA) {
-                       struct listnode *nd;
+               if (oi->type != OSPF_IFTYPE_NBMA)
+                       continue;
 
-                       for (ALL_LIST_ELEMENTS_RO(oi->nbr_nbma, nd, nbr_nbma)) {
-                               if (nbr_nbma->nbr == NULL
-                                   || nbr_nbma->nbr->state == NSM_Down)
-                                       show_ip_ospf_nbr_nbma_detail_sub(
-                                               vty, oi, nbr_nbma, use_json,
-                                               json_vrf);
-                       }
+               struct listnode *nd;
+
+               for (ALL_LIST_ELEMENTS_RO(oi->nbr_nbma, nd, nbr_nbma)) {
+                       if (nbr_nbma->nbr == NULL ||
+                           nbr_nbma->nbr->state == NSM_Down)
+                               show_ip_ospf_nbr_nbma_detail_sub(
+                                       vty, oi, nbr_nbma, use_json, json_vrf);
                }
        }
 
@@ -5853,19 +5866,25 @@ static int show_ip_ospf_neighbor_int_detail_common(struct vty *vty,
        }
 
        for (rn = route_top(IF_OIFS(ifp)); rn; rn = route_next(rn)) {
-               if ((oi = rn->info)) {
-                       for (nrn = route_top(oi->nbrs); nrn;
-                            nrn = route_next(nrn)) {
-                               if ((nbr = nrn->info)) {
-                                       if (nbr != oi->nbr_self) {
-                                               if (nbr->state != NSM_Down)
-                                                       show_ip_ospf_neighbor_detail_sub(
-                                                               vty, oi, nbr,
-                                                               NULL,
-                                                               json, use_json);
-                                       }
-                               }
-                       }
+               oi = rn->info;
+
+               if (!oi)
+                       continue;
+
+               for (nrn = route_top(oi->nbrs); nrn; nrn = route_next(nrn)) {
+                       nbr = nrn->info;
+
+                       if (!nbr)
+                               continue;
+
+                       if (nbr == oi->nbr_self)
+                               continue;
+
+                       if (nbr->state == NSM_Down)
+                               continue;
+
+                       show_ip_ospf_neighbor_detail_sub(vty, oi, nbr, NULL,
+                                                        json, use_json);
                }
        }
 
@@ -8019,13 +8038,17 @@ static void ospf_nbr_timer_update(struct ospf_interface *oi)
        struct route_node *rn;
        struct ospf_neighbor *nbr;
 
-       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn))
-               if ((nbr = rn->info)) {
-                       nbr->v_inactivity = OSPF_IF_PARAM(oi, v_wait);
-                       nbr->v_db_desc = OSPF_IF_PARAM(oi, retransmit_interval);
-                       nbr->v_ls_req = OSPF_IF_PARAM(oi, retransmit_interval);
-                       nbr->v_ls_upd = OSPF_IF_PARAM(oi, retransmit_interval);
-               }
+       for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+               nbr = rn->info;
+
+               if (!nbr)
+                       continue;
+
+               nbr->v_inactivity = OSPF_IF_PARAM(oi, v_wait);
+               nbr->v_db_desc = OSPF_IF_PARAM(oi, retransmit_interval);
+               nbr->v_ls_req = OSPF_IF_PARAM(oi, retransmit_interval);
+               nbr->v_ls_upd = OSPF_IF_PARAM(oi, retransmit_interval);
+       }
 }
 
 static int ospf_vty_dead_interval_set(struct vty *vty, const char *interval_str,
@@ -9748,7 +9771,7 @@ DEFUN (no_ospf_max_metric_router_lsa_startup,
        for (ALL_LIST_ELEMENTS_RO(ospf->areas, ln, area)) {
                SET_FLAG(area->stub_router_state,
                         OSPF_AREA_WAS_START_STUB_ROUTED);
-               OSPF_TIMER_OFF(area->t_stub_router);
+               THREAD_OFF(area->t_stub_router);
 
                /* Don't trample on admin stub routed */
                if (!CHECK_FLAG(area->stub_router_state,
index 33872950ac7a38329de6d231827cf9b13e7a8ab0..8512b6a3399cdc611a168d351407d0e9401b4484 100644 (file)
@@ -572,7 +572,7 @@ static struct ospf *ospf_lookup_by_name(const char *vrf_name)
 static void ospf_deferred_shutdown_finish(struct ospf *ospf)
 {
        ospf->stub_router_shutdown_time = OSPF_STUB_ROUTER_UNCONFIGURED;
-       OSPF_TIMER_OFF(ospf->t_deferred_shutdown);
+       THREAD_OFF(ospf->t_deferred_shutdown);
 
        ospf_finish_final(ospf);
 
@@ -754,7 +754,7 @@ static void ospf_finish_final(struct ospf *ospf)
        /* Clear static neighbors */
        for (rn = route_top(ospf->nbr_nbma); rn; rn = route_next(rn))
                if ((nbr_nbma = rn->info)) {
-                       OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+                       THREAD_OFF(nbr_nbma->t_poll);
 
                        if (nbr_nbma->nbr) {
                                nbr_nbma->nbr->nbr_nbma = NULL;
@@ -790,22 +790,22 @@ static void ospf_finish_final(struct ospf *ospf)
        }
 
        /* Cancel all timers. */
-       OSPF_TIMER_OFF(ospf->t_read);
-       OSPF_TIMER_OFF(ospf->t_write);
-       OSPF_TIMER_OFF(ospf->t_spf_calc);
-       OSPF_TIMER_OFF(ospf->t_ase_calc);
-       OSPF_TIMER_OFF(ospf->t_maxage);
-       OSPF_TIMER_OFF(ospf->t_maxage_walker);
-       OSPF_TIMER_OFF(ospf->t_abr_task);
-       OSPF_TIMER_OFF(ospf->t_asbr_check);
-       OSPF_TIMER_OFF(ospf->t_asbr_nssa_redist_update);
-       OSPF_TIMER_OFF(ospf->t_distribute_update);
-       OSPF_TIMER_OFF(ospf->t_lsa_refresher);
-       OSPF_TIMER_OFF(ospf->t_opaque_lsa_self);
-       OSPF_TIMER_OFF(ospf->t_sr_update);
-       OSPF_TIMER_OFF(ospf->t_default_routemap_timer);
-       OSPF_TIMER_OFF(ospf->t_external_aggr);
-       OSPF_TIMER_OFF(ospf->gr_info.t_grace_period);
+       THREAD_OFF(ospf->t_read);
+       THREAD_OFF(ospf->t_write);
+       THREAD_OFF(ospf->t_spf_calc);
+       THREAD_OFF(ospf->t_ase_calc);
+       THREAD_OFF(ospf->t_maxage);
+       THREAD_OFF(ospf->t_maxage_walker);
+       THREAD_OFF(ospf->t_abr_task);
+       THREAD_OFF(ospf->t_asbr_check);
+       THREAD_OFF(ospf->t_asbr_nssa_redist_update);
+       THREAD_OFF(ospf->t_distribute_update);
+       THREAD_OFF(ospf->t_lsa_refresher);
+       THREAD_OFF(ospf->t_opaque_lsa_self);
+       THREAD_OFF(ospf->t_sr_update);
+       THREAD_OFF(ospf->t_default_routemap_timer);
+       THREAD_OFF(ospf->t_external_aggr);
+       THREAD_OFF(ospf->gr_info.t_grace_period);
 
        LSDB_LOOP (OPAQUE_AS_LSDB(ospf), rn, lsa)
                ospf_discard_from_db(ospf, ospf->lsdb, lsa);
@@ -992,8 +992,8 @@ static void ospf_area_free(struct ospf_area *area)
                free(IMPORT_NAME(area));
 
        /* Cancel timer. */
-       OSPF_TIMER_OFF(area->t_stub_router);
-       OSPF_TIMER_OFF(area->t_opaque_lsa_self);
+       THREAD_OFF(area->t_stub_router);
+       THREAD_OFF(area->t_opaque_lsa_self);
 
        if (OSPF_IS_AREA_BACKBONE(area))
                area->ospf->backbone = NULL;
@@ -1428,7 +1428,7 @@ void ospf_ls_upd_queue_empty(struct ospf_interface *oi)
                }
 
        /* remove update event */
-       thread_cancel(&oi->t_ls_upd_event);
+       THREAD_OFF(oi->t_ls_upd_event);
 }
 
 void ospf_if_update(struct ospf *ospf, struct interface *ifp)
@@ -1836,7 +1836,7 @@ int ospf_timers_refresh_set(struct ospf *ospf, int interval)
                    - (monotime(NULL) - ospf->lsa_refresher_started);
 
        if (time_left > interval) {
-               OSPF_TIMER_OFF(ospf->t_lsa_refresher);
+               THREAD_OFF(ospf->t_lsa_refresher);
                thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
                                 interval, &ospf->t_lsa_refresher);
        }
@@ -1853,7 +1853,7 @@ int ospf_timers_refresh_unset(struct ospf *ospf)
                    - (monotime(NULL) - ospf->lsa_refresher_started);
 
        if (time_left > OSPF_LSA_REFRESH_INTERVAL_DEFAULT) {
-               OSPF_TIMER_OFF(ospf->t_lsa_refresher);
+               THREAD_OFF(ospf->t_lsa_refresher);
                ospf->t_lsa_refresher = NULL;
                thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
                                 OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
@@ -1905,7 +1905,7 @@ static void ospf_nbr_nbma_delete(struct ospf *ospf,
 
 static void ospf_nbr_nbma_down(struct ospf_nbr_nbma *nbr_nbma)
 {
-       OSPF_TIMER_OFF(nbr_nbma->t_poll);
+       THREAD_OFF(nbr_nbma->t_poll);
 
        if (nbr_nbma->nbr) {
                nbr_nbma->nbr->nbr_nbma = NULL;
@@ -2094,7 +2094,7 @@ int ospf_nbr_nbma_poll_interval_set(struct ospf *ospf, struct in_addr nbr_addr,
        if (nbr_nbma->v_poll != interval) {
                nbr_nbma->v_poll = interval;
                if (nbr_nbma->oi && ospf_if_is_up(nbr_nbma->oi)) {
-                       OSPF_TIMER_OFF(nbr_nbma->t_poll);
+                       THREAD_OFF(nbr_nbma->t_poll);
                        OSPF_POLL_TIMER_ON(nbr_nbma->t_poll, ospf_poll_timer,
                                           nbr_nbma->v_poll);
                }
@@ -2263,7 +2263,7 @@ static int ospf_vrf_disable(struct vrf *vrf)
                if (IS_DEBUG_OSPF_EVENT)
                        zlog_debug("%s: ospf old_vrf_id %d unlinked", __func__,
                                   old_vrf_id);
-               thread_cancel(&ospf->t_read);
+               THREAD_OFF(ospf->t_read);
                close(ospf->fd);
                ospf->fd = -1;
        }
index 401a89fa38e402d0268be2449ceaa507f307a0ea..8478c96ddcd63ef4d4c413e0601c15eb550b36f5 100644 (file)
@@ -647,8 +647,6 @@ struct ospf_nbr_nbma {
 #define OSPF_TIMER_ON(T,F,V) thread_add_timer (master,(F),ospf,(V),&(T))
 #define OSPF_AREA_TIMER_ON(T,F,V) thread_add_timer (master, (F), area, (V), &(T))
 #define OSPF_POLL_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr_nbma, (V), &(T))
-#define OSPF_POLL_TIMER_OFF(X) OSPF_TIMER_OFF((X))
-#define OSPF_TIMER_OFF(X) thread_cancel(&(X))
 
 /* Extern variables. */
 extern struct ospf_master *om;
index b72a536ef4860135261cc4cc285d8f8f01b567c2..a1c56f93ad5a8288f0df465fc665bfd1c038d01b 100644 (file)
@@ -1326,9 +1326,9 @@ void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
                 * possibly disconnect and blacklist */
                flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
                          "Unsupported PCEP protocol feature: %s", err);
-               pcep_free_path(path);
                send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
                                PCEP_ERRV_LSP_NOT_PCE_INITIATED, path);
+               pcep_free_path(path);
        }
 }
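
The path_pcep_pcc.c hunk is an ordering fix: path was freed before send_pcep_error(..., path) consumed it, a use-after-free; moving pcep_free_path(path) after the call resolves it. The shape of the bug and of the fix, in a self-contained form:

#include <stdio.h>
#include <stdlib.h>

struct path {
        char name[32];
};

static void send_error_for(const struct path *p)
{
        /* Still needs the path contents, so it must run before the free. */
        fprintf(stderr, "error: operation not allowed on %s\n", p->name);
}

int main(void)
{
        struct path *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        snprintf(p->name, sizeof(p->name), "lsp-1");

        /* Wrong order would be:
         *     free(p);
         *     send_error_for(p);   -- reads freed memory
         * Correct order: report first, release last. */
        send_error_for(p);
        free(p);
        return 0;
}
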
 
index 156267a394431b7ba47b8e0de5a215e14a26ee0a..a98532cc399e9d8694a9545f954469c7598d76b7 100644 (file)
@@ -352,3 +352,9 @@ void path_zebra_init(struct thread_master *master)
        /* Connect to the LM. */
        path_zebra_label_manager_connect();
 }
+
+void path_zebra_stop(void)
+{
+       zclient_stop(zclient);
+       zclient_free(zclient);
+}
index 42a7123dd464f8bca26a7034113f3b9b5768bfde..683fcf10f7676d77469c3ad92b63ebef4f39c7fc 100644 (file)
@@ -30,5 +30,6 @@ void path_zebra_delete_sr_policy(struct srte_policy *policy);
 int path_zebra_request_label(mpls_label_t label);
 void path_zebra_release_label(mpls_label_t label);
 void path_zebra_init(struct thread_master *master);
+void path_zebra_stop(void);
 
 #endif /* _FRR_PATH_MPLS_H_ */
index be2cfe8b01474ac9bdea40af74fc98ae53983f71..e9d7cc6fc707c7a17866198819364927c7532bb2 100644 (file)
@@ -510,6 +510,8 @@ void srte_clean_zebra(void)
 
        RB_FOREACH_SAFE (policy, srte_policy_head, &srte_policies, safe_pol)
                srte_policy_del(policy);
+
+       path_zebra_stop();
 }
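
pathd gains path_zebra_stop(), which stops and frees its zebra client, and srte_clean_zebra() above calls it after the policies are deleted, so the client is released during shutdown rather than left to process exit. The general pattern is that a long-lived client handle created in an init path gets a matching stop/free in the teardown path; a skeletal sketch with invented client functions:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for a long-lived RPC/route client handle. */
struct client {
        int connected;
};

static struct client *g_client;

static void client_init(void)
{
        g_client = calloc(1, sizeof(*g_client));
        if (!g_client)
                exit(1);
        g_client->connected = 1;
        puts("client connected");
}

/* Teardown mirrors init: disconnect, then free, then forget. */
static void client_stop(void)
{
        if (!g_client)
                return;
        g_client->connected = 0;
        puts("client stopped");
        free(g_client);
        g_client = NULL;
}

static void daemon_cleanup(void)
{
        /* ... delete policies / other state first ... */
        client_stop();
}

int main(void)
{
        client_init();
        daemon_cleanup();
        return 0;
}
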
 
 /**
index d68bcfa160424d192044b6543ea50ab814f0e26d..a2b3431b94fcc346e8f8130a5bb58199cc2d9801 100644 (file)
@@ -137,6 +137,9 @@ DEFPY(pbr_map_match_src, pbr_map_match_src_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (pbrms->dst && pbrms->family && prefix->family != pbrms->family) {
                vty_out(vty, "Cannot mismatch families within match src/dst\n");
                return CMD_WARNING_CONFIG_FAILED;
@@ -170,6 +173,9 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (pbrms->src && pbrms->family && prefix->family != pbrms->family) {
                vty_out(vty, "Cannot mismatch families within match src/dst\n");
                return CMD_WARNING_CONFIG_FAILED;
@@ -204,6 +210,9 @@ DEFPY(pbr_map_match_ip_proto, pbr_map_match_ip_proto_cmd,
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
        struct protoent *p;
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no) {
                p = getprotobyname(ip_proto);
                if (!p) {
@@ -228,6 +237,9 @@ DEFPY(pbr_map_match_src_port, pbr_map_match_src_port_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no) {
                if (pbrms->src_prt == port)
                        return CMD_SUCCESS;
@@ -250,6 +262,9 @@ DEFPY(pbr_map_match_dst_port, pbr_map_match_dst_port_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no) {
                if (pbrms->dst_prt == port)
                        return CMD_SUCCESS;
@@ -274,6 +289,9 @@ DEFPY(pbr_map_match_dscp, pbr_map_match_dscp_cmd,
        char dscpname[100];
        uint8_t rawDscp;
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        /* Discriminate dscp enums (cs0, cs1 etc.) and numbers */
        bool isANumber = true;
        for (int i = 0; i < (int)strlen(dscp); i++) {
@@ -333,6 +351,9 @@ DEFPY(pbr_map_match_ecn, pbr_map_match_ecn_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no) {
                if ((pbrms->dsfield & PBR_DSFIELD_ECN) == ecn)
                        return CMD_SUCCESS;
@@ -357,6 +378,9 @@ DEFPY(pbr_map_match_mark, pbr_map_match_mark_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
 #ifndef GNU_LINUX
        vty_out(vty, "pbr marks are not supported on this platform\n");
        return CMD_WARNING_CONFIG_FAILED;
@@ -417,6 +441,9 @@ DEFPY(pbr_map_action_queue_id, pbr_map_action_queue_id_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no)
                pbrms->action_queue_id = queue_id;
        else if ((uint32_t)queue_id == pbrms->action_queue_id)
@@ -435,6 +462,9 @@ DEFPY(pbr_map_action_pcp, pbr_map_action_pcp_cmd, "[no] set pcp <(0-7)$pcp>",
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no)
                pbrms->action_pcp = pcp;
        else if (pcp == pbrms->action_pcp)
@@ -454,6 +484,9 @@ DEFPY(pbr_map_action_vlan_id, pbr_map_action_vlan_id_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no)
                pbrms->action_vlan_id = vlan_id;
        else if (pbrms->action_vlan_id == vlan_id)
@@ -472,6 +505,9 @@ DEFPY(pbr_map_action_strip_vlan, pbr_map_action_strip_vlan_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (!no)
                pbrms->action_vlan_flags = PBR_MAP_STRIP_INNER_ANY;
        else
@@ -492,6 +528,9 @@ DEFPY(pbr_map_nexthop_group, pbr_map_nexthop_group_cmd,
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
        struct nexthop_group_cmd *nhgc;
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        nhgc = nhgc_find(name);
        if (!nhgc) {
                vty_out(vty, "Specified nexthop-group %s does not exist\n",
@@ -522,6 +561,9 @@ DEFPY(no_pbr_map_nexthop_group, no_pbr_map_nexthop_group_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        pbrms_clear_set_config(pbrms);
 
        return CMD_SUCCESS;
@@ -548,6 +590,9 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd,
        struct nexthop nhop;
        struct nexthop *nh = NULL;
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        if (vrf_name)
                vrf = vrf_lookup_by_name(vrf_name);
        else
@@ -670,6 +715,9 @@ DEFPY(no_pbr_map_nexthop, no_pbr_map_nexthop_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        pbrms_clear_set_config(pbrms);
 
        return CMD_SUCCESS;
@@ -684,6 +732,9 @@ DEFPY(pbr_map_vrf, pbr_map_vrf_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        /*
         * If an equivalent set vrf * exists, just return success.
         */
@@ -722,6 +773,9 @@ DEFPY(no_pbr_map_vrf, no_pbr_map_vrf_cmd,
 {
        struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
 
+       if (!pbrms)
+               return CMD_WARNING_CONFIG_FAILED;
+
        pbrms_clear_set_config(pbrms);
 
        return CMD_SUCCESS;
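
Every pbrd DEFPY in the hunks above gains the same two-line guard: if VTY_GET_CONTEXT(pbr_map_sequence) yields no sequence (the context can go away underneath the CLI node), the handler returns CMD_WARNING_CONFIG_FAILED instead of dereferencing a NULL pbrms. The general shape, with a hypothetical context getter and local stand-ins for the CLI return codes:

#include <stdio.h>

enum { CMD_SUCCESS, CMD_WARNING_CONFIG_FAILED }; /* local stand-ins */

struct map_sequence {
        int seqno;
        int dst_port;
};

/* Stand-in for VTY_GET_CONTEXT(): may legitimately return NULL when the
 * object the CLI node referred to no longer exists. */
static struct map_sequence *get_context(struct map_sequence *maybe)
{
        return maybe;
}

static int handle_match_dst_port(struct map_sequence *ctx, int port)
{
        struct map_sequence *pbrms = get_context(ctx);

        /* Guard before any dereference, exactly like the added checks. */
        if (!pbrms)
                return CMD_WARNING_CONFIG_FAILED;

        if (pbrms->dst_port == port)
                return CMD_SUCCESS;
        pbrms->dst_port = port;
        return CMD_SUCCESS;
}

int main(void)
{
        struct map_sequence seq = { 5, 0 };

        printf("%d\n", handle_match_dst_port(&seq, 8080)); /* 0 */
        printf("%d\n", handle_match_dst_port(NULL, 8080)); /* 1 */
        return 0;
}
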
index 47b5f7e52c0a0c275dbcc581531d8b30152e1668..fe2cb56a262bbb68967df44fd15b632bfe4ed92d 100644 (file)
@@ -187,16 +187,18 @@ int rtnl_dump_filter_l(struct rtnl_handle *rth,
                       const struct rtnl_dump_filter_arg *arg)
 {
        struct sockaddr_nl nladdr;
-       struct iovec iov;
+       char buf[16384];
+       struct iovec iov = {
+               .iov_base = buf,
+               .iov_len = sizeof(buf),
+       };
        struct msghdr msg = {
                .msg_name = &nladdr,
                .msg_namelen = sizeof(nladdr),
                .msg_iov = &iov,
                .msg_iovlen = 1,
        };
-       char buf[16384];
 
-       iov.iov_base = buf;
        while (1) {
                int status;
                const struct rtnl_dump_filter_arg *a;
@@ -220,7 +222,7 @@ int rtnl_dump_filter_l(struct rtnl_handle *rth,
                }
 
                for (a = arg; a->filter; a++) {
-                       struct nlmsghdr *h = (struct nlmsghdr *)buf;
+                       struct nlmsghdr *h = (struct nlmsghdr *)iov.iov_base;
                        msglen = status;
 
                        while (NLMSG_OK(h, (uint32_t)msglen)) {
@@ -348,7 +350,8 @@ int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer,
                                msg.msg_namelen);
                        exit(1);
                }
-               for (h = (struct nlmsghdr *)buf; status >= (int)sizeof(*h);) {
+               for (h = (struct nlmsghdr *)iov.iov_base;
+                    status >= (int)sizeof(*h);) {
                        int err;
                        int len = h->nlmsg_len;
                        int l = len - sizeof(*h);
@@ -421,21 +424,23 @@ int rtnl_listen(struct rtnl_handle *rtnl, rtnl_filter_t handler, void *jarg)
        int status;
        struct nlmsghdr *h;
        struct sockaddr_nl nladdr;
-       struct iovec iov;
+       char buf[8192];
+       struct iovec iov = {
+               .iov_base = buf,
+               .iov_len = sizeof(buf),
+       };
        struct msghdr msg = {
                .msg_name = &nladdr,
                .msg_namelen = sizeof(nladdr),
                .msg_iov = &iov,
                .msg_iovlen = 1,
        };
-       char buf[8192];
 
        memset(&nladdr, 0, sizeof(nladdr));
        nladdr.nl_family = AF_NETLINK;
        nladdr.nl_pid = 0;
        nladdr.nl_groups = 0;
 
-       iov.iov_base = buf;
        while (1) {
                iov.iov_len = sizeof(buf);
                status = recvmsg(rtnl->fd, &msg, 0);
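
In rtnl_dump_filter_l() and rtnl_listen() the receive buffer and the struct iovec are now set up together at declaration time, so iov_base and iov_len are valid before the msghdr that references the iovec is ever used, and the parsers read through iov.iov_base instead of the raw buffer. A small compilable sketch of the same recvmsg() setup, on a generic datagram socket rather than the netlink helpers themselves:

/* Generic recvmsg() setup mirroring the change above. */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

ssize_t read_one(int fd, struct sockaddr_storage *from)
{
        char buf[8192];
        struct iovec iov = {
                .iov_base = buf,
                .iov_len = sizeof(buf),      /* valid from the start */
        };
        struct msghdr msg = {
                .msg_name = from,
                .msg_namelen = sizeof(*from),
                .msg_iov = &iov,
                .msg_iovlen = 1,
        };

        memset(from, 0, sizeof(*from));

        /* Any parsing afterwards should go through iov.iov_base, which is
         * what the rtnl_* loops above now do. */
        return recvmsg(fd, &msg, 0);
}
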
index 4f0758cf6a4fc1afe9938ab5fbcc1b2782b7d3e2..dc84de6bfd6377da01857d8b7dfa751fa83d5d33 100644 (file)
@@ -270,7 +270,7 @@ DEFPY (interface_ipv6_pim_drprio,
 
 DEFPY (interface_no_ipv6_pim_drprio,
        interface_no_ipv6_pim_drprio_cmd,
-       "no ip pim drpriority [(1-4294967295)]",
+       "no ipv6 pim drpriority [(1-4294967295)]",
        NO_STR
        IPV6_STR
        PIM_STR
@@ -686,9 +686,7 @@ DEFPY (ipv6_mld_group_watermark,
        "Group count to generate watermark warning\n")
 {
        PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
-
-       /* TBD Depends on MLD data structure changes */
-       (void)pim;
+       pim->gm_watermark_limit = limit;
 
        return CMD_SUCCESS;
 }
@@ -703,9 +701,7 @@ DEFPY (no_ipv6_mld_group_watermark,
        IGNORED_IN_NO_STR)
 {
        PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
-
-       /* TBD Depends on MLD data structure changes */
-       (void)pim;
+       pim->gm_watermark_limit = 0;
 
        return CMD_SUCCESS;
 }
@@ -1076,14 +1072,15 @@ DEFPY (show_ipv6_pim_neighbor_vrf_all,
 
 DEFPY (show_ipv6_pim_nexthop,
        show_ipv6_pim_nexthop_cmd,
-       "show ipv6 pim [vrf NAME] nexthop",
+       "show ipv6 pim [vrf NAME] nexthop [json$json]",
        SHOW_STR
        IPV6_STR
        PIM_STR
        VRF_CMD_HELP_STR
-       "PIM cached nexthop rpf information\n")
+       "PIM cached nexthop rpf information\n"
+       JSON_STR)
 {
-       return pim_show_nexthop_cmd_helper(vrf, vty);
+       return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
 }
 
 DEFPY (show_ipv6_pim_nexthop_lookup,
@@ -1263,12 +1260,25 @@ DEFPY (clear_ipv6_pim_statistics,
        return CMD_SUCCESS;
 }
 
+DEFPY (clear_ipv6_pim_interface_traffic,
+       clear_ipv6_pim_interface_traffic_cmd,
+       "clear ipv6 pim [vrf NAME] interface traffic",
+       CLEAR_STR
+       IPV6_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM interfaces\n"
+       "Reset Protocol Packet counters\n")
+{
+       return clear_pim_interface_traffic(vrf, vty);
+}
+
 DEFPY (clear_ipv6_mroute,
        clear_ipv6_mroute_cmd,
        "clear ipv6 mroute [vrf NAME]$name",
        CLEAR_STR
        IPV6_STR
-       "Reset multicast routes\n"
+       MROUTE_STR
        VRF_CMD_HELP_STR)
 {
        struct vrf *v = pim_cmd_lookup(vty, name);
@@ -1312,6 +1322,45 @@ DEFPY (clear_ipv6_mroute_count,
        return clear_ip_mroute_count_command(vty, name);
 }
 
+DEFPY (clear_ipv6_pim_interfaces,
+       clear_ipv6_pim_interfaces_cmd,
+       "clear ipv6 pim [vrf NAME] interfaces",
+       CLEAR_STR
+       IPV6_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM interfaces\n")
+{
+       struct vrf *v = pim_cmd_lookup(vty, vrf);
+
+       if (!v)
+               return CMD_WARNING;
+
+       clear_pim_interfaces(v->info);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (clear_ipv6_pim_bsr_db,
+       clear_ipv6_pim_bsr_db_cmd,
+       "clear ipv6 pim [vrf NAME] bsr-data",
+       CLEAR_STR
+       IPV6_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset pim bsr data\n")
+{
+       struct vrf *v;
+
+       v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+       if (!v)
+               return CMD_WARNING;
+
+       pim_bsm_clear(v->info);
+
+       return CMD_SUCCESS;
+}
+
 DEFPY (debug_pimv6,
        debug_pimv6_cmd,
        "[no] debug pimv6",
@@ -1467,6 +1516,51 @@ DEFPY (debug_pimv6_zebra,
        return CMD_SUCCESS;
 }
 
+DEFPY (debug_mroute6,
+       debug_mroute6_cmd,
+       "[no] debug mroute6",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MROUTE6_STR)
+{
+       if (!no)
+               PIM_DO_DEBUG_MROUTE;
+       else
+               PIM_DONT_DEBUG_MROUTE;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (debug_mroute6_detail,
+       debug_mroute6_detail_cmd,
+       "[no] debug mroute6 detail",
+       NO_STR
+       DEBUG_STR
+       DEBUG_MROUTE6_STR
+       "detailed\n")
+{
+       if (!no)
+               PIM_DO_DEBUG_MROUTE_DETAIL;
+       else
+               PIM_DONT_DEBUG_MROUTE_DETAIL;
+
+       return CMD_SUCCESS;
+}
+
+DEFUN_NOSH (show_debugging_pimv6,
+           show_debugging_pimv6_cmd,
+           "show debugging [pimv6]",
+           SHOW_STR
+           DEBUG_STR
+           "PIMv6 Information\n")
+{
+       vty_out(vty, "PIMv6 debugging status\n");
+
+       pim_debug_config_write(vty);
+
+       return CMD_SUCCESS;
+}
+
 void pim_cmd_init(void)
 {
        if_cmd_init(pim_interface_config_write);
@@ -1581,6 +1675,12 @@ void pim_cmd_init(void)
        install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd);
        install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);
        install_element(ENABLE_NODE, &clear_ipv6_mroute_count_cmd);
+       install_element(ENABLE_NODE, &clear_ipv6_pim_bsr_db_cmd);
+       install_element(ENABLE_NODE, &clear_ipv6_pim_interfaces_cmd);
+       install_element(ENABLE_NODE, &clear_ipv6_pim_interface_traffic_cmd);
+
+       install_element(ENABLE_NODE, &show_debugging_pimv6_cmd);
+
        install_element(ENABLE_NODE, &debug_pimv6_cmd);
        install_element(ENABLE_NODE, &debug_pimv6_nht_cmd);
        install_element(ENABLE_NODE, &debug_pimv6_nht_det_cmd);
@@ -1591,6 +1691,8 @@ void pim_cmd_init(void)
        install_element(ENABLE_NODE, &debug_pimv6_trace_cmd);
        install_element(ENABLE_NODE, &debug_pimv6_trace_detail_cmd);
        install_element(ENABLE_NODE, &debug_pimv6_zebra_cmd);
+       install_element(ENABLE_NODE, &debug_mroute6_cmd);
+       install_element(ENABLE_NODE, &debug_mroute6_detail_cmd);
 
        install_element(CONFIG_NODE, &debug_pimv6_cmd);
        install_element(CONFIG_NODE, &debug_pimv6_nht_cmd);
@@ -1602,4 +1704,6 @@ void pim_cmd_init(void)
        install_element(CONFIG_NODE, &debug_pimv6_trace_cmd);
        install_element(CONFIG_NODE, &debug_pimv6_trace_detail_cmd);
        install_element(CONFIG_NODE, &debug_pimv6_zebra_cmd);
+       install_element(CONFIG_NODE, &debug_mroute6_cmd);
+       install_element(CONFIG_NODE, &debug_mroute6_detail_cmd);
 }
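
The new "[no] debug mroute6" and "[no] debug mroute6 detail" commands above follow the usual toggle shape: one handler serves both forms and either sets or clears a debug bit. A reduced sketch of that toggle with plain bit operations; DEBUG_MROUTE*, the local debugs word and the flag values are illustrative stand-ins for the PIM_DO_DEBUG_* / PIM_DONT_DEBUG_* macros, whose definitions are not part of this diff.

/* Sketch of the "[no] debug ..." toggle used by debug_mroute6 above. */
#include <stdint.h>
#include <stdio.h>

#define DEBUG_MROUTE        (1U << 0)
#define DEBUG_MROUTE_DETAIL (1U << 1)

static uint32_t debugs;

static int handle_debug_mroute(int no, int detail)
{
        uint32_t bit = detail ? DEBUG_MROUTE_DETAIL : DEBUG_MROUTE;

        if (!no)
                debugs |= bit;      /* "debug mroute6 [detail]" */
        else
                debugs &= ~bit;     /* "no debug mroute6 [detail]" */

        return 0;                   /* CMD_SUCCESS */
}

int main(void)
{
        handle_debug_mroute(0, 0);          /* debug mroute6        */
        handle_debug_mroute(0, 1);          /* debug mroute6 detail */
        handle_debug_mroute(1, 0);          /* no debug mroute6     */
        printf("debug flags: 0x%x\n", (unsigned int)debugs);  /* 0x2 */
        return 0;
}
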
index 8fb82d9f2677d3493004728c368e3f189ac3d700..c45c998453511bf8e6ac74b20d115f91bc877e4e 100644 (file)
@@ -57,6 +57,7 @@
 #define DEBUG_PIMV6_PACKETDUMP_RECV_STR "Dump received packets\n"
 #define DEBUG_PIMV6_TRACE_STR "PIMv6 internal daemon activity\n"
 #define DEBUG_PIMV6_ZEBRA_STR "ZEBRA protocol activity\n"
+#define DEBUG_MROUTE6_STR "PIMv6 interaction with kernel MFC cache\n"
 
 void pim_cmd_init(void);
 
index b3f4e4256c7353504c9ebc6da9b1a3219771a8c3..e0b5a87e059a7c2c35057bc1cc7619648ab611a3 100644 (file)
@@ -161,8 +161,6 @@ int main(int argc, char **argv, char **envp)
        }
 
        pim_router_init();
-       /* TODO PIM6: temporary enable all debugs, remove later in PIMv6 work */
-       router->debugs = ~0U;
 
        access_list_init();
        prefix_list_init();
index b661b68948e7d5113e935a9e13b345c344e9104e..60602ee04a390c004b0f46151f308e7f367132d2 100644 (file)
@@ -2288,6 +2288,22 @@ void gm_ifp_update(struct interface *ifp)
        }
 }
 
+void gm_group_delete(struct gm_if *gm_ifp)
+{
+       struct gm_sg *sg, *sg_start;
+
+       sg_start = gm_sgs_first(gm_ifp->sgs);
+
+       /* clean up all mld groups */
+       frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
+               THREAD_OFF(sg->t_sg_expire);
+               if (sg->oil)
+                       pim_channel_oil_del(sg->oil, __func__);
+               gm_sgs_del(gm_ifp->sgs, sg);
+               gm_sg_free(sg);
+       }
+}
+
 /*
  * CLI (show commands only)
  */
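
gm_group_delete() above walks the interface's (S,G) entries with frr_each_from, which tolerates removing the current element inside the loop; each entry has its expiry timer stopped and its channel oil released before it is unlinked and freed. The same delete-while-iterating idea, shown on a plain singly linked list with no FRR types:

/* Delete every node while walking the list, freeing as we go. */
#include <stdlib.h>

struct sg {
        struct sg *next;
        /* per-entry state (expiry timer, oil pointer, ...) lives here */
};

static void sg_list_delete_all(struct sg **head)
{
        struct sg *cur = *head;

        while (cur) {
                struct sg *next = cur->next;  /* grab before freeing */

                /* release per-entry resources here, then free the node */
                free(cur);
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct sg *head = NULL;

        for (int i = 0; i < 3; i++) {         /* build a short list */
                struct sg *n = calloc(1, sizeof(*n));

                if (!n)
                        return 1;
                n->next = head;
                head = n;
        }
        sg_list_delete_all(&head);
        return 0;
}
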
index 9c7a6370072ef8c69a9db01227a356a2f2d540f0..95523c2922b6b7c2a2a858b85ecc8bd5c3daa9db 100644 (file)
@@ -352,6 +352,7 @@ struct gm_if {
 #if PIM_IPV == 6
 extern void gm_ifp_update(struct interface *ifp);
 extern void gm_ifp_teardown(struct interface *ifp);
+extern void gm_group_delete(struct gm_if *gm_ifp);
 #else
 static inline void gm_ifp_update(struct interface *ifp)
 {
diff --git a/pimd/pim6_stubs.c b/pimd/pim6_stubs.c
deleted file mode 100644 (file)
index 8213b9e..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * PIMv6 temporary stubs
- * Copyright (C) 2022  David Lamparter for NetDEF, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <zebra.h>
-
-#include "pimd.h"
-#include "pim_nht.h"
-#include "pim_zlookup.h"
-#include "pim_pim.h"
-#include "pim_register.h"
-#include "pim_cmd.h"
-#include "pim_bsm.h"
-
-/*
- * NH lookup / NHT
- */
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
-{
-}
-
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
-{
-}
-
-bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
-{
-       return false;
-}
-
-void pim_bsm_proc_free(struct pim_instance *pim)
-{
-}
-
-void pim_bsm_proc_init(struct pim_instance *pim)
-{
-}
-
-struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
-                                         struct prefix *grp)
-{
-       return NULL;
-}
-
-void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
-{
-}
-
-int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
-                   uint32_t buf_size, bool no_fwd)
-{
-       return 0;
-}
index e28f121136c3fc7de3f74ba09481e96142ee4c94..defe4070cfb54697d8c3bb839f0da27118ca9370 100644 (file)
@@ -35,6 +35,8 @@ typedef struct in_addr pim_addr;
 #define ipaddr_pim     ipaddr_v4
 #define PIM_MAX_BITLEN IPV4_MAX_BITLEN
 #define PIM_AF_NAME     "ip"
+#define PIM_AF_DBG     "pim"
+#define PIM_MROUTE_DBG  "mroute"
 #define PIMREG          "pimreg"
 #define GM              "IGMP"
 
@@ -60,6 +62,8 @@ typedef struct in6_addr pim_addr;
 #define ipaddr_pim     ipaddr_v6
 #define PIM_MAX_BITLEN IPV6_MAX_BITLEN
 #define PIM_AF_NAME     "ipv6"
+#define PIM_AF_DBG     "pimv6"
+#define PIM_MROUTE_DBG  "mroute6"
 #define PIMREG          "pim6reg"
 #define GM              "MLD"
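
The two new PIM_AF_DBG / PIM_MROUTE_DBG defines extend the per-address-family name table: the same sources are built once with PIM_IPV == 4 and once with PIM_IPV == 6, and these macros select "pim"/"mroute" or "pimv6"/"mroute6" accordingly. A compressed, compilable illustration of how such a switch is consumed; simplified, not the real header.

/* Build with -DPIM_IPV=4 or -DPIM_IPV=6. */
#include <netinet/in.h>
#include <stdio.h>

#if PIM_IPV == 4
typedef struct in_addr pim_addr;
#define PIM_AF_DBG     "pim"
#define PIM_MROUTE_DBG "mroute"
#else
typedef struct in6_addr pim_addr;
#define PIM_AF_DBG     "pimv6"
#define PIM_MROUTE_DBG "mroute6"
#endif

int main(void)
{
        printf("debug %s / debug %s, address size %zu bytes\n",
               PIM_AF_DBG, PIM_MROUTE_DBG, sizeof(pim_addr));
        return 0;
}
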
 
index 8ef3c43a99658a6a465ab470fe10169db8363e38..1f7dd2f3f92be2e13c46d7ae75c696eca9af98e9 100644 (file)
@@ -130,11 +130,7 @@ int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,
                return 1;
        if (node1->hash > node2->hash)
                return -1;
-       if (node1->rp_address.s_addr < node2->rp_address.s_addr)
-               return 1;
-       if (node1->rp_address.s_addr > node2->rp_address.s_addr)
-               return -1;
-       return 0;
+       return pim_addr_cmp(node2->rp_address, node1->rp_address);
 }
 
 static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
@@ -173,11 +169,10 @@ static void pim_on_bs_timer(struct thread *t)
                           __func__, scope->sz_id);
 
        pim_nht_bsr_del(scope->pim, scope->current_bsr);
-
        /* Reset scope zone data */
        scope->accept_nofwd_bsm = false;
        scope->state = ACCEPT_ANY;
-       scope->current_bsr.s_addr = INADDR_ANY;
+       scope->current_bsr = PIMADDR_ANY;
        scope->current_bsr_prio = 0;
        scope->current_bsr_first_ts = 0;
        scope->current_bsr_last_ts = 0;
@@ -353,10 +348,9 @@ static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
        THREAD_OFF(bsrp->g2rp_timer);
        if (PIM_DEBUG_BSM)
                zlog_debug(
-                       "%s : starting g2rp timer for grp: %pFX - rp: %pI4 with timeout  %d secs(Actual Hold time : %d secs)",
-                       __func__, &bsrp->bsgrp_node->group,
-                       &bsrp->rp_address, hold_time,
-                       bsrp->rp_holdtime);
+                       "%s : starting g2rp timer for grp: %pFX - rp: %pPAs with timeout  %d secs(Actual Hold time : %d secs)",
+                       __func__, &bsrp->bsgrp_node->group, &bsrp->rp_address,
+                       hold_time, bsrp->rp_holdtime);
 
        thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
                         &bsrp->g2rp_timer);
@@ -374,7 +368,7 @@ static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
                return;
 
        if (PIM_DEBUG_BSM)
-               zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pI4",
+               zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pPAs",
                           __func__, &bsrp->bsgrp_node->group,
                           &bsrp->rp_address);
 
@@ -462,8 +456,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
                route_unlock_node(rn);
 
                if (active && pend) {
-                       if ((active->rp_address.s_addr
-                            != pend->rp_address.s_addr))
+                       if (pim_addr_cmp(active->rp_address, pend->rp_address))
                                pim_rp_change(pim, pend->rp_address,
                                              bsgrp_node->group, RP_SRC_BSR);
                }
@@ -531,18 +524,17 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
        pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
 }
 
-static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
+static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
                             uint32_t bsr_prio)
 {
-       if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
+       if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))
                return true;
 
        if (bsr_prio > pim->global_scope.current_bsr_prio)
                return true;
 
        else if (bsr_prio == pim->global_scope.current_bsr_prio) {
-               if (ntohl(bsr.s_addr)
-                   >= ntohl(pim->global_scope.current_bsr.s_addr))
+               if (pim_addr_cmp(bsr, pim->global_scope.current_bsr) >= 0)
                        return true;
                else
                        return false;
@@ -550,10 +542,10 @@ static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
                return false;
 }
 
-static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
+static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr,
                           uint32_t bsr_prio)
 {
-       if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+       if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {
                pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
                pim_nht_bsr_add(pim, bsr);
 
@@ -571,7 +563,7 @@ void pim_bsm_clear(struct pim_instance *pim)
        struct route_node *rn;
        struct route_node *rpnode;
        struct bsgrp_node *bsgrp;
-       struct prefix nht_p;
+       pim_addr nht_p;
        struct prefix g_all;
        struct rp_info *rp_all;
        struct pim_upstream *up;
@@ -583,7 +575,7 @@ void pim_bsm_clear(struct pim_instance *pim)
        /* Reset scope zone data */
        pim->global_scope.accept_nofwd_bsm = false;
        pim->global_scope.state = ACCEPT_ANY;
-       pim->global_scope.current_bsr.s_addr = INADDR_ANY;
+       pim->global_scope.current_bsr = PIMADDR_ANY;
        pim->global_scope.current_bsr_prio = 0;
        pim->global_scope.current_bsr_first_ts = 0;
        pim->global_scope.current_bsr_last_ts = 0;
@@ -617,16 +609,14 @@ void pim_bsm_clear(struct pim_instance *pim)
                }
 
                /* Deregister addr with Zebra NHT */
-               nht_p.family = AF_INET;
-               nht_p.prefixlen = IPV4_MAX_BITLEN;
-               nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+               nht_p = rp_info->rp.rpf_addr;
 
                if (PIM_DEBUG_PIM_NHT_RP) {
-                       zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
+                       zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
                                   __func__, &nht_p);
                }
 
-               pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+               pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
 
                if (!pim_get_all_mcast_group(&g_all))
                        return;
@@ -634,7 +624,7 @@ void pim_bsm_clear(struct pim_instance *pim)
                rp_all = pim_rp_find_match_group(pim, &g_all);
 
                if (rp_all == rp_info) {
-                       pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
+                       rp_all->rp.rpf_addr = PIMADDR_ANY;
                        rp_all->i_am_rp = 0;
                } else {
                        /* Delete the rp_info from rp-list */
@@ -944,7 +934,6 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
        struct pim_interface *pim_ifp;
        struct bsm_scope *scope;
        struct bsm_frag *bsfrag;
-       char neigh_src_str[INET_ADDRSTRLEN];
        uint32_t pim_mtu;
        bool no_fwd = true;
        bool ret = false;
@@ -982,13 +971,13 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
        if (!pim_ifp->ucast_bsm_accept) {
                dst_addr = qpim_all_pim_routers_addr;
                if (PIM_DEBUG_BSM)
-                       zlog_debug("%s: Sending BSM mcast to %s", __func__,
-                                  neigh_src_str);
+                       zlog_debug("%s: Sending BSM mcast to %pPA", __func__,
+                                  &neigh->source_addr);
        } else {
                dst_addr = neigh->source_addr;
                if (PIM_DEBUG_BSM)
-                       zlog_debug("%s: Sending BSM ucast to %s", __func__,
-                                  neigh_src_str);
+                       zlog_debug("%s: Sending BSM ucast to %pPA", __func__,
+                                  &neigh->source_addr);
        }
        pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
        pim_hello_require(ifp);
@@ -1040,7 +1029,7 @@ struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
        return bsgrp;
 }
 
-static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
+static uint32_t hash_calc_on_grp_rp(struct prefix group, pim_addr rp,
                                    uint8_t hashmasklen)
 {
        uint64_t temp;
@@ -1058,13 +1047,24 @@ static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
        /* in_addr stores ip in big endian, hence network byte order
         * convert to uint32 before processing hash
         */
+#if PIM_IPV == 4
        grpaddr = ntohl(group.u.prefix4.s_addr);
+#else
+       grpaddr = group.u.prefix6.s6_addr32[0] ^ group.u.prefix6.s6_addr32[1] ^
+                 group.u.prefix6.s6_addr32[2] ^ group.u.prefix6.s6_addr32[3];
+#endif
        /* Avoid shifting by 32 bit on a 32 bit register */
        if (hashmasklen)
                grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
        else
                grpaddr = grpaddr & mask;
+
+#if PIM_IPV == 4
        rp_add = ntohl(rp.s_addr);
+#else
+       rp_add = rp.s6_addr32[0] ^ rp.s6_addr32[1] ^ rp.s6_addr32[2] ^
+                rp.s6_addr32[3];
+#endif
        temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
               + 12345;
        hash = temp & (0x7fffffff);
@@ -1083,8 +1083,7 @@ static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
 
        bsm_rpinfo->rp_prio = rp->rp_pri;
        bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
-       memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
-              sizeof(struct in_addr));
+       bsm_rpinfo->rp_address = rp->rpaddr.addr;
        bsm_rpinfo->elapse_time = 0;
 
        /* Back pointer to the group node. */
@@ -1142,6 +1141,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
        int frag_rp_cnt = 0;
        int offset = 0;
        int ins_count = 0;
+       pim_addr grp_addr;
 
        while (buflen > offset) {
                if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
@@ -1153,31 +1153,28 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
                }
                /* Extract Group tlv from BSM */
                memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+               grp_addr = grpinfo.group.addr;
 
-               if (PIM_DEBUG_BSM) {
-                       char grp_str[INET_ADDRSTRLEN];
-
-                       pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
-                                      sizeof(grp_str));
+               if (PIM_DEBUG_BSM)
                        zlog_debug(
-                               "%s, Group %s  Rpcount:%d Fragment-Rp-count:%d",
-                               __func__, grp_str, grpinfo.rp_count,
+                               "%s, Group %pPAs  Rpcount:%d Fragment-Rp-count:%d",
+                               __func__, &grp_addr, grpinfo.rp_count,
                                grpinfo.frag_rp_count);
-               }
 
                buf += sizeof(struct bsmmsg_grpinfo);
                offset += sizeof(struct bsmmsg_grpinfo);
 
-               group.family = AF_INET;
-               if (grpinfo.group.mask > IPV4_MAX_BITLEN) {
+               group.family = PIM_AF;
+               if (grpinfo.group.mask > PIM_MAX_BITLEN) {
                        if (PIM_DEBUG_BSM)
                                zlog_debug(
-                                       "%s, v4 prefix length specified: %d is too long",
+                                       "%s, prefix length specified: %d is too long",
                                        __func__, grpinfo.group.mask);
                        return false;
                }
+
+               pim_addr_to_prefix(&group, grp_addr);
                group.prefixlen = grpinfo.group.mask;
-               group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
 
                /* Get the Group node for the BSM rp table */
                bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
@@ -1189,14 +1186,10 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
                        if (!bsgrp)
                                continue;
 
-                       if (PIM_DEBUG_BSM) {
-                               char grp_str[INET_ADDRSTRLEN];
-
-                               pim_inet4_dump("<Group?>", grpinfo.group.addr,
-                                              grp_str, sizeof(grp_str));
-                               zlog_debug("%s, Rp count is zero for group: %s",
-                                          __func__, grp_str);
-                       }
+                       if (PIM_DEBUG_BSM)
+                               zlog_debug(
+                                       "%s, Rp count is zero for group: %pPAs",
+                                       __func__, &grp_addr);
 
                        old_rpinfo = bsm_rpinfos_first(bsgrp->bsrp_list);
                        if (old_rpinfo)
@@ -1249,13 +1242,12 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
                        offset += sizeof(struct bsmmsg_rpinfo);
 
                        if (PIM_DEBUG_BSM) {
-                               char rp_str[INET_ADDRSTRLEN];
+                               pim_addr rp_addr;
 
-                               pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
-                                              rp_str, sizeof(rp_str));
+                               rp_addr = rpinfo.rpaddr.addr;
                                zlog_debug(
-                                       "%s, Rp address - %s; pri:%d hold:%d",
-                                       __func__, rp_str, rpinfo.rp_pri,
+                                       "%s, Rp address - %pPAs; pri:%d hold:%d",
+                                       __func__, &rp_addr, rpinfo.rp_pri,
                                        rpinfo.rp_holdtime);
                        }
 
@@ -1287,8 +1279,8 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
        struct pim_interface *pim_ifp = NULL;
        struct bsm_frag *bsfrag;
        struct pim_instance *pim;
-       char bsr_str[INET_ADDRSTRLEN];
        uint16_t frag_tag;
+       pim_addr bsr_addr;
        bool empty_bsm = false;
 
        /* BSM Packet acceptance validation */
@@ -1330,16 +1322,17 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
        }
 
        bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
-       pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
-                      sizeof(bsr_str));
-       if (bshdr->hm_len > IPV4_MAX_BITLEN) {
-               zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32",
-                         bshdr->hm_len);
+       if (bshdr->hm_len > PIM_MAX_BITLEN) {
+               zlog_warn(
+                       "Bad hashmask length for %s; got %hhu, expected value in range 0-32",
+                       PIM_AF_NAME, bshdr->hm_len);
                pim->bsm_dropped++;
                return -1;
        }
        pim->global_scope.hashMasklen = bshdr->hm_len;
        frag_tag = ntohs(bshdr->frag_tag);
+       /* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+       memcpy(&bsr_addr, &bshdr->bsr_addr.addr, sizeof(bsr_addr));
 
        /* Identify empty BSM */
        if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
@@ -1361,7 +1354,7 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
        }
 
        /* Drop if bsr is not preferred bsr */
-       if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
+       if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) {
                if (PIM_DEBUG_BSM)
                        zlog_debug("%s : Received a non-preferred BSM",
                                   __func__);
@@ -1377,30 +1370,25 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
                } else {
                        if (PIM_DEBUG_BSM)
                                zlog_debug(
-                                       "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
-                                       __func__, bsr_str);
+                                       "%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false",
+                                       __func__, &bsr_addr);
                        pim->bsm_dropped++;
                        pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
                        return -1;
                }
        }
 
-#if PIM_IPV == 4
-       if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr))
-#else
-       if (0)
-#endif
-       {
+       if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) {
                /* Multicast BSMs are only accepted if source interface & IP
                 * match RPF towards the BSR's IP address, or they have
                 * no-forward set
                 */
-               if (!no_fwd && !pim_nht_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
-                                                     ifp, sg->src)) {
+               if (!no_fwd &&
+                   !pim_nht_bsr_rpf_check(pim, bsr_addr, ifp, sg->src)) {
                        if (PIM_DEBUG_BSM)
                                zlog_debug(
-                                       "BSM check: RPF to BSR %s is not %pPA%%%s",
-                                       bsr_str, &sg->src, ifp->name);
+                                       "BSM check: RPF to BSR %pPAs is not %pPA%%%s",
+                                       &bsr_addr, &sg->src, ifp->name);
                        pim->bsm_dropped++;
                        return -1;
                }
@@ -1459,7 +1447,7 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
        }
 
        /* update the scope information from bsm */
-       pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+       pim_bsm_update(pim, bsr_addr, bshdr->bsr_prio);
 
        if (!no_fwd) {
                pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
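
The BSM parsing hunks above copy the BSR and RP addresses out of the packed wire structures into aligned locals (see the "packed/unaligned => memcpy" comments) before comparing or printing them; with a 16-byte IPv6 address, going through the packed struct member directly can be misaligned. A standalone sketch of that copy-out pattern; wire_hdr is hypothetical, not the real BSM header layout.

/* Copy an address field out of a packed struct into an aligned local. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

struct wire_hdr {
        uint8_t hm_len;
        uint8_t prio;
        struct in6_addr addr;   /* at offset 2: misaligned for wider loads */
} __attribute__((packed));

int main(void)
{
        struct wire_hdr hdr;
        struct in6_addr addr;
        char buf[INET6_ADDRSTRLEN];

        memset(&hdr, 0, sizeof(hdr));
        hdr.addr = in6addr_loopback;

        /* memcpy() into an aligned local is safe on every target; handing
         * &hdr.addr around directly would not be (gcc warns about it). */
        memcpy(&addr, &hdr.addr, sizeof(addr));
        printf("%s\n", inet_ntop(AF_INET6, &addr, buf, sizeof(buf)));
        return 0;
}
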
index 910067109ec377032205dd23ce33e501b5026435..90bd2f087705b2bed9a053335849ae376d0a6066 100644 (file)
@@ -61,7 +61,7 @@ struct bsm_scope {
        int sz_id;                      /* scope zone id */
        enum ncbsr_state state;         /* non candidate BSR state */
        bool accept_nofwd_bsm;          /* no fwd bsm accepted for scope */
-       struct in_addr current_bsr;     /* current elected BSR for the sz */
+       pim_addr current_bsr;           /* current elected BSR for the sz */
        uint32_t current_bsr_prio;      /* current BSR priority */
        int64_t current_bsr_first_ts;   /* current BSR elected time */
        int64_t current_bsr_last_ts;    /* Last BSM received from E-BSR */
@@ -185,18 +185,30 @@ struct bsm_hdr {
        uint16_t frag_tag;
        uint8_t hm_len;
        uint8_t bsr_prio;
+#if PIM_IPV == 4
        struct pim_encoded_ipv4_unicast bsr_addr;
+#else
+       struct pim_encoded_ipv6_unicast bsr_addr;
+#endif
 } __attribute__((packed));
 
 struct bsmmsg_grpinfo {
+#if PIM_IPV == 4
        struct pim_encoded_group_ipv4 group;
+#else
+       struct pim_encoded_group_ipv6 group;
+#endif
        uint8_t rp_count;
        uint8_t frag_rp_count;
        uint16_t reserved;
 } __attribute__((packed));
 
 struct bsmmsg_rpinfo {
+#if PIM_IPV == 4
        struct pim_encoded_ipv4_unicast rpaddr;
+#else
+       struct pim_encoded_ipv6_unicast rpaddr;
+#endif
        uint16_t rp_holdtime;
        uint8_t rp_pri;
        uint8_t reserved;
index 817ebcc256e0c06a06552f029ed58276e2f93b1a..c2453efa069ddb55932fe8e07bb4036e8760a3b2 100644 (file)
@@ -860,12 +860,11 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
 
        frr_each (bsm_frags, pim->global_scope.bsm_frags, bsfrag) {
                char grp_str[PREFIX_STRLEN];
-               char rp_str[INET_ADDRSTRLEN];
-               char bsr_str[INET_ADDRSTRLEN];
                struct bsmmsg_grpinfo *group;
-               struct bsmmsg_rpinfo *rpaddr;
+               struct bsmmsg_rpinfo *bsm_rpinfo;
                struct prefix grp;
                struct bsm_hdr *hdr;
+               pim_addr bsr_addr;
                uint32_t offset = 0;
                uint8_t *buf;
                uint32_t len = 0;
@@ -879,17 +878,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
                len -= PIM_MSG_HEADER_LEN;
 
                hdr = (struct bsm_hdr *)buf;
+               /* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+               memcpy(&bsr_addr, &hdr->bsr_addr.addr, sizeof(bsr_addr));
 
                /* BSM starts with bsr header */
                buf += sizeof(struct bsm_hdr);
                len -= sizeof(struct bsm_hdr);
 
-               pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str,
-                              sizeof(bsr_str));
-
-
                if (uj) {
-                       json_object_string_add(json, "BSR address", bsr_str);
+                       json_object_string_addf(json, "BSR address", "%pPA",
+                                               &bsr_addr);
                        json_object_int_add(json, "BSR priority",
                                            hdr->bsr_prio);
                        json_object_int_add(json, "Hashmask Length",
@@ -901,7 +899,7 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
                        vty_out(vty, "------------------\n");
                        vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
                                "BSR-Priority", "Hashmask-len", "Fragment-Tag");
-                       vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str,
+                       vty_out(vty, "%-15pPA %-15d %-15d %-15d\n", &bsr_addr,
                                hdr->bsr_prio, hdr->hm_len,
                                ntohs(hdr->frag_tag));
                }
@@ -913,9 +911,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
 
                        if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
                                grp.family = AF_INET;
+                       else if (group->group.family ==
+                                PIM_MSG_ADDRESS_FAMILY_IPV6)
+                               grp.family = AF_INET6;
 
                        grp.prefixlen = group->group.mask;
-                       grp.u.prefix4.s_addr = group->group.addr.s_addr;
+#if PIM_IPV == 4
+                       grp.u.prefix4 = group->group.addr;
+#else
+                       grp.u.prefix6 = group->group.addr;
+#endif
 
                        prefix2str(&grp, grp_str, sizeof(grp_str));
 
@@ -954,31 +959,35 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
                                        "RpAddress     HoldTime     Priority\n");
 
                        while (frag_rp_cnt--) {
-                               rpaddr = (struct bsmmsg_rpinfo *)buf;
+                               pim_addr rp_addr;
+
+                               bsm_rpinfo = (struct bsmmsg_rpinfo *)buf;
+                               /* unaligned, again */
+                               memcpy(&rp_addr, &bsm_rpinfo->rpaddr,
+                                      sizeof(rp_addr));
 
                                buf += sizeof(struct bsmmsg_rpinfo);
                                offset += sizeof(struct bsmmsg_rpinfo);
 
-                               pim_inet4_dump("<Rp addr?>",
-                                              rpaddr->rpaddr.addr, rp_str,
-                                              sizeof(rp_str));
-
                                if (uj) {
                                        json_row = json_object_new_object();
-                                       json_object_string_add(
-                                               json_row, "Rp Address", rp_str);
+                                       json_object_string_addf(
+                                               json_row, "Rp Address", "%pPA",
+                                               &rp_addr);
                                        json_object_int_add(
                                                json_row, "Rp HoldTime",
-                                               ntohs(rpaddr->rp_holdtime));
+                                               ntohs(bsm_rpinfo->rp_holdtime));
                                        json_object_int_add(json_row,
                                                            "Rp Priority",
-                                                           rpaddr->rp_pri);
-                                       json_object_object_add(
-                                               json_group, rp_str, json_row);
+                                                           bsm_rpinfo->rp_pri);
+                                       json_object_object_addf(
+                                               json_group, json_row, "%pPA",
+                                               &rp_addr);
                                } else {
-                                       vty_out(vty, "%-15s %-12d %d\n", rp_str,
-                                               ntohs(rpaddr->rp_holdtime),
-                                               rpaddr->rp_pri);
+                                       vty_out(vty, "%-15pPA %-12d %d\n",
+                                               &rp_addr,
+                                               ntohs(bsm_rpinfo->rp_holdtime),
+                                               bsm_rpinfo->rp_pri);
                                }
                        }
                        vty_out(vty, "\n");
@@ -998,24 +1007,17 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
        struct bsgrp_node *bsgrp;
        struct bsm_rpinfo *bsm_rp;
        struct route_node *rn;
-       char bsr_str[INET_ADDRSTRLEN];
        json_object *json = NULL;
        json_object *json_group = NULL;
        json_object *json_row = NULL;
 
-       if (pim->global_scope.current_bsr.s_addr == INADDR_ANY)
-               strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
-
-       else
-               pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str,
-                              sizeof(bsr_str));
-
        if (uj) {
                json = json_object_new_object();
-               json_object_string_add(json, "BSR Address", bsr_str);
-       } else {
-               vty_out(vty, "BSR Address  %s\n", bsr_str);
-       }
+               json_object_string_addf(json, "BSR Address", "%pPA",
+                                       &pim->global_scope.current_bsr);
+       } else
+               vty_out(vty, "BSR Address  %pPA\n",
+                       &pim->global_scope.current_bsr);
 
        for (rn = route_top(pim->global_scope.bsrp_table); rn;
             rn = route_next(rn)) {
@@ -1045,27 +1047,24 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
                }
 
                frr_each (bsm_rpinfos, bsgrp->bsrp_list, bsm_rp) {
-                       char rp_str[INET_ADDRSTRLEN];
-
-                       pim_inet4_dump("<Rp Address?>", bsm_rp->rp_address,
-                                      rp_str, sizeof(rp_str));
-
                        if (uj) {
                                json_row = json_object_new_object();
-                               json_object_string_add(json_row, "Rp Address",
-                                                      rp_str);
+                               json_object_string_addf(json_row, "Rp Address",
+                                                       "%pPA",
+                                                       &bsm_rp->rp_address);
                                json_object_int_add(json_row, "Rp HoldTime",
                                                    bsm_rp->rp_holdtime);
                                json_object_int_add(json_row, "Rp Priority",
                                                    bsm_rp->rp_prio);
                                json_object_int_add(json_row, "Hash Val",
                                                    bsm_rp->hash);
-                               json_object_object_add(json_group, rp_str,
-                                                      json_row);
+                               json_object_object_addf(json_group, json_row,
+                                                       "%pPA",
+                                                       &bsm_rp->rp_address);
 
                        } else {
-                               vty_out(vty, "%-15s %-15u %-15u %-15u\n",
-                                       rp_str, bsm_rp->rp_prio,
+                               vty_out(vty, "%-15pPA %-15u %-15u %-15u\n",
+                                       &bsm_rp->rp_address, bsm_rp->rp_prio,
                                        bsm_rp->rp_holdtime, bsm_rp->hash);
                        }
                }
@@ -1086,26 +1085,23 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
                }
 
                frr_each (bsm_rpinfos, bsgrp->partial_bsrp_list, bsm_rp) {
-                       char rp_str[INET_ADDRSTRLEN];
-
-                       pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address, rp_str,
-                                      sizeof(rp_str));
-
                        if (uj) {
                                json_row = json_object_new_object();
-                               json_object_string_add(json_row, "Rp Address",
-                                                      rp_str);
+                               json_object_string_addf(json_row, "Rp Address",
+                                                       "%pPA",
+                                                       &bsm_rp->rp_address);
                                json_object_int_add(json_row, "Rp HoldTime",
                                                    bsm_rp->rp_holdtime);
                                json_object_int_add(json_row, "Rp Priority",
                                                    bsm_rp->rp_prio);
                                json_object_int_add(json_row, "Hash Val",
                                                    bsm_rp->hash);
-                               json_object_object_add(json_group, rp_str,
-                                                      json_row);
+                               json_object_object_addf(json_group, json_row,
+                                                       "%pPA",
+                                                       &bsm_rp->rp_address);
                        } else {
-                               vty_out(vty, "%-15s %-15u %-15u %-15u\n",
-                                       rp_str, bsm_rp->rp_prio,
+                               vty_out(vty, "%-15pPA %-15u %-15u %-15u\n",
+                                       &bsm_rp->rp_address, bsm_rp->rp_prio,
                                        bsm_rp->rp_holdtime, bsm_rp->hash);
                        }
                }
@@ -1135,12 +1131,12 @@ static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
                json = json_object_new_object();
                json_object_int_add(json, "totalGroups", pim->igmp_group_count);
                json_object_int_add(json, "watermarkLimit",
-                                   pim->igmp_watermark_limit);
+                                   pim->gm_watermark_limit);
        } else {
                vty_out(vty, "Total IGMP groups: %u\n", pim->igmp_group_count);
                vty_out(vty, "Watermark warn limit(%s): %u\n",
-                       pim->igmp_watermark_limit ? "Set" : "Not Set",
-                       pim->igmp_watermark_limit);
+                       pim->gm_watermark_limit ? "Set" : "Not Set",
+                       pim->gm_watermark_limit);
                vty_out(vty,
                        "Interface        Group           Mode Timer    Srcs V Uptime  \n");
        }
@@ -1451,11 +1447,9 @@ static void pim_show_bsr(struct pim_instance *pim,
        char last_bsm_seen[10];
        time_t now;
        char bsr_state[20];
-       char bsr_str[PREFIX_STRLEN];
        json_object *json = NULL;
 
-       if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) {
-               strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+       if (pim_addr_is_any(pim->global_scope.current_bsr)) {
                pim_time_uptime(uptime, sizeof(uptime),
                                pim->global_scope.current_bsr_first_ts);
                pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
@@ -1463,8 +1457,6 @@ static void pim_show_bsr(struct pim_instance *pim,
        }
 
        else {
-               pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr,
-                              bsr_str, sizeof(bsr_str));
                now = pim_time_monotonic_sec();
                pim_time_uptime(uptime, sizeof(uptime),
                                (now - pim->global_scope.current_bsr_first_ts));
@@ -1486,9 +1478,11 @@ static void pim_show_bsr(struct pim_instance *pim,
                strlcpy(bsr_state, "", sizeof(bsr_state));
        }
 
+
        if (uj) {
                json = json_object_new_object();
-               json_object_string_add(json, "bsr", bsr_str);
+               json_object_string_addf(json, "bsr", "%pPA",
+                                       &pim->global_scope.current_bsr);
                json_object_int_add(json, "priority",
                                    pim->global_scope.current_bsr_prio);
                json_object_int_add(json, "fragmentTag",
@@ -1500,7 +1494,8 @@ static void pim_show_bsr(struct pim_instance *pim,
 
        else {
                vty_out(vty, "PIMv2 Bootstrap information\n");
-               vty_out(vty, "Current preferred BSR address: %s\n", bsr_str);
+               vty_out(vty, "Current preferred BSR address: %pPA\n",
+                       &pim->global_scope.current_bsr);
                vty_out(vty,
                        "Priority        Fragment-Tag       State           UpTime\n");
                vty_out(vty, "  %-12d    %-12d    %-13s    %7s\n",
@@ -1526,17 +1521,6 @@ static void clear_igmp_interfaces(struct pim_instance *pim)
                pim_if_addr_add_all(ifp);
 }
 
-static void clear_pim_interfaces(struct pim_instance *pim)
-{
-       struct interface *ifp;
-
-       FOR_ALL_INTERFACES (pim->vrf, ifp) {
-               if (ifp->info) {
-                       pim_neighbor_delete_all(ifp, "interface cleared");
-               }
-       }
-}
-
 static void clear_interfaces(struct pim_instance *pim)
 {
        clear_igmp_interfaces(pim);
@@ -1662,7 +1646,7 @@ DEFPY (clear_ip_mroute,
        "clear ip mroute [vrf NAME]$name",
        CLEAR_STR
        IP_STR
-       "Reset multicast routes\n"
+       MROUTE_STR
        VRF_CMD_HELP_STR)
 {
        struct vrf *v = pim_cmd_lookup(vty, name);
@@ -1675,7 +1659,7 @@ DEFPY (clear_ip_mroute,
        return CMD_SUCCESS;
 }
 
-DEFUN (clear_ip_pim_interfaces,
+DEFPY (clear_ip_pim_interfaces,
        clear_ip_pim_interfaces_cmd,
        "clear ip pim [vrf NAME] interfaces",
        CLEAR_STR
@@ -1684,61 +1668,27 @@ DEFUN (clear_ip_pim_interfaces,
        VRF_CMD_HELP_STR
        "Reset PIM interfaces\n")
 {
-       int idx = 2;
-       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+       struct vrf *v = pim_cmd_lookup(vty, vrf);
 
-       if (!vrf)
+       if (!v)
                return CMD_WARNING;
 
-       clear_pim_interfaces(vrf->info);
+       clear_pim_interfaces(v->info);
 
        return CMD_SUCCESS;
 }
 
-DEFUN (clear_ip_pim_interface_traffic,
+DEFPY (clear_ip_pim_interface_traffic,
        clear_ip_pim_interface_traffic_cmd,
        "clear ip pim [vrf NAME] interface traffic",
-       "Reset functions\n"
-       "IP information\n"
-       "PIM clear commands\n"
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
        VRF_CMD_HELP_STR
        "Reset PIM interfaces\n"
        "Reset Protocol Packet counters\n")
 {
-       int idx = 2;
-       struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
-       struct interface *ifp = NULL;
-       struct pim_interface *pim_ifp = NULL;
-
-       if (!vrf)
-               return CMD_WARNING;
-
-       FOR_ALL_INTERFACES (vrf, ifp) {
-               pim_ifp = ifp->info;
-
-               if (!pim_ifp)
-                       continue;
-
-               pim_ifp->pim_ifstat_hello_recv = 0;
-               pim_ifp->pim_ifstat_hello_sent = 0;
-               pim_ifp->pim_ifstat_join_recv = 0;
-               pim_ifp->pim_ifstat_join_send = 0;
-               pim_ifp->pim_ifstat_prune_recv = 0;
-               pim_ifp->pim_ifstat_prune_send = 0;
-               pim_ifp->pim_ifstat_reg_recv = 0;
-               pim_ifp->pim_ifstat_reg_send = 0;
-               pim_ifp->pim_ifstat_reg_stop_recv = 0;
-               pim_ifp->pim_ifstat_reg_stop_send = 0;
-               pim_ifp->pim_ifstat_assert_recv = 0;
-               pim_ifp->pim_ifstat_assert_send = 0;
-               pim_ifp->pim_ifstat_bsm_rx = 0;
-               pim_ifp->pim_ifstat_bsm_tx = 0;
-               pim_ifp->igmp_ifstat_joins_sent = 0;
-               pim_ifp->igmp_ifstat_joins_failed = 0;
-               pim_ifp->igmp_peak_group_count = 0;
-       }
-
-       return CMD_SUCCESS;
+       return clear_pim_interface_traffic(vrf, vty);
 }
 
 DEFPY (clear_ip_pim_oil,
@@ -2782,14 +2732,15 @@ DEFPY (show_ip_pim_rpf_vrf_all,
 
 DEFPY (show_ip_pim_nexthop,
        show_ip_pim_nexthop_cmd,
-       "show ip pim [vrf NAME] nexthop",
+       "show ip pim [vrf NAME] nexthop [json$json]",
        SHOW_STR
        IP_STR
        PIM_STR
        VRF_CMD_HELP_STR
-       "PIM cached nexthop rpf information\n")
+       "PIM cached nexthop rpf information\n"
+       JSON_STR)
 {
-       return pim_show_nexthop_cmd_helper(vrf, vty);
+       return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
 }
 
 DEFPY (show_ip_pim_nexthop_lookup,
@@ -3334,7 +3285,7 @@ DEFPY (ip_igmp_group_watermark,
        "Group count to generate watermark warning\n")
 {
        PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
-       pim->igmp_watermark_limit = limit;
+       pim->gm_watermark_limit = limit;
 
        return CMD_SUCCESS;
 }
@@ -3349,7 +3300,7 @@ DEFPY (no_ip_igmp_group_watermark,
        IGNORED_IN_NO_STR)
 {
        PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
-       pim->igmp_watermark_limit = 0;
+       pim->gm_watermark_limit = 0;
 
        return CMD_SUCCESS;
 }
@@ -4166,26 +4117,21 @@ DEFPY_HIDDEN (pim_test_sg_keepalive,
              "The Group we are resetting\n")
 {
        struct pim_upstream *up;
+       struct vrf *vrf;
        struct pim_instance *pim;
        pim_sgaddr sg;
 
        sg.src = source;
        sg.grp = group;
 
-       if (!name)
-               pim = pim_get_pim_instance(VRF_DEFAULT);
-       else {
-               struct vrf *vrf = vrf_lookup_by_name(name);
-
-               if (!vrf) {
-                       vty_out(vty, "%% Vrf specified: %s does not exist\n",
-                               name);
-                       return CMD_WARNING;
-               }
-
-               pim = pim_get_pim_instance(vrf->vrf_id);
+       vrf = vrf_lookup_by_name(name ? name : VRF_DEFAULT_NAME);
+       if (!vrf) {
+               vty_out(vty, "%% Vrf specified: %s does not exist\n", name);
+               return CMD_WARNING;
        }
 
+       pim = vrf->info;
+
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
                return CMD_WARNING;
@@ -5580,12 +5526,13 @@ DEFUN (show_ip_msdp_mesh_group,
        int idx = 2;
        struct pim_msdp_mg *mg;
        struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
-       struct pim_instance *pim = vrf->info;
+       struct pim_instance *pim;
        struct json_object *json = NULL;
 
        if (!vrf)
                return CMD_WARNING;
 
+       pim = vrf->info;
        /* Quick case: list is empty. */
        if (SLIST_EMPTY(&pim->msdp.mglist)) {
                if (uj)
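
The last hunk above also fixes an ordering bug in show_ip_msdp_mesh_group: pim = vrf->info used to be evaluated in the initializer, before the if (!vrf) check could run, so an unknown VRF name dereferenced a NULL pointer; the assignment now happens after the check. The same hazard in miniature, with illustrative types only:

/* Dereference-before-check versus check-then-dereference. */
#include <stddef.h>

struct vrf {
        void *info;
};

/* Before: `void *pim = v->info;` in the initializer runs before the guard
 * and crashes when v == NULL.  After (what the diff does): */
void *get_pim_instance(const struct vrf *v)
{
        void *pim;

        if (!v)
                return NULL;

        pim = v->info;          /* only reached for a valid vrf */
        return pim;
}
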
index 03689bea8df93b4a00484f3dcc06e27fb2e31ae1..009936cdb751248e4f33c1622c4ddfa6257a397d 100644 (file)
@@ -32,6 +32,7 @@
 #include "ferr.h"
 #include "lib/srcdest_table.h"
 #include "lib/linklist.h"
+#include "termtable.h"
 
 #include "pimd.h"
 #include "pim_instance.h"
@@ -56,6 +57,7 @@
 #include "pim_addr.h"
 #include "pim_static.h"
 #include "pim_util.h"
+#include "pim6_mld.h"
 
 /**
  * Get current node VRF name.
@@ -886,6 +888,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
 {
        struct pim_upstream *up;
        time_t now = pim_time_monotonic_sec();
+       struct ttable *tt = NULL;
+       char *table = NULL;
        json_object *json_group = NULL;
        json_object *json_row = NULL;
 
@@ -893,18 +897,21 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
 
        if (!json) {
                vty_out(vty, "\n");
-               vty_out(vty,
-                       "Source          Group           RpfIface         RpfAddress      RibNextHop      Metric Pref\n");
+
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt,
+                       "Source|Group|RpfIface|RpfAddress|RibNextHop|Metric|Pref");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
        }
 
        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
-               char rpf_addr_str[PREFIX_STRLEN];
                const char *rpf_ifname;
                struct pim_rpf *rpf = &up->rpf;
 
-               pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str,
-                             sizeof(rpf_addr_str));
-
                rpf_ifname =
                        rpf->source_nexthop.interface ? rpf->source_nexthop
                                                                .interface->name
@@ -932,8 +939,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
                        json_object_string_add(json_row, "group", grp_str);
                        json_object_string_add(json_row, "rpfInterface",
                                               rpf_ifname);
-                       json_object_string_add(json_row, "rpfAddress",
-                                              rpf_addr_str);
+                       json_object_string_addf(json_row, "rpfAddress", "%pPA",
+                                               &rpf->rpf_addr);
                        json_object_string_addf(
                                json_row, "ribNexthop", "%pPAs",
                                &rpf->source_nexthop.mrib_nexthop_addr);
@@ -946,23 +953,36 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
                        json_object_object_add(json_group, src_str, json_row);
 
                } else {
-                       vty_out(vty,
-                               "%-15pPAs %-15pPAs %-16s %-15s %-15pPAs %6d %4d\n",
+                       ttable_add_row(
+                               tt, "%pPAs|%pPAs|%s|%pPA|%pPAs|%d|%d",
                                &up->sg.src, &up->sg.grp, rpf_ifname,
-                               rpf_addr_str,
+                               &rpf->rpf_addr,
                                &rpf->source_nexthop.mrib_nexthop_addr,
                                rpf->source_nexthop.mrib_route_metric,
                                rpf->source_nexthop.mrib_metric_preference);
                }
        }
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
 {
        struct interface *ifp;
+       struct ttable *tt = NULL;
+       char *table = NULL;
 
-       vty_out(vty,
-               "Interface        Address         Neighbor        Secondary      \n");
+       /* Prepare table. */
+       tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+       ttable_add_row(tt, "Interface|Address|Neighbor|Secondary");
+       tt->style.cell.rpad = 2;
+       tt->style.corner = '+';
+       ttable_restyle(tt);
 
        FOR_ALL_INTERFACES (pim->vrf, ifp) {
                struct pim_interface *pim_ifp;
@@ -990,12 +1010,16 @@ void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
 
                        for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list,
                                                  prefix_node, p))
-                               vty_out(vty,
-                                       "%-16s %-15pPAs %-15pPAs %-15pFX\n",
-                                       ifp->name, &ifaddr, &neigh->source_addr,
-                                       p);
+                               ttable_add_row(tt, "%s|%pPAs|%pPAs|%pFX",
+                                              ifp->name, &ifaddr,
+                                              &neigh->source_addr, p);
                }
        }
+       /* Dump the generated table. */
+       table = ttable_dump(tt, "\n");
+       vty_out(vty, "%s\n", table);
+       XFREE(MTYPE_TMP, table);
+       ttable_del(tt);
 }
 
 void pim_show_state(struct pim_instance *pim, struct vty *vty,
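Editor's note: the table conversions above all follow the same termtable lifecycle. A condensed sketch of that lifecycle, assuming the termtable, memory and vty headers already pulled in by pim_cmd_common.c; show_two_columns() is a made-up name, but every library call below appears verbatim in the hunks above.

/* Hypothetical helper showing the ttable calls used by the converted
 * show commands. */
static void show_two_columns(struct vty *vty)
{
	struct ttable *tt;
	char *out;

	/* Header row first; '|' separates columns. */
	tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
	ttable_add_row(tt, "Interface|Address");
	tt->style.cell.rpad = 2;
	tt->style.corner = '+';
	ttable_restyle(tt);

	/* Data rows reuse the printfrr-style format string. */
	ttable_add_row(tt, "%s|%s", "pim-reg", "0.0.0.0");

	/* Render, print, then free both the dump string and the table. */
	out = ttable_dump(tt, "\n");
	vty_out(vty, "%s\n", out);
	XFREE(MTYPE_TMP, out);
	ttable_del(tt);
}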
@@ -1319,15 +1343,24 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
                       pim_sgaddr *sg, json_object *json)
 {
        struct pim_upstream *up;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        time_t now;
        json_object *json_group = NULL;
        json_object *json_row = NULL;
 
        now = pim_time_monotonic_sec();
 
-       if (!json)
-               vty_out(vty,
-                       "Iif             Source          Group           State       Uptime   JoinTimer RSTimer   KATimer   RefCnt\n");
+       if (!json) {
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt,
+                       "Iif|Source|Group|State|Uptime|JoinTimer|RSTimer|KATimer|RefCnt");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
+       }
 
        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
                char uptime[10];
@@ -1352,9 +1385,9 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
                if (!up->t_join_timer && up->rpf.source_nexthop.interface) {
                        struct pim_neighbor *nbr;
 
-                       nbr = pim_neighbor_find_prefix(
+                       nbr = pim_neighbor_find(
                                up->rpf.source_nexthop.interface,
-                               &up->rpf.rpf_addr);
+                               up->rpf.rpf_addr);
                        if (nbr)
                                pim_time_timer_to_hhmmss(join_timer,
                                                         sizeof(join_timer),
@@ -1418,7 +1451,7 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
 
                                rpg = RP(pim, up->sg.grp);
                                json_object_string_addf(json_row, "rpfAddress",
-                                                       "%pFX", &rpg->rpf_addr);
+                                                       "%pPA", &rpg->rpf_addr);
                        } else {
                                json_object_string_add(json_row, "rpfAddress",
                                                       src_str);
@@ -1448,8 +1481,8 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
                        json_object_int_add(json_row, "sptBit", up->sptbit);
                        json_object_object_add(json_group, src_str, json_row);
                } else {
-                       vty_out(vty,
-                               "%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n",
+                       ttable_add_row(tt,
+                               "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s|%d",
                                up->rpf.source_nexthop.interface
                                ? up->rpf.source_nexthop.interface->name
                                : "Unknown",
@@ -1457,12 +1490,20 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
                                join_timer, rs_timer, ka_timer, up->ref_count);
                }
        }
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 static void pim_show_join_desired_helper(struct pim_instance *pim,
                                         struct vty *vty,
                                         struct pim_upstream *up,
-                                        json_object *json, bool uj)
+                                        json_object *json, bool uj,
+                                        struct ttable *tt)
 {
        json_object *json_group = NULL;
        json_object *json_row = NULL;
@@ -1493,56 +1534,75 @@ static void pim_show_join_desired_helper(struct pim_instance *pim,
                json_object_object_add(json_group, src_str, json_row);
 
        } else {
-               vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", &up->sg.src,
-                       &up->sg.grp,
-                       pim_upstream_evaluate_join_desired(pim, up) ? "yes"
-                                                                   : "no");
+               ttable_add_row(tt, "%pPAs|%pPAs|%s", &up->sg.src, &up->sg.grp,
+                              pim_upstream_evaluate_join_desired(pim, up)
+                                      ? "yes"
+                                      : "no");
        }
 }
 
 void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj)
 {
        struct pim_upstream *up;
+       struct ttable *tt = NULL;
+       char *table = NULL;
 
        json_object *json = NULL;
 
        if (uj)
                json = json_object_new_object();
-       else
-               vty_out(vty, "Source          Group           EvalJD\n");
+       else {
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(tt, "Source|Group|EvalJD");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
+       }
 
        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
                /* scan all interfaces */
-               pim_show_join_desired_helper(pim, vty, up, json, uj);
+               pim_show_join_desired_helper(pim, vty, up, json, uj, tt);
        }
 
        if (uj)
                vty_json(vty, json);
+       else {
+               /* Dump the generated table. */
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
 {
        struct pim_upstream *up;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        json_object *json = NULL;
        json_object *json_group = NULL;
        json_object *json_row = NULL;
 
        if (uj)
                json = json_object_new_object();
-       else
-               vty_out(vty,
-                       "Source          Group           RpfIface         RibNextHop      RpfAddress     \n");
+       else {
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(tt,
+                              "Source|Group|RpfIface|RibNextHop|RpfAddress");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
+       }
 
        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
-               char rpf_addr_str[PREFIX_STRLEN];
                struct pim_rpf *rpf;
                const char *rpf_ifname;
 
                rpf = &up->rpf;
 
-               pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str,
-                             sizeof(rpf_addr_str));
-
                rpf_ifname =
                        rpf->source_nexthop.interface ? rpf->source_nexthop
                                                                .interface->name
@@ -1573,19 +1633,26 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
                        json_object_string_addf(
                                json_row, "ribNexthop", "%pPAs",
                                &rpf->source_nexthop.mrib_nexthop_addr);
-                       json_object_string_add(json_row, "rpfAddress",
-                                              rpf_addr_str);
+                       json_object_string_addf(json_row, "rpfAddress", "%pPA",
+                                               &rpf->rpf_addr);
                        json_object_object_add(json_group, src_str, json_row);
                } else {
-                       vty_out(vty, "%-15pPAs %-15pPAs %-16s %-15pPA %-15s\n",
-                               &up->sg.src, &up->sg.grp, rpf_ifname,
-                               &rpf->source_nexthop.mrib_nexthop_addr,
-                               rpf_addr_str);
+                       ttable_add_row(tt, "%pPAs|%pPAs|%s|%pPA|%pPA",
+                                      &up->sg.src, &up->sg.grp, rpf_ifname,
+                                      &rpf->source_nexthop.mrib_nexthop_addr,
+                                      &rpf->rpf_addr);
                }
        }
 
        if (uj)
                vty_json(vty, json);
+       else {
+               /* Dump the generated table. */
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp,
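Editor's note: when `uj` is set, these show functions skip the table entirely and hand a single json_object to vty_json(), which emits the object and, as used here, also takes ownership of it (the hunks never call json_object_free() on that path). A minimal sketch of the split, with a made-up key name:

/* Sketch of the json/table split; assumes the vty and json wrappers
 * used throughout pim_cmd_common.c. */
static void show_eval_jd(struct vty *vty, bool uj, bool join_desired)
{
	json_object *json = NULL;

	if (uj)
		json = json_object_new_object();

	if (json)
		json_object_string_add(json, "evalJD",
				       join_desired ? "yes" : "no");
	else
		vty_out(vty, "EvalJD: %s\n", join_desired ? "yes" : "no");

	if (uj)
		/* Prints the object and releases it. */
		vty_json(vty, json);
}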
@@ -1679,7 +1746,7 @@ int pim_show_join_cmd_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
                vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
                return CMD_WARNING;
        }
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -1780,7 +1847,7 @@ int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty)
                vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
                return CMD_WARNING;
        }
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -2131,6 +2198,8 @@ void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag,
        int pim_nbrs = 0;
        int pim_ifchannels = 0;
        bool uj = true;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        json_object *json_row = NULL;
        json_object *json_tmp;
 
@@ -2174,43 +2243,60 @@ void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag,
        }
 
        if (!uj) {
-               vty_out(vty,
-                       "Interface         State          Address  PIM Nbrs           PIM DR  FHR IfChannels\n");
+
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt,
+                       "Interface|State|Address|PIM Nbrs|PIM DR|FHR|IfChannels");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
 
                json_object_object_foreach(json, key, val)
                {
-                       vty_out(vty, "%-16s  ", key);
+                       const char *state, *address, *pimdr;
+                       int neighbors, firsthpr, pimifchnl;
 
                        json_object_object_get_ex(val, "state", &json_tmp);
-                       vty_out(vty, "%5s  ", json_object_get_string(json_tmp));
+                       state = json_object_get_string(json_tmp);
 
                        json_object_object_get_ex(val, "address", &json_tmp);
-                       vty_out(vty, "%15s  ",
-                               json_object_get_string(json_tmp));
+                       address = json_object_get_string(json_tmp);
 
                        json_object_object_get_ex(val, "pimNeighbors",
                                                  &json_tmp);
-                       vty_out(vty, "%8d  ", json_object_get_int(json_tmp));
+                       neighbors = json_object_get_int(json_tmp);
 
                        if (json_object_object_get_ex(
                                    val, "pimDesignatedRouterLocal",
                                    &json_tmp)) {
-                               vty_out(vty, "%15s  ", "local");
+                               pimdr = "local";
                        } else {
                                json_object_object_get_ex(
                                        val, "pimDesignatedRouter", &json_tmp);
-                               vty_out(vty, "%15s  ",
-                                       json_object_get_string(json_tmp));
+                               pimdr = json_object_get_string(json_tmp);
                        }
 
                        json_object_object_get_ex(val, "firstHopRouter",
                                                  &json_tmp);
-                       vty_out(vty, "%3d  ", json_object_get_int(json_tmp));
+                       firsthpr = json_object_get_int(json_tmp);
 
                        json_object_object_get_ex(val, "pimIfChannels",
                                                  &json_tmp);
-                       vty_out(vty, "%9d\n", json_object_get_int(json_tmp));
+                       pimifchnl = json_object_get_int(json_tmp);
+
+                       ttable_add_row(tt, "%s|%s|%s|%d|%s|%d|%d", key, state,
+                                      address, neighbors, pimdr, firsthpr,
+                                      pimifchnl);
                }
+
+               /* Dump the generated table. */
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+
+               ttable_del(tt);
        }
 }
 
@@ -2635,40 +2721,90 @@ void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
                vty_out(vty, "SSM group range : %s\n", range_str);
 }
 
-struct pnc_cache_walk_data {
+struct vty_pnc_cache_walk_data {
        struct vty *vty;
        struct pim_instance *pim;
 };
 
-static int pim_print_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
+struct json_pnc_cache_walk_data {
+       json_object *json_obj;
+       struct pim_instance *pim;
+};
+
+static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
 {
        struct pim_nexthop_cache *pnc = bucket->data;
-       struct pnc_cache_walk_data *cwd = arg;
+       struct vty_pnc_cache_walk_data *cwd = arg;
        struct vty *vty = cwd->vty;
        struct pim_instance *pim = cwd->pim;
        struct nexthop *nh_node = NULL;
        ifindex_t first_ifindex;
        struct interface *ifp = NULL;
-       char buf[PREFIX_STRLEN];
 
        for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
                first_ifindex = nh_node->ifindex;
+
                ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
 
-               vty_out(vty, "%-15s ",
-                       inet_ntop(AF_INET, &pnc->rpf.rpf_addr.u.prefix4, buf,
-                                 sizeof(buf)));
+               vty_out(vty, "%-15pPA ", &pnc->rpf.rpf_addr);
                vty_out(vty, "%-16s ", ifp ? ifp->name : "NULL");
+#if PIM_IPV == 4
                vty_out(vty, "%pI4 ", &nh_node->gate.ipv4);
+#else
+               vty_out(vty, "%pI6 ", &nh_node->gate.ipv6);
+#endif
                vty_out(vty, "\n");
        }
        return CMD_SUCCESS;
 }
 
+static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *bucket,
+                                          void *arg)
+{
+       struct pim_nexthop_cache *pnc = bucket->data;
+       struct json_pnc_cache_walk_data *cwd = arg;
+       struct pim_instance *pim = cwd->pim;
+       struct nexthop *nh_node = NULL;
+       ifindex_t first_ifindex;
+       struct interface *ifp = NULL;
+       char addr_str[PIM_ADDRSTRLEN];
+       json_object *json_row = NULL;
+       json_object *json_ifp = NULL;
+       json_object *json_arr = NULL;
+
+       for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+               first_ifindex = nh_node->ifindex;
+               ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+               snprintfrr(addr_str, sizeof(addr_str), "%pPA",
+                          &pnc->rpf.rpf_addr);
+               json_object_object_get_ex(cwd->json_obj, addr_str, &json_row);
+               if (!json_row) {
+                       json_row = json_object_new_object();
+                       json_object_string_addf(json_row, "address", "%pPA",
+                                               &pnc->rpf.rpf_addr);
+                       json_object_object_addf(cwd->json_obj, json_row, "%pPA",
+                                               &pnc->rpf.rpf_addr);
+                       json_arr = json_object_new_array();
+                       json_object_object_add(json_row, "nexthops", json_arr);
+               }
+               json_ifp = json_object_new_object();
+               json_object_string_add(json_ifp, "interface",
+                                      ifp ? ifp->name : "NULL");
+#if PIM_IPV == 4
+               json_object_string_addf(json_ifp, "nexthop", "%pI4",
+                                       &nh_node->gate.ipv4);
+#else
+               json_object_string_addf(json_ifp, "nexthop", "%pI6",
+                                       &nh_node->gate.ipv6);
+#endif
+               json_object_array_add(json_arr, json_ifp);
+       }
+       return CMD_SUCCESS;
+}
+
 int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
                                       pim_addr source, pim_addr group)
 {
-       struct prefix nht_p;
        int result = 0;
        pim_addr vif_source;
        struct prefix grp;
@@ -2698,11 +2834,11 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
        if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))
                return CMD_SUCCESS;
 
-       pim_addr_to_prefix(&nht_p, vif_source);
        pim_addr_to_prefix(&grp, group);
        memset(&nexthop, 0, sizeof(nexthop));
 
-       result = pim_ecmp_nexthop_lookup(v->info, &nexthop, &nht_p, &grp, 0);
+       result =
+               pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0);
 
        if (!result) {
                vty_out(vty,
@@ -2718,7 +2854,7 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
        return CMD_SUCCESS;
 }
 
-int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty)
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj)
 {
        struct vrf *v;
 
@@ -2727,23 +2863,35 @@ int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty)
        if (!v)
                return CMD_WARNING;
 
-       pim_show_nexthop(v->info, vty);
+       pim_show_nexthop(v->info, vty, uj);
 
        return CMD_SUCCESS;
 }
 
-void pim_show_nexthop(struct pim_instance *pim, struct vty *vty)
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj)
 {
-       struct pnc_cache_walk_data cwd;
+       struct vty_pnc_cache_walk_data cwd;
+       struct json_pnc_cache_walk_data jcwd;
 
        cwd.vty = vty;
        cwd.pim = pim;
-       vty_out(vty, "Number of registered addresses: %lu\n",
-               pim->rpf_hash->count);
-       vty_out(vty, "Address         Interface        Nexthop\n");
-       vty_out(vty, "---------------------------------------------\n");
+       jcwd.pim = pim;
+
+       if (uj) {
+               jcwd.json_obj = json_object_new_object();
+       } else {
+               vty_out(vty, "Number of registered addresses: %lu\n",
+                       pim->rpf_hash->count);
+               vty_out(vty, "Address         Interface        Nexthop\n");
+               vty_out(vty, "---------------------------------------------\n");
+       }
 
-       hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd);
+       if (uj) {
+               hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb,
+                         &jcwd);
+               vty_json(vty, jcwd.json_obj);
+       } else
+               hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
 }
 
 int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
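Editor's note: pim_show_nexthop() now dispatches to one of two hash_walk() callbacks; the JSON one threads its output object through a small walk-data struct. A stripped-down sketch of that pattern (struct and function names are illustrative):

/* Minimal hash_walk callback in the style of
 * pim_print_json_pnc_cache_walkcb(); only the walk plumbing is shown. */
struct pnc_json_walk_data {
	json_object *json_obj;
};

static int pnc_json_walkcb(struct hash_bucket *bucket, void *arg)
{
	struct pim_nexthop_cache *pnc = bucket->data;
	struct pnc_json_walk_data *cwd = arg;

	/* One member per cached RPF address, keyed by its %pPA rendering. */
	json_object_object_addf(cwd->json_obj, json_object_new_object(),
				"%pPA", &pnc->rpf.rpf_addr);
	return CMD_SUCCESS;
}

static void dump_pnc_cache_json(struct pim_instance *pim, struct vty *vty)
{
	struct pnc_json_walk_data cwd = {
		.json_obj = json_object_new_object(),
	};

	hash_walk(pim->rpf_hash, pnc_json_walkcb, &cwd);
	vty_json(vty, cwd.json_obj);
}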
@@ -3209,13 +3357,22 @@ void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty,
                               json_object *json)
 {
        struct interface *ifp;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        json_object *json_row = NULL;
 
        vty_out(vty, "\n");
 
-       if (!json)
-               vty_out(vty,
-                       "Interface        Address            ifi Vif  PktsIn PktsOut    BytesIn   BytesOut\n");
+       if (!json) {
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt,
+                       "Interface|Address|ifi|Vif|PktsIn|PktsOut|BytesIn|BytesOut");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
+       }
 
        FOR_ALL_INTERFACES (pim->vrf, ifp) {
                struct pim_interface *pim_ifp;
@@ -3271,16 +3428,22 @@ void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty,
                                            (unsigned long)vreq.obytes);
                        json_object_object_add(json, ifp->name, json_row);
                } else {
-                       vty_out(vty,
-                               "%-16s %-15pPAs %3d %3d %7lu %7lu %10lu %10lu\n",
-                               ifp->name, &pim_ifp->primary_address,
-                               ifp->ifindex, pim_ifp->mroute_vif_index,
-                               (unsigned long)vreq.icount,
-                               (unsigned long)vreq.ocount,
-                               (unsigned long)vreq.ibytes,
-                               (unsigned long)vreq.obytes);
+                       ttable_add_row(tt, "%s|%pPAs|%d|%d|%lu|%lu|%lu|%lu",
+                                      ifp->name, &pim_ifp->primary_address,
+                                      ifp->ifindex, pim_ifp->mroute_vif_index,
+                                      (unsigned long)vreq.icount,
+                                      (unsigned long)vreq.ocount,
+                                      (unsigned long)vreq.ibytes,
+                                      (unsigned long)vreq.obytes);
                }
        }
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, struct vty *vty)
@@ -3297,6 +3460,8 @@ void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, struct vty *vty)
        vty_out(vty, "Mroute socket descriptor:");
 
        vty_out(vty, " %d(%s)\n", pim->mroute_socket, vrf->name);
+       vty_out(vty, "PIM Register socket descriptor:");
+       vty_out(vty, " %d(%s)\n", pim->reg_sock, vrf->name);
 
        pim_time_uptime(uptime, sizeof(uptime),
                        now - pim->mroute_socket_creation);
@@ -3334,6 +3499,8 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
        struct listnode *node;
        struct channel_oil *c_oil;
        struct static_route *s_route;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        time_t now;
        json_object *json_group = NULL;
        json_object *json_source = NULL;
@@ -3356,8 +3523,14 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
                vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n");
                vty_out(vty,
                        "       R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n");
-               vty_out(vty,
-                       "\nSource          Group           Flags    Proto  Input            Output           TTL  Uptime\n");
+
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt, "Source|Group|Flags|Proto|Input|Output|TTL|Uptime");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
        }
 
        now = pim_time_monotonic_sec();
@@ -3561,11 +3734,10 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
                                        strlcpy(proto, "STAR", sizeof(proto));
                                }
 
-                               vty_out(vty,
-                                       "%-15s %-15s %-8s %-6s %-16s %-16s %-3d  %8s\n",
-                                       src_str, grp_str, state_str, proto,
-                                       in_ifname, out_ifname, ttl,
-                                       mroute_uptime);
+                               ttable_add_row(tt, "%s|%s|%s|%s|%s|%s|%d|%s",
+                                              src_str, grp_str, state_str,
+                                              proto, in_ifname, out_ifname,
+                                              ttl, mroute_uptime);
 
                                if (first) {
                                        src_str[0] = '\0';
@@ -3579,11 +3751,10 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
                }
 
                if (!json && !found_oif) {
-                       vty_out(vty,
-                               "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n",
-                               oil_origin(c_oil), oil_mcastgrp(c_oil),
-                               state_str, "none", in_ifname, "none", 0,
-                               "--:--:--");
+                       ttable_add_row(tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
+                                      oil_origin(c_oil), oil_mcastgrp(c_oil),
+                                      state_str, "none", in_ifname, "none", 0,
+                                      "--:--:--");
                }
        }
 
@@ -3687,8 +3858,8 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
                                json_object_object_add(json_oil, out_ifname,
                                                       json_ifp_out);
                        } else {
-                               vty_out(vty,
-                                       "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n",
+                               ttable_add_row(
+                                       tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
                                        &s_route->source, &s_route->group, "-",
                                        proto, in_ifname, out_ifname, ttl,
                                        oif_uptime);
@@ -3702,17 +3873,24 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
                }
 
                if (!json && !found_oif) {
-                       vty_out(vty,
-                               "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n",
-                               &s_route->source, &s_route->group, "-", proto,
-                               in_ifname, "none", 0, "--:--:--");
+                       ttable_add_row(tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
+                                      &s_route->source, &s_route->group, "-",
+                                      proto, in_ifname, "none", 0, "--:--:--");
                }
        }
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
                                              json_object *json,
-                                             struct vty *vty)
+                                             struct vty *vty,
+                                             struct ttable *tt)
 {
        json_object *json_group = NULL;
        json_object *json_source = NULL;
@@ -3747,12 +3925,12 @@ static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
                json_object_int_add(json_source, "wrongIf", c_oil->cc.wrong_if);
 
        } else {
-               vty_out(vty, "%-15pPAs %-15pPAs %-8llu %-7ld %-10ld %-7ld\n",
-                       oil_origin(c_oil), oil_mcastgrp(c_oil),
-                       c_oil->cc.lastused / 100,
-                       c_oil->cc.pktcnt - c_oil->cc.origpktcnt,
-                       c_oil->cc.bytecnt - c_oil->cc.origbytecnt,
-                       c_oil->cc.wrong_if - c_oil->cc.origwrong_if);
+               ttable_add_row(tt, "%pPAs|%pPAs|%llu|%ld|%ld|%ld",
+                              oil_origin(c_oil), oil_mcastgrp(c_oil),
+                              c_oil->cc.lastused / 100,
+                              c_oil->cc.pktcnt - c_oil->cc.origpktcnt,
+                              c_oil->cc.bytecnt - c_oil->cc.origbytecnt,
+                              c_oil->cc.wrong_if - c_oil->cc.origwrong_if);
        }
 }
 
@@ -3762,20 +3940,35 @@ void show_mroute_count(struct pim_instance *pim, struct vty *vty,
        struct listnode *node;
        struct channel_oil *c_oil;
        struct static_route *sr;
+       struct ttable *tt = NULL;
+       char *table = NULL;
 
        if (!json) {
                vty_out(vty, "\n");
 
-               vty_out(vty,
-                       "Source          Group           LastUsed Packets Bytes WrongIf  \n");
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(tt,
+                              "Source|Group|LastUsed|Packets|Bytes|WrongIf");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
        }
 
        /* Print PIM and IGMP route counts */
        frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
-               show_mroute_count_per_channel_oil(c_oil, json, vty);
+               show_mroute_count_per_channel_oil(c_oil, json, vty, tt);
 
        for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr))
-               show_mroute_count_per_channel_oil(&sr->c_oil, json, vty);
+               show_mroute_count_per_channel_oil(&sr->c_oil, json, vty, tt);
+
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       }
 }
 
 void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
@@ -3939,6 +4132,12 @@ void clear_mroute(struct pim_instance *pim)
                                igmp_group_delete(grp);
                        }
                }
+#else
+               struct gm_if *gm_ifp;
+
+               gm_ifp = pim_ifp->mld;
+               if (gm_ifp)
+                       gm_group_delete(gm_ifp);
 #endif
        }
 
@@ -3968,6 +4167,46 @@ void clear_pim_statistics(struct pim_instance *pim)
        }
 }
 
+int clear_pim_interface_traffic(const char *vrf, struct vty *vty)
+{
+       struct interface *ifp = NULL;
+       struct pim_interface *pim_ifp = NULL;
+
+       struct vrf *v = pim_cmd_lookup(vty, vrf);
+
+       if (!v)
+               return CMD_WARNING;
+
+       FOR_ALL_INTERFACES (v, ifp) {
+               pim_ifp = ifp->info;
+
+               if (!pim_ifp)
+                       continue;
+
+               pim_ifp->pim_ifstat_hello_recv = 0;
+               pim_ifp->pim_ifstat_hello_sent = 0;
+               pim_ifp->pim_ifstat_join_recv = 0;
+               pim_ifp->pim_ifstat_join_send = 0;
+               pim_ifp->pim_ifstat_prune_recv = 0;
+               pim_ifp->pim_ifstat_prune_send = 0;
+               pim_ifp->pim_ifstat_reg_recv = 0;
+               pim_ifp->pim_ifstat_reg_send = 0;
+               pim_ifp->pim_ifstat_reg_stop_recv = 0;
+               pim_ifp->pim_ifstat_reg_stop_send = 0;
+               pim_ifp->pim_ifstat_assert_recv = 0;
+               pim_ifp->pim_ifstat_assert_send = 0;
+               pim_ifp->pim_ifstat_bsm_rx = 0;
+               pim_ifp->pim_ifstat_bsm_tx = 0;
+#if PIM_IPV == 4
+               pim_ifp->igmp_ifstat_joins_sent = 0;
+               pim_ifp->igmp_ifstat_joins_failed = 0;
+               pim_ifp->igmp_peak_group_count = 0;
+#endif
+       }
+
+       return CMD_SUCCESS;
+}
+
 int pim_debug_pim_cmd(void)
 {
        PIM_DO_DEBUG_PIM_EVENTS;
@@ -3990,6 +4229,8 @@ int pim_no_debug_pim_cmd(void)
 
        PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
        PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+       PIM_DONT_DEBUG_BSM;
+       PIM_DONT_DEBUG_VXLAN;
        return CMD_SUCCESS;
 }
 
@@ -4043,7 +4284,7 @@ int pim_show_rpf_helper(const char *vrf, struct vty *vty, bool json)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4099,7 +4340,7 @@ int pim_show_rp_helper(const char *vrf, struct vty *vty, const char *group_str,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4170,7 +4411,7 @@ int pim_show_secondary_helper(const char *vrf, struct vty *vty)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4193,7 +4434,7 @@ int pim_show_statistics_helper(const char *vrf, struct vty *vty,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4222,7 +4463,7 @@ int pim_show_upstream_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
                vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
                return CMD_WARNING;
        }
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4286,7 +4527,7 @@ int pim_show_upstream_join_desired_helper(const char *vrf, struct vty *vty,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4308,7 +4549,7 @@ int pim_show_upstream_rpf_helper(const char *vrf, struct vty *vty, bool uj)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4332,7 +4573,7 @@ int pim_show_state_helper(const char *vrf, struct vty *vty,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4386,7 +4627,7 @@ int pim_show_multicast_helper(const char *vrf, struct vty *vty)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4421,7 +4662,7 @@ int pim_show_multicast_count_helper(const char *vrf, struct vty *vty, bool json)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4478,7 +4719,7 @@ int pim_show_mroute_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4541,7 +4782,7 @@ int pim_show_mroute_count_helper(const char *vrf, struct vty *vty, bool json)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4597,7 +4838,7 @@ int pim_show_mroute_summary_helper(const char *vrf, struct vty *vty, bool json)
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4683,10 +4924,10 @@ void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
                                            pim_ifp->pim_ifstat_join_recv);
                        json_object_int_add(json_row, "joinTx",
                                            pim_ifp->pim_ifstat_join_send);
-                       json_object_int_add(json_row, "pruneTx",
-                                           pim_ifp->pim_ifstat_prune_send);
                        json_object_int_add(json_row, "pruneRx",
                                            pim_ifp->pim_ifstat_prune_recv);
+                       json_object_int_add(json_row, "pruneTx",
+                                           pim_ifp->pim_ifstat_prune_send);
                        json_object_int_add(json_row, "registerRx",
                                            pim_ifp->pim_ifstat_reg_recv);
                        json_object_int_add(json_row, "registerTx",
@@ -4833,7 +5074,7 @@ int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
        if (!v)
                return CMD_WARNING;
 
-       pim = pim_get_pim_instance(v->vrf_id);
+       pim = v->info;
 
        if (!pim) {
                vty_out(vty, "%% Unable to find pim instance\n");
@@ -4847,3 +5088,13 @@ int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
 
        return CMD_SUCCESS;
 }
+
+void clear_pim_interfaces(struct pim_instance *pim)
+{
+       struct interface *ifp;
+
+       FOR_ALL_INTERFACES (pim->vrf, ifp) {
+               if (ifp->info)
+                       pim_neighbor_delete_all(ifp, "interface cleared");
+       }
+}
index 67e092e0794550af8f7d4a2ee46f978182f4e612..27c029670e59ce5c78bbb82f48d53170d311e7d2 100644 (file)
@@ -23,9 +23,6 @@
 struct pim_upstream;
 struct pim_instance;
 
-/* duplicated from pim_instance.h - needed to avoid dependency mess */
-struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id);
-
 const char *pim_cli_get_vrf_name(struct vty *vty);
 int pim_process_join_prune_cmd(struct vty *vty, const char *jpi_str);
 int pim_process_no_join_prune_cmd(struct vty *vty);
@@ -107,8 +104,8 @@ void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
                                 bool uj);
 int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
                                       pim_addr source, pim_addr group);
-int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty);
-void pim_show_nexthop(struct pim_instance *pim, struct vty *vty);
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj);
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj);
 int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
                                  const char *json, const char *interface);
 int pim_show_neighbors_vrf_all_cmd_helper(struct vty *vty, const char *json,
@@ -142,6 +139,7 @@ int clear_ip_mroute_count_command(struct vty *vty, const char *name);
 struct vrf *pim_cmd_lookup(struct vty *vty, const char *name);
 void clear_mroute(struct pim_instance *pim);
 void clear_pim_statistics(struct pim_instance *pim);
+int clear_pim_interface_traffic(const char *vrf, struct vty *vty);
 int pim_debug_pim_cmd(void);
 int pim_no_debug_pim_cmd(void);
 int pim_debug_pim_packets_cmd(const char *hello, const char *joins,
@@ -187,6 +185,7 @@ void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
                                bool uj);
 int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
                                      struct vty *vty, bool uj);
+void clear_pim_interfaces(struct pim_instance *pim);
 /*
  * Special Macro to allow us to get the correct pim_instance;
  */
index 738a548c7c5ee05dc17bdcdad1dc8ea111a10a22..e03e5a263074da692e280217c19fc91e51780187 100644 (file)
@@ -633,9 +633,7 @@ void pim_if_addr_add(struct connected *ifc)
                           with RNH address to receive update and add the
                           interface as nexthop. */
                        memset(&rpf, 0, sizeof(struct pim_rpf));
-                       rpf.rpf_addr.family = AF_INET;
-                       rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
-                       rpf.rpf_addr.u.prefix4 = ifc->address->u.prefix4;
+                       rpf.rpf_addr = pim_addr_from_prefix(ifc->address);
                        pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf);
                        if (pnc)
                                pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient,
@@ -801,22 +799,13 @@ void pim_if_addr_add_all(struct interface *ifp)
                pim_if_addr_add(ifc);
        }
 
-       if (!v4_addrs && v6_addrs && !if_is_loopback(ifp)) {
-               if (pim_ifp->pim_enable) {
-
-                       /* Interface has a valid primary address ? */
-                       if (!pim_addr_is_any(pim_ifp->primary_address)) {
-
-                               /* Interface has a valid socket ? */
-                               if (pim_ifp->pim_sock_fd < 0) {
-                                       if (pim_sock_add(ifp)) {
-                                               zlog_warn(
-                                                       "Failure creating PIM socket for interface %s",
-                                                       ifp->name);
-                                       }
-                               }
-                       }
-               } /* pim */
+       if (!v4_addrs && v6_addrs && !if_is_loopback(ifp) &&
+           pim_ifp->pim_enable && !pim_addr_is_any(pim_ifp->primary_address) &&
+           pim_ifp->pim_sock_fd < 0 && pim_sock_add(ifp)) {
+               /* Interface has a valid primary address ? */
+               /* Interface has a valid socket ? */
+               zlog_warn("Failure creating PIM socket for interface %s",
+                         ifp->name);
        }
        /*
         * PIM or IGMP/MLD is enabled on interface, and there is at least one
@@ -1768,9 +1757,7 @@ static int pim_ifp_down(struct interface *ifp)
 
        if (ifp->info) {
                pim_if_del_vif(ifp);
-#if PIM_IPV == 4
                pim_ifstat_reset(ifp);
-#endif
        }
 
        return 0;
index e2d2ab97c9fe3fcd65324a06e7e913bde9c9b623..ce252366ce435d54cdb36ec6f45db95ca75ca574 100644 (file)
@@ -539,10 +539,7 @@ struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp, pim_sgaddr *sg,
                if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
                        PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags);
 
-               if (ch->upstream)
-                       ch->upstream->flags |= up_flags;
-               else if (PIM_DEBUG_EVENTS)
-                       zlog_debug("%s:%pSG No Upstream found", __func__, sg);
+               ch->upstream->flags |= up_flags;
 
                return ch;
        }
@@ -637,8 +634,7 @@ static void ifjoin_to_noinfo(struct pim_ifchannel *ch)
        pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_NOINFO);
        pim_forward_stop(ch);
 
-       if (ch->upstream)
-               PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(ch->upstream->flags);
+       PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(ch->upstream->flags);
 
        PIM_IF_FLAG_UNSET_PROTO_PIM(ch->flags);
 
@@ -685,8 +681,7 @@ static void on_ifjoin_prune_pending_timer(struct thread *t)
                                struct pim_rpf rpf;
 
                                rpf.source_nexthop.interface = ifp;
-                               pim_addr_to_prefix(&rpf.rpf_addr,
-                                                  pim_ifp->primary_address);
+                               rpf.rpf_addr = pim_ifp->primary_address;
                                pim_jp_agg_single_upstream_send(
                                        &rpf, ch->upstream, 0);
                        }
@@ -697,31 +692,29 @@ static void on_ifjoin_prune_pending_timer(struct thread *t)
                         *  message on RP path upon prune timer expiry.
                         */
                        ch->ifjoin_state = PIM_IFJOIN_PRUNE;
-                       if (ch->upstream) {
-                               struct pim_upstream *parent =
-                                       ch->upstream->parent;
+                       struct pim_upstream *parent =
+                               ch->upstream->parent;
 
-                               pim_upstream_update_join_desired(pim_ifp->pim,
-                                                                ch->upstream);
+                       pim_upstream_update_join_desired(pim_ifp->pim,
+                                                        ch->upstream);
 
-                               pim_jp_agg_single_upstream_send(&parent->rpf,
-                                                               parent, true);
-                               /*
-                                * SGRpt prune pending expiry has to install
-                                * SG entry with empty olist to drop the SG
-                                * traffic incase no other intf exists.
-                                * On that scenario, SG entry wouldn't have
-                                * got installed until Prune pending timer
-                                * expired. So install now.
-                                */
-                               pim_channel_del_oif(
-                                       ch->upstream->channel_oil, ifp,
-                                       PIM_OIF_FLAG_PROTO_STAR, __func__);
-                               if (!ch->upstream->channel_oil->installed)
-                                       pim_upstream_mroute_add(
-                                               ch->upstream->channel_oil,
-                                               __func__);
-                       }
+                       pim_jp_agg_single_upstream_send(&parent->rpf,
+                                                       parent, true);
+                       /*
+                        * SGRpt prune pending expiry has to install
+                        * SG entry with empty olist to drop the SG
+                        * traffic in case no other intf exists.
+                        * In that scenario, SG entry wouldn't have
+                        * got installed until Prune pending timer
+                        * expired. So install now.
+                        */
+                       pim_channel_del_oif(
+                               ch->upstream->channel_oil, ifp,
+                               PIM_OIF_FLAG_PROTO_STAR, __func__);
+                       if (!ch->upstream->channel_oil->installed)
+                               pim_upstream_mroute_add(
+                                       ch->upstream->channel_oil,
+                                       __func__);
                }
                /* from here ch may have been deleted */
        }
@@ -751,7 +744,7 @@ static void check_recv_upstream(int is_join, struct interface *recv_ifp,
                return;
        }
 
-       rpf_addr = pim_addr_from_prefix(&up->rpf.rpf_addr);
+       rpf_addr = up->rpf.rpf_addr;
 
        /* upstream directed to RPF'(S,G) ? */
        if (pim_addr_cmp(upstream, rpf_addr)) {
index 749e259a7e015b7766f84f607aa6a12f4672b3bf..6ffeeb96571886e69d5e3b7b3550fd36df0ca8f7 100644 (file)
@@ -585,10 +585,14 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version,
                 * time; the same router keeps sending the Group-Specific
                 * Queries.
                 */
-               struct gm_group *group;
+               const struct gm_group *group;
+               const struct listnode *grpnode;
+
+               for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+                                         group)) {
+                       if (!group->t_group_query_retransmit_timer)
+                               continue;
 
-               group = find_group_by_addr(igmp, group_addr);
-               if (group && group->t_group_query_retransmit_timer) {
                        if (PIM_DEBUG_IGMP_TRACE)
                                zlog_debug(
                                        "%s: lower address query packet from %s is ignored when last member query interval timer is running",
@@ -1005,8 +1009,8 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
        uint32_t group_count = listcount(pim_ifp->gm_group_list);
 
        ++pim_ifp->pim->igmp_group_count;
-       if (pim_ifp->pim->igmp_group_count
-           == pim_ifp->pim->igmp_watermark_limit) {
+       if (pim_ifp->pim->igmp_group_count ==
+           pim_ifp->pim->gm_watermark_limit) {
                zlog_warn(
                        "IGMP group count reached watermark limit: %u(vrf: %s)",
                        pim_ifp->pim->igmp_group_count,
index 359e3db71867a2c4d68e30a321d2838ef2f7ca2a..4b85d45751a5207fb337a824a5eef5d5a37edc23 100644 (file)
@@ -36,6 +36,7 @@
 #include "pim_vty.h"
 #include "pim_bsm.h"
 #include "pim_mlag.h"
+#include "pim_sock.h"
 
 static void pim_instance_terminate(struct pim_instance *pim)
 {
@@ -70,6 +71,8 @@ static void pim_instance_terminate(struct pim_instance *pim)
 
        pim_msdp_exit(pim);
 
+       close(pim->reg_sock);
+
        pim_mroute_socket_disable(pim);
 
        XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
@@ -131,6 +134,10 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
 
        pim->last_route_change_time = -1;
 
+       pim->reg_sock = pim_reg_sock();
+       if (pim->reg_sock < 0)
+               assert(0);
+
        /* MSDP global timer defaults. */
        pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
        pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
index 23b9df4aff13a75d9dc13f317ff714d9695439ed..0da881557c0a8db07987cb82e9503e31a2e56c6f 100644 (file)
@@ -136,6 +136,7 @@ struct pim_instance {
 
        struct thread *thread;
        int mroute_socket;
+       int reg_sock; /* Socket to send register msg */
        int64_t mroute_socket_creation;
        int64_t mroute_add_events;
        int64_t mroute_add_last;
@@ -173,7 +174,7 @@ struct pim_instance {
        struct thread *t_gm_recv;
 
        unsigned int igmp_group_count;
-       unsigned int igmp_watermark_limit;
+       unsigned int gm_watermark_limit;
        unsigned int keep_alive_time;
        unsigned int rp_keep_alive_time;
 
index 1b722382b919d67464eb7db19621f8d8b3982466..8c7ae80d2bfc52ad49d50d26a5e7c3c09edc2250 100644 (file)
@@ -86,7 +86,7 @@ static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
                 * If the RP sent in the message is not
                 * our RP for the group, drop the message
                 */
-               rpf_addr = pim_addr_from_prefix(&rp->rpf_addr);
+               rpf_addr = rp->rpf_addr;
                if (pim_addr_cmp(sg->src, rpf_addr)) {
                        zlog_warn(
                                "%s: Specified RP(%pPAs) in join is different than our configured RP(%pPAs)",
@@ -427,7 +427,6 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
        size_t packet_left = 0;
        size_t packet_size = 0;
        size_t group_size = 0;
-       pim_addr rpf_addr;
 
        if (rpf->source_nexthop.interface)
                pim_ifp = rpf->source_nexthop.interface->info;
@@ -436,9 +435,8 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                return -1;
        }
 
-       rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
 
-       on_trace(__func__, rpf->source_nexthop.interface, rpf_addr);
+       on_trace(__func__, rpf->source_nexthop.interface, rpf->rpf_addr);
 
        if (!pim_ifp) {
                zlog_warn("%s: multicast not enabled on interface %s", __func__,
@@ -446,11 +444,11 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                return -1;
        }
 
-       if (pim_addr_is_any(rpf_addr)) {
+       if (pim_addr_is_any(rpf->rpf_addr)) {
                if (PIM_DEBUG_PIM_J_P)
                        zlog_debug(
                                "%s: upstream=%pPA is myself on interface %s",
-                               __func__, &rpf_addr,
+                               __func__, &rpf->rpf_addr,
                                rpf->source_nexthop.interface->name);
                return 0;
        }
@@ -473,7 +471,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                        memset(msg, 0, sizeof(*msg));
 
                        pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
-                                                 rpf_addr);
+                                                 rpf->rpf_addr);
                        msg->reserved = 0;
                        msg->holdtime = htons(PIM_JP_HOLDTIME);
 
@@ -492,7 +490,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                if (PIM_DEBUG_PIM_J_P)
                        zlog_debug(
                                "%s: sending (G)=%pPAs to upstream=%pPA on interface %s",
-                               __func__, &group->group, &rpf_addr,
+                               __func__, &group->group, &rpf->rpf_addr,
                                rpf->source_nexthop.interface->name);
 
                group_size = pim_msg_get_jp_group_size(group->sources);
@@ -516,7 +514,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
                        memset(msg, 0, sizeof(*msg));
 
                        pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
-                                                 rpf_addr);
+                                                 rpf->rpf_addr);
                        msg->reserved = 0;
                        msg->holdtime = htons(PIM_JP_HOLDTIME);
 
index 16774a03f5995463596a9891056631797cde6786..44ebbb4dead27a7acf3372dc29cf2e2cb9b674f9 100644 (file)
@@ -110,7 +110,6 @@ pim_jp_agg_get_interface_upstream_switch_list(struct pim_rpf *rpf)
        struct pim_interface *pim_ifp;
        struct pim_iface_upstream_switch *pius;
        struct listnode *node, *nnode;
-       pim_addr rpf_addr;
 
        if (!ifp)
                return NULL;
@@ -121,18 +120,16 @@ pim_jp_agg_get_interface_upstream_switch_list(struct pim_rpf *rpf)
        if (!pim_ifp)
                return NULL;
 
-       rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
-
        for (ALL_LIST_ELEMENTS(pim_ifp->upstream_switch_list, node, nnode,
                               pius)) {
-               if (!pim_addr_cmp(pius->address, rpf_addr))
+               if (!pim_addr_cmp(pius->address, rpf->rpf_addr))
                        break;
        }
 
        if (!pius) {
                pius = XCALLOC(MTYPE_PIM_JP_AGG_GROUP,
                               sizeof(struct pim_iface_upstream_switch));
-               pius->address = rpf_addr;
+               pius->address = rpf->rpf_addr;
                pius->us = list_new();
                listnode_add_sort(pim_ifp->upstream_switch_list, pius);
        }
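The pim_joinprune_send() and pim_jp_agg hunks above all follow one pattern: struct pim_rpf now stores rpf_addr as a plain pim_addr rather than a struct prefix, so the pim_addr_from_prefix() temporaries go away, equality checks use pim_addr_cmp(), and log formats move from %pFX to %pPA. The standalone sketch below shows that pattern with demo_* stand-ins; it is an illustration only, not the real definitions from pimd/pim_addr.h.

#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

/* Stand-in for pim_addr: struct in_addr on IPv4 builds, struct in6_addr
 * when PIM_IPV == 6 (an undefined PIM_IPV falls through to IPv4 here). */
#if defined(PIM_IPV) && PIM_IPV == 6
typedef struct in6_addr demo_pim_addr;
#else
typedef struct in_addr demo_pim_addr;
#endif

struct demo_rpf {
	demo_pim_addr rpf_addr;	/* previously: struct prefix rpf_addr */
};

/* Mirrors pim_addr_cmp() semantics: 0 means "same address". */
static int demo_addr_cmp(demo_pim_addr a, demo_pim_addr b)
{
	return memcmp(&a, &b, sizeof(a));
}

/* With the address stored directly, callers compare it in place instead
 * of first extracting it from a prefix. */
static bool demo_rpf_is_upstream(const struct demo_rpf *rpf,
				 demo_pim_addr neigh)
{
	return demo_addr_cmp(rpf->rpf_addr, neigh) == 0;
}

The same substitution is what the later pim_nht and pim_rp hunks apply to their own address fields.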
index b67849fd8f176ac50696faef30cdd7d83ebe5dad..7e57b405f2d2d2f72f8f463a80ba21b0b117f123 100644 (file)
@@ -279,7 +279,7 @@ size_t pim_msg_build_jp_groups(struct pim_jp_groups *grp,
                        struct pim_rpf *rpf = pim_rp_g(pim, source->up->sg.grp);
                        bits = PIM_ENCODE_SPARSE_BIT | PIM_ENCODE_WC_BIT
                               | PIM_ENCODE_RPT_BIT;
-                       stosend = pim_addr_from_prefix(&rpf->rpf_addr);
+                       stosend = rpf->rpf_addr;
                        /* Only Send SGRpt in case of *,G Join */
                        if (source->is_join)
                                up = source->up;
index c40ffaff4eb69aadbbde19d95d22089f5bd6ac26..72b16a5f49e1a5a34735b40d41e07521818f04e5 100644 (file)
@@ -334,10 +334,10 @@ static bool is_pim_interface(const struct lyd_node *dnode)
        pim_enable_dnode =
                yang_dnode_getf(dnode,
                                "%s/frr-pim:pim/address-family[address-family='%s']/pim-enable",
-                               if_xpath, "frr-routing:ipv4");
+                               if_xpath, FRR_PIM_AF_XPATH_VAL);
        igmp_enable_dnode = yang_dnode_getf(dnode,
                        "%s/frr-gmp:gmp/address-family[address-family='%s']/enable",
-                       if_xpath, "frr-routing:ipv4");
+                       if_xpath, FRR_PIM_AF_XPATH_VAL);
 
        if (((pim_enable_dnode) &&
             (yang_dnode_get_bool(pim_enable_dnode, "."))) ||
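The is_pim_interface() hunk swaps the literal "frr-routing:ipv4" for FRR_PIM_AF_XPATH_VAL so the same northbound check works in both the pimd and pim6d builds. A hedged sketch of that kind of compile-time selection, under a demo macro name rather than FRR's real one:

/* Hypothetical stand-in for the idea behind FRR_PIM_AF_XPATH_VAL: pick
 * the YANG address-family identity string at build time so shared
 * northbound code carries no hard-coded "ipv4"/"ipv6" literals. */
#if defined(PIM_IPV) && PIM_IPV == 6
#define DEMO_PIM_AF_XPATH_VAL "frr-routing:ipv6"
#else
#define DEMO_PIM_AF_XPATH_VAL "frr-routing:ipv4"
#endif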
index 0bbed1f99fc8562529588288b2b85961f2f94eac..6d6dbb64653b3c11cfc6fc97d5e32558d1ffbe56 100644 (file)
@@ -263,7 +263,7 @@ static void on_neighbor_jp_timer(struct thread *t)
                           neigh->upstream_jp_agg->count);
 
        rpf.source_nexthop.interface = neigh->interface;
-       pim_addr_to_prefix(&rpf.rpf_addr, neigh->source_addr);
+       rpf.rpf_addr = neigh->source_addr;
        pim_joinprune_send(&rpf, neigh->upstream_jp_agg);
 
        thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
index 564d16a60b70ab3c77acd84662d346d97998861c..9feb064e96f83d55ac0916784849db4505476843 100644 (file)
 void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
                           struct pim_nexthop_cache *pnc, int command)
 {
-       struct prefix *p;
+       struct prefix p;
        int ret;
 
-       p = &(pnc->rpf.rpf_addr);
-       ret = zclient_send_rnh(zclient, command, p, SAFI_UNICAST, false, false,
+       pim_addr_to_prefix(&p, pnc->rpf.rpf_addr);
+       ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
                               pim->vrf->vrf_id);
        if (ret == ZCLIENT_SEND_FAILURE)
                zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
@@ -65,7 +65,7 @@ void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
                zlog_debug(
                        "%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ",
                        __func__,
-                       (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", p,
+                       (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p,
                        pim->vrf->name, ret);
 
        return;
@@ -98,7 +98,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
        pnc->rp_list = list_new();
        pnc->rp_list->cmp = pim_rp_list_cmp;
 
-       snprintfrr(hash_name, sizeof(hash_name), "PNC %pFX(%s) Upstream Hash",
+       snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash",
                   &pnc->rpf.rpf_addr, pim->vrf->name);
        pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key,
                                              pim_upstream_equal, hash_name);
@@ -107,7 +107,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
 }
 
 static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
-                                            struct prefix *addr)
+                                            pim_addr addr)
 {
        struct pim_nexthop_cache *pnc = NULL;
        struct pim_rpf rpf;
@@ -115,7 +115,7 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
 
        zclient = pim_zebra_zclient_get();
        memset(&rpf, 0, sizeof(rpf));
-       rpf.rpf_addr = *addr;
+       rpf.rpf_addr = addr;
 
        pnc = pim_nexthop_cache_find(pim, &rpf);
        if (!pnc) {
@@ -124,8 +124,8 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
                                      ZEBRA_NEXTHOP_REGISTER);
                if (PIM_DEBUG_PIM_NHT_DETAIL)
                        zlog_debug(
-                               "%s: NHT cache and zebra notification added for %pFX(%s)",
-                               __func__, addr, pim->vrf->name);
+                               "%s: NHT cache and zebra notification added for %pPA(%s)",
+                               __func__, &addr, pim->vrf->name);
        }
 
        return pnc;
@@ -134,7 +134,7 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
 /* TBD: this does several distinct things and should probably be split up.
  * (checking state vs. returning pnc vs. adding upstream vs. adding rp)
  */
-int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
                              struct pim_upstream *up, struct rp_info *rp,
                              struct pim_nexthop_cache *out_pnc)
 {
@@ -143,7 +143,7 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
 
        pnc = pim_nht_get(pim, addr);
 
-       assertf(up || rp, "addr=%pFX", addr);
+       assertf(up || rp, "addr=%pPA", &addr);
 
        if (rp != NULL) {
                ch_node = listnode_lookup(pnc->rp_list, rp);
@@ -163,28 +163,21 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
        return 0;
 }
 
-#if PIM_IPV == 4
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
 {
        struct pim_nexthop_cache *pnc;
-       struct prefix pfx;
-
-       pfx.family = AF_INET;
-       pfx.prefixlen = IPV4_MAX_BITLEN;
-       pfx.u.prefix4 = addr;
 
-       pnc = pim_nht_get(pim, &pfx);
+       pnc = pim_nht_get(pim, addr);
 
        pnc->bsr_count++;
 }
-#endif /* PIM_IPV == 4 */
 
 static void pim_nht_drop_maybe(struct pim_instance *pim,
                               struct pim_nexthop_cache *pnc)
 {
        if (PIM_DEBUG_PIM_NHT)
                zlog_debug(
-                       "%s: NHT %pFX(%s) rp_list count:%d upstream count:%ld BSR count:%u",
+                       "%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u",
                        __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
                        pnc->rp_list->count, pnc->upstream_hash->count,
                        pnc->bsr_count);
@@ -206,7 +199,7 @@ static void pim_nht_drop_maybe(struct pim_instance *pim,
        }
 }
 
-void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
+void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
                                struct pim_upstream *up, struct rp_info *rp)
 {
        struct pim_nexthop_cache *pnc = NULL;
@@ -214,11 +207,11 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
        struct pim_upstream *upstream = NULL;
 
        /* Remove from RPF hash if it is the last entry */
-       lookup.rpf.rpf_addr = *addr;
+       lookup.rpf.rpf_addr = addr;
        pnc = hash_lookup(pim->rpf_hash, &lookup);
        if (!pnc) {
-               zlog_warn("attempting to delete nonexistent NHT entry %pFX",
-                         addr);
+               zlog_warn("attempting to delete nonexistent NHT entry %pPA",
+                         &addr);
                return;
        }
 
@@ -247,8 +240,7 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
        pim_nht_drop_maybe(pim, pnc);
 }
 
-#if PIM_IPV == 4
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
 {
        struct pim_nexthop_cache *pnc = NULL;
        struct pim_nexthop_cache lookup;
@@ -258,28 +250,26 @@ void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
         * is 0.0.0.0 as that the BSR has not been registered
         * for tracking yet.
         */
-       if (addr.s_addr == INADDR_ANY)
+       if (pim_addr_is_any(addr))
                return;
 
-       lookup.rpf.rpf_addr.family = AF_INET;
-       lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
-       lookup.rpf.rpf_addr.u.prefix4 = addr;
+       lookup.rpf.rpf_addr = addr;
 
        pnc = hash_lookup(pim->rpf_hash, &lookup);
 
        if (!pnc) {
-               zlog_warn("attempting to delete nonexistent NHT BSR entry %pI4",
+               zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",
                          &addr);
                return;
        }
 
-       assertf(pnc->bsr_count > 0, "addr=%pI4", &addr);
+       assertf(pnc->bsr_count > 0, "addr=%pPA", &addr);
        pnc->bsr_count--;
 
        pim_nht_drop_maybe(pim, pnc);
 }
 
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
                           struct interface *src_ifp, pim_addr src_ip)
 {
        struct pim_nexthop_cache *pnc = NULL;
@@ -288,9 +278,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
        struct nexthop *nh;
        struct interface *ifp;
 
-       lookup.rpf.rpf_addr.family = AF_INET;
-       lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
-       lookup.rpf.rpf_addr.u.prefix4 = bsr_addr;
+       lookup.rpf.rpf_addr = bsr_addr;
 
        pnc = hash_lookup(pim->rpf_hash, &lookup);
        if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) {
@@ -396,12 +384,11 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
                if (!nbr)
                        continue;
 
-               return nh->ifindex == src_ifp->ifindex
-                      && nhaddr.s_addr == src_ip.s_addr;
+               return nh->ifindex == src_ifp->ifindex &&
+                      (!pim_addr_cmp(nhaddr, src_ip));
        }
        return false;
 }
-#endif /* PIM_IPV == 4 */
 
 void pim_rp_nexthop_del(struct rp_info *rp_info)
 {
@@ -427,7 +414,7 @@ static void pim_update_rp_nh(struct pim_instance *pim,
 
                // Compute PIM RPF using cached nexthop
                if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
-                                            &rp_info->rp.rpf_addr,
+                                            rp_info->rp.rpf_addr,
                                             &rp_info->group, 1))
                        pim_rp_nexthop_del(rp_info);
        }
@@ -449,7 +436,7 @@ static int pim_update_upstream_nh_helper(struct hash_bucket *bucket, void *arg)
         * RPF nbr is now unreachable the MFC has already been updated
         * by pim_rpf_clear
         */
-       if (rpf_result != PIM_RPF_FAILURE)
+       if (rpf_result == PIM_RPF_CHANGED)
                pim_upstream_mroute_iif_update(up->channel_oil, __func__);
 
        if (rpf_result == PIM_RPF_CHANGED ||
@@ -497,9 +484,8 @@ uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
 
 static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                                   struct pim_nexthop_cache *pnc,
-                                  struct pim_nexthop *nexthop,
-                                  struct prefix *src, struct prefix *grp,
-                                  int neighbor_needed)
+                                  struct pim_nexthop *nexthop, pim_addr src,
+                                  struct prefix *grp, int neighbor_needed)
 {
        struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
        struct interface *ifps[router->multipath];
@@ -510,7 +496,6 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
        uint8_t nh_iter = 0, found = 0;
        uint32_t i, num_nbrs = 0;
        pim_addr nh_addr = nexthop->mrib_nexthop_addr;
-       pim_addr src_addr = pim_addr_from_prefix(src);
        pim_addr grp_addr = pim_addr_from_prefix(grp);
 
        if (!pnc || !pnc->nexthop_num || !nexthop)
@@ -544,7 +529,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
 
                        if (curr_route_valid &&
                            !pim_if_connected_to_source(nexthop->interface,
-                                                       src_addr)) {
+                                                       src)) {
                                nbr = pim_neighbor_find(
                                        nexthop->interface,
                                        nexthop->mrib_nexthop_addr);
@@ -565,7 +550,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                                        if (PIM_DEBUG_PIM_NHT)
                                                zlog_debug(
                                                        "%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection",
-                                                       __func__, &src_addr,
+                                                       __func__, &src,
                                                        &grp_addr,
                                                        pim->vrf->name,
                                                        nexthop->interface->name);
@@ -590,12 +575,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                        pim_addr nhaddr = nh_node->gate.ipv6;
 #endif
                        nbrs[i] = pim_neighbor_find(ifps[i], nhaddr);
-                       if (nbrs[i] ||
-                           pim_if_connected_to_source(ifps[i], src_addr))
+                       if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
                                num_nbrs++;
                }
        }
        if (pim->ecmp_enable) {
+               struct prefix src_pfx;
                uint32_t consider = pnc->nexthop_num;
 
                if (neighbor_needed && num_nbrs < consider)
@@ -605,7 +590,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                        return 0;
 
                // PIM ECMP flag is enable then choose ECMP path.
-               hash_val = pim_compute_ecmp_hash(src, grp);
+               pim_addr_to_prefix(&src_pfx, src);
+               hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
                mod_val = hash_val % consider;
        }
 
@@ -617,8 +603,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                        if (PIM_DEBUG_PIM_NHT)
                                zlog_debug(
                                        "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
-                                       __FILE__, __func__, first_ifindex,
-                                       &src_addr, pim->vrf->name);
+                                       __FILE__, __func__, first_ifindex, &src,
+                                       pim->vrf->name);
                        if (nh_iter == mod_val)
                                mod_val++; // Select nexthpath
                        nh_iter++;
@@ -629,15 +615,14 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
                                zlog_debug(
                                        "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
                                        __func__, ifp->name, pim->vrf->name,
-                                       first_ifindex, &src_addr);
+                                       first_ifindex, &src);
                        if (nh_iter == mod_val)
                                mod_val++; // Select nexthpath
                        nh_iter++;
                        continue;
                }
 
-               if (neighbor_needed &&
-                   !pim_if_connected_to_source(ifp, src_addr)) {
+               if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
                        nbr = nbrs[nh_iter];
                        if (!nbr && !if_is_loopback(ifp)) {
                                if (PIM_DEBUG_PIM_NHT)
@@ -661,14 +646,14 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
 #endif
                        nexthop->mrib_metric_preference = pnc->distance;
                        nexthop->mrib_route_metric = pnc->metric;
-                       nexthop->last_lookup = src_addr;
+                       nexthop->last_lookup = src;
                        nexthop->last_lookup_time = pim_time_monotonic_usec();
                        nexthop->nbr = nbr;
                        found = 1;
                        if (PIM_DEBUG_PIM_NHT)
                                zlog_debug(
                                        "%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d",
-                                       __func__, &src_addr, &grp_addr,
+                                       __func__, &src, &grp_addr,
                                        pim->vrf->name, ifp->name, &nh_addr,
                                        mod_val, nh_iter, pim->ecmp_enable);
                }
@@ -708,12 +693,12 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
        }
 
        if (cmd == ZEBRA_NEXTHOP_UPDATE) {
-               prefix_copy(&rpf.rpf_addr, &match);
+               rpf.rpf_addr = pim_addr_from_prefix(&match);
                pnc = pim_nexthop_cache_find(pim, &rpf);
                if (!pnc) {
                        if (PIM_DEBUG_PIM_NHT)
                                zlog_debug(
-                                       "%s: Skipping NHT update, addr %pFX is not in local cached DB.",
+                                       "%s: Skipping NHT update, addr %pPA is not in local cached DB.",
                                        __func__, &rpf.rpf_addr);
                        return 0;
                }
@@ -740,12 +725,10 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
                                 */
 #if PIM_IPV == 4
                                nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
-                               nexthop->gate.ipv4 =
-                                       pnc->rpf.rpf_addr.u.prefix4;
+                               nexthop->gate.ipv4 = pnc->rpf.rpf_addr;
 #else
                                nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
-                               nexthop->gate.ipv6 =
-                                       pnc->rpf.rpf_addr.u.prefix6;
+                               nexthop->gate.ipv6 = pnc->rpf.rpf_addr;
 #endif
                                break;
 #if PIM_IPV == 4
@@ -884,7 +867,7 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
 }
 
 int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
-                           struct pim_nexthop *nexthop, struct prefix *src,
+                           struct pim_nexthop *nexthop, pim_addr src,
                            struct prefix *grp, int neighbor_needed)
 {
        struct pim_nexthop_cache *pnc;
@@ -898,14 +881,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
        uint8_t i = 0;
        uint32_t hash_val = 0, mod_val = 0;
        uint32_t num_nbrs = 0;
-       pim_addr src_addr = pim_addr_from_prefix(src);
 
        if (PIM_DEBUG_PIM_NHT_DETAIL)
                zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
-                          __func__, &src_addr, pim->vrf->name,
+                          __func__, &src, pim->vrf->name,
                           nexthop->last_lookup_time);
 
-       rpf.rpf_addr = *src;
+       rpf.rpf_addr = src;
 
        pnc = pim_nexthop_cache_find(pim, &rpf);
        if (pnc) {
@@ -917,13 +899,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
        memset(nexthop_tab, 0,
               sizeof(struct pim_zlookup_nexthop) * router->multipath);
        num_ifindex =
-               zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
-                                      src_addr, PIM_NEXTHOP_LOOKUP_MAX);
+               zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
+                                      PIM_NEXTHOP_LOOKUP_MAX);
        if (num_ifindex < 1) {
                if (PIM_DEBUG_PIM_NHT)
                        zlog_warn(
                                "%s: could not find nexthop ifindex for address %pPA(%s)",
-                               __func__, &src_addr, pim->vrf->name);
+                               __func__, &src, pim->vrf->name);
                return 0;
        }
 
@@ -940,14 +922,14 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                if (ifps[i]) {
                        nbrs[i] = pim_neighbor_find(
                                ifps[i], nexthop_tab[i].nexthop_addr);
-                       if (nbrs[i] ||
-                           pim_if_connected_to_source(ifps[i], src_addr))
+                       if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
                                num_nbrs++;
                }
        }
 
        // If PIM ECMP enable then choose ECMP path.
        if (pim->ecmp_enable) {
+               struct prefix src_pfx;
                uint32_t consider = num_ifindex;
 
                if (neighbor_needed && num_nbrs < consider)
@@ -956,7 +938,8 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                if (consider == 0)
                        return 0;
 
-               hash_val = pim_compute_ecmp_hash(src, grp);
+               pim_addr_to_prefix(&src_pfx, src);
+               hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
                mod_val = hash_val % consider;
                if (PIM_DEBUG_PIM_NHT_DETAIL)
                        zlog_debug("%s: hash_val %u mod_val %u", __func__,
@@ -972,8 +955,8 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                        if (PIM_DEBUG_PIM_NHT)
                                zlog_debug(
                                        "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
-                                       __FILE__, __func__, first_ifindex,
-                                       &src_addr, pim->vrf->name);
+                                       __FILE__, __func__, first_ifindex, &src,
+                                       pim->vrf->name);
                        if (i == mod_val)
                                mod_val++;
                        i++;
@@ -985,14 +968,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                                zlog_debug(
                                        "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
                                        __func__, ifp->name, pim->vrf->name,
-                                       first_ifindex, &src_addr);
+                                       first_ifindex, &src);
                        if (i == mod_val)
                                mod_val++;
                        i++;
                        continue;
                }
-               if (neighbor_needed &&
-                   !pim_if_connected_to_source(ifp, src_addr)) {
+               if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
                        nbr = nbrs[i];
                        if (PIM_DEBUG_PIM_NHT_DETAIL)
                                zlog_debug("ifp name: %s(%s), pim nbr: %p",
@@ -1006,7 +988,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                                                __func__,
                                                &nexthop_tab[i].nexthop_addr,
                                                ifp->name, pim->vrf->name,
-                                               &src_addr);
+                                               &src);
                                i++;
                                continue;
                        }
@@ -1017,7 +999,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                                zlog_debug(
                                        "%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
                                        __func__, &nexthop_tab[i].nexthop_addr,
-                                       &src_addr, ifp->name, pim->vrf->name,
+                                       &src, ifp->name, pim->vrf->name,
                                        nexthop_tab[i].route_metric,
                                        nexthop_tab[i].protocol_distance);
                        /* update nexthop data */
@@ -1028,7 +1010,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                                nexthop_tab[i].protocol_distance;
                        nexthop->mrib_route_metric =
                                nexthop_tab[i].route_metric;
-                       nexthop->last_lookup = src_addr;
+                       nexthop->last_lookup = src;
                        nexthop->last_lookup_time = pim_time_monotonic_usec();
                        nexthop->nbr = nbr;
                        found = 1;
@@ -1042,24 +1024,19 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
                return 0;
 }
 
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
-                                    struct prefix *src, struct prefix *grp)
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+                                    struct prefix *grp)
 {
        struct pim_nexthop nhop;
        int vif_index;
        ifindex_t ifindex;
-       pim_addr src_addr;
-
-       if (PIM_DEBUG_PIM_NHT_DETAIL) {
-               src_addr = pim_addr_from_prefix(src);
-       }
 
        memset(&nhop, 0, sizeof(nhop));
        if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) {
                if (PIM_DEBUG_PIM_NHT)
                        zlog_debug(
                                "%s: could not find nexthop ifindex for address %pPA(%s)",
-                               __func__, &src_addr, pim->vrf->name);
+                               __func__, &src, pim->vrf->name);
                return -1;
        }
 
@@ -1069,7 +1046,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
                        "%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
                        __func__, ifindex,
                        ifindex2ifname(ifindex, pim->vrf->vrf_id),
-                       pim->vrf->name, &src_addr);
+                       pim->vrf->name, &src);
 
        vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex);
 
@@ -1077,7 +1054,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
                if (PIM_DEBUG_PIM_NHT) {
                        zlog_debug(
                                "%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
-                               __func__, vif_index, pim->vrf->name, &src_addr);
+                               __func__, vif_index, pim->vrf->name, &src);
                }
                return -2;
        }
index d51f622ece479284886d87241fbbcbea3817457d..240e61d98ff9cdc85815835abaa82488a231f8f9 100644 (file)
@@ -54,28 +54,28 @@ struct pim_nexthop_cache {
 };
 
 int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
-int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
                              struct pim_upstream *up, struct rp_info *rp,
                              struct pim_nexthop_cache *out_pnc);
-void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
+void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
                                struct pim_upstream *up, struct rp_info *rp);
 struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
                                                 struct pim_rpf *rpf);
 uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
 int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
-                           struct pim_nexthop *nexthop, struct prefix *src,
+                           struct pim_nexthop *nexthop, pim_addr src,
                            struct prefix *grp, int neighbor_needed);
 void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
                           struct pim_nexthop_cache *pnc, int command);
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
-                                    struct prefix *src, struct prefix *grp);
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+                                    struct prefix *grp);
 void pim_rp_nexthop_del(struct rp_info *rp_info);
 
 /* for RPF check on BSM message receipt */
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr bsr_addr);
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr bsr_addr);
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr);
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
 /* RPF(bsr_addr) == src_ip%src_ifp? */
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
                           struct interface *src_ifp, pim_addr src_ip);
 
 #endif
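With the pim_nht.h prototypes above taking pim_addr by value, a struct prefix is now built only where an external API still requires one, as pim_sendmsg_zebra_rnh() does before calling zclient_send_rnh(). A simplified, self-contained sketch of that boundary conversion (demo types, IPv4 only, not the real pim_addr_to_prefix()):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct demo_prefix {
	int family;
	unsigned char prefixlen;
	struct in_addr addr;
};

/* Build a host-route prefix for the tracked nexthop address, the way
 * the real code does just before handing it to the prefix-based API. */
static void demo_addr_to_prefix(struct demo_prefix *p, struct in_addr a)
{
	memset(p, 0, sizeof(*p));
	p->family = AF_INET;
	p->prefixlen = 32;
	p->addr = a;
}

/* Callers keep passing bare addresses; the prefix lives only on the
 * stack of the one function that talks to the prefix-based client. */
static void demo_register_nexthop(struct in_addr tracked)
{
	struct demo_prefix p;

	demo_addr_to_prefix(&p, tracked);
	/* ...hand &p to the zebra RNH call here... */
}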
index d0c4921d0744ff5dc255b720cc7e59bca9615ce2..5a2647b93f33e19234e32adcfe78152b74dc5003 100644 (file)
@@ -187,9 +187,6 @@ extern int pim_channel_oil_compare(const struct channel_oil *c1,
 DECLARE_RBTREE_UNIQ(rb_pim_oil, struct channel_oil, oil_rb,
                     pim_channel_oil_compare);
 
-
-extern struct list *pim_channel_oil_list;
-
 void pim_oil_init(struct pim_instance *pim);
 void pim_oil_terminate(struct pim_instance *pim);
 
index 8403340d861a39bb6b139f142d4779ed22bae8ac..4e2c44b172df88baa5b5147f698cf1598a1ae6c0 100644 (file)
@@ -139,7 +139,6 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
        struct pim_instance *pim = pim_ifp->pim;
        struct pim_upstream *up = NULL;
        struct pim_rpf *rp;
-       pim_addr rpf_addr;
        pim_sgaddr sg;
        struct listnode *up_node;
        struct pim_upstream *child;
@@ -174,12 +173,11 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
 
        rp = RP(pim_ifp->pim, sg.grp);
        if (rp) {
-               rpf_addr = pim_addr_from_prefix(&rp->rpf_addr);
                /* As per RFC 7761, Section 4.9.4:
                 * A special wildcard value consisting of an address field of
                 * all zeros can be used to indicate any source.
                 */
-               if ((pim_addr_cmp(sg.src, rpf_addr) == 0) ||
+               if ((pim_addr_cmp(sg.src, rp->rpf_addr) == 0) ||
                    pim_addr_is_any(sg.src)) {
                        handling_star = true;
                        sg.src = PIMADDR_ANY;
@@ -284,11 +282,10 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
        unsigned char *b1;
        struct pim_interface *pinfo;
        struct interface *ifp;
-       pim_addr dst = pim_addr_from_prefix(&rpg->rpf_addr);
 
        if (PIM_DEBUG_PIM_REG) {
                zlog_debug("Sending %s %sRegister Packet to %pPA", up->sg_str,
-                          null_register ? "NULL " : "", &dst);
+                          null_register ? "NULL " : "", &rpg->rpf_addr);
        }
 
        ifp = rpg->source_nexthop.interface;
@@ -310,7 +307,7 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
        if (PIM_DEBUG_PIM_REG) {
                zlog_debug("%s: Sending %s %sRegister Packet to %pPA on %s",
                           __func__, up->sg_str, null_register ? "NULL " : "",
-                          &dst, ifp->name);
+                          &rpg->rpf_addr, ifp->name);
        }
 
        memset(buffer, 0, 10000);
@@ -327,13 +324,14 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
         */
        src = pim_register_get_unicast_v6_addr(pinfo);
 #endif
-       pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,
+       pim_msg_build_header(src, rpg->rpf_addr, buffer,
+                            buf_size + PIM_MSG_REGISTER_LEN,
                             PIM_MSG_TYPE_REGISTER, false);
 
        if (!pinfo->pim_passive_enable)
                ++pinfo->pim_ifstat_reg_send;
 
-       if (pim_msg_send(pinfo->pim_sock_fd, src, dst, buffer,
+       if (pim_msg_send(pinfo->pim->reg_sock, src, rpg->rpf_addr, buffer,
                         buf_size + PIM_MSG_REGISTER_LEN, ifp)) {
                if (PIM_DEBUG_PIM_TRACE) {
                        zlog_debug(
@@ -618,7 +616,7 @@ int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
                }
        }
 
-       rp_addr = pim_addr_from_prefix(&(RP(pim, sg.grp))->rpf_addr);
+       rp_addr = (RP(pim, sg.grp))->rpf_addr;
        if (i_am_rp && (!pim_addr_cmp(dest_addr, rp_addr))) {
                sentRegisterStop = 0;
 
index 2b798b14b2f054c37692cb1e7cb1afee6333f527..1dce6b35626505d1dd62ceeab94a03014b08942a 100644 (file)
@@ -51,6 +51,7 @@
 #include "pim_bsm.h"
 #include "pim_util.h"
 #include "pim_ssm.h"
+#include "termtable.h"
 
 /* Cleanup pim->rpf_hash each node data */
 void pim_rp_list_hash_clean(void *data)
@@ -84,7 +85,7 @@ int pim_rp_list_cmp(void *v1, void *v2)
        /*
         * Sort by RP IP address
         */
-       ret = prefix_cmp(&rp1->rp.rpf_addr, &rp2->rp.rpf_addr);
+       ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
        if (ret)
                return ret;
 
@@ -119,7 +120,7 @@ void pim_rp_init(struct pim_instance *pim)
                XFREE(MTYPE_PIM_RP, rp_info);
                return;
        }
-       pim_addr_to_prefix(&rp_info->rp.rpf_addr, PIMADDR_ANY);
+       rp_info->rp.rpf_addr = PIMADDR_ANY;
 
        listnode_add(pim->rp_list, rp_info);
 
@@ -149,12 +150,9 @@ static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
 {
        struct listnode *node;
        struct rp_info *rp_info;
-       struct prefix rp_prefix;
-
-       pim_addr_to_prefix(&rp_prefix, rp);
 
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
-               if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+               if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
                    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
                        return rp_info;
                }
@@ -189,11 +187,9 @@ static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
 {
        struct listnode *node;
        struct rp_info *rp_info;
-       struct prefix rp_prefix;
 
-       pim_addr_to_prefix(&rp_prefix, rp);
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
-               if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+               if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
                    prefix_same(&rp_info->group, group))
                        return rp_info;
        }
@@ -344,11 +340,9 @@ static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
 {
        struct listnode *node;
        struct pim_secondary_addr *sec_addr;
-       pim_addr rpf_addr;
-
-       rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+       pim_addr sec_paddr;
 
-       if (!pim_addr_cmp(pim_ifp->primary_address, rpf_addr))
+       if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
                return 1;
 
        if (!pim_ifp->sec_addr_list) {
@@ -356,9 +350,11 @@ static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
        }
 
        for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
-               if (prefix_same(&sec_addr->addr, &rp_info->rp.rpf_addr)) {
+               sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
+               /* If the RP address is one of our own, it is enough to
+                * say "I am the RP"; the prefix length does not matter. */
+               if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
                        return 1;
-               }
        }
 
        return 0;
@@ -388,7 +384,6 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
        enum pim_rpf_result rpf_result;
        pim_addr old_upstream_addr;
        pim_addr new_upstream_addr;
-       struct prefix nht_p;
 
        old_upstream_addr = up->upstream_addr;
        pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
@@ -408,12 +403,11 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
         */
        if (!pim_addr_is_any(old_upstream_addr)) {
                /* Deregister addr with Zebra NHT */
-               pim_addr_to_prefix(&nht_p, old_upstream_addr);
                if (PIM_DEBUG_PIM_TRACE)
                        zlog_debug(
-                               "%s: Deregister upstream %s addr %pFX with Zebra NHT",
-                               __func__, up->sg_str, &nht_p);
-               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+                               "%s: Deregister upstream %s addr %pPA with Zebra NHT",
+                               __func__, up->sg_str, &old_upstream_addr);
+               pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
        }
 
        /* Update the upstream address */
@@ -446,7 +440,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
        struct listnode *node, *nnode;
        struct rp_info *tmp_rp_info;
        char buffer[BUFSIZ];
-       struct prefix nht_p;
+       pim_addr nht_p;
        struct route_node *rn = NULL;
        struct pim_upstream *up;
        bool upstream_updated = false;
@@ -456,7 +450,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
 
        rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
 
-       pim_addr_to_prefix(&rp_info->rp.rpf_addr, rp_addr);
+       rp_info->rp.rpf_addr = rp_addr;
        prefix_copy(&rp_info->group, &group);
        rp_info->rp_src = rp_src_flag;
 
@@ -482,8 +476,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                 */
                for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
                                       tmp_rp_info)) {
-                       if (prefix_same(&rp_info->rp.rpf_addr,
-                                       &tmp_rp_info->rp.rpf_addr)) {
+                       if (!pim_addr_cmp(rp_info->rp.rpf_addr,
+                                         tmp_rp_info->rp.rpf_addr)) {
                                if (tmp_rp_info->plist)
                                        pim_rp_del_config(pim, rp_addr, NULL,
                                                          tmp_rp_info->plist);
@@ -519,8 +513,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
                                       tmp_rp_info)) {
                        if (tmp_rp_info->plist &&
-                           prefix_same(&rp_info->rp.rpf_addr,
-                                       &tmp_rp_info->rp.rpf_addr)) {
+                           (!pim_addr_cmp(rp_info->rp.rpf_addr,
+                                          tmp_rp_info->rp.rpf_addr))) {
                                pim_rp_del_config(pim, rp_addr, NULL,
                                                  tmp_rp_info->plist);
                        }
@@ -539,7 +533,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                        nht_p = rp_all->rp.rpf_addr;
                        if (PIM_DEBUG_PIM_NHT_RP)
                                zlog_debug(
-                                       "%s: NHT Register rp_all addr %pFX grp %pFX ",
+                                       "%s: NHT Register rp_all addr %pPA grp %pFX ",
                                        __func__, &nht_p, &rp_all->group);
 
                        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
@@ -565,12 +559,12 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
 
                        pim_rp_check_interfaces(pim, rp_all);
                        pim_rp_refresh_group_to_rp_mapping(pim);
-                       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
+                       pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
                                                  NULL);
 
                        if (!pim_ecmp_nexthop_lookup(pim,
                                                     &rp_all->rp.source_nexthop,
-                                                    &nht_p, &rp_all->group, 1))
+                                                    nht_p, &rp_all->group, 1))
                                return PIM_RP_NO_PATH;
                        return PIM_SUCCESS;
                }
@@ -660,10 +654,10 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
        /* Register addr with Zebra NHT */
        nht_p = rp_info->rp.rpf_addr;
        if (PIM_DEBUG_PIM_NHT_RP)
-               zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
+               zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
                           __func__, &nht_p, &rp_info->group);
-       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
-       if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+       pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+       if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
                                     &rp_info->group, 1))
                return PIM_RP_NO_PATH;
 
@@ -698,7 +692,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
        struct prefix g_all;
        struct rp_info *rp_info;
        struct rp_info *rp_all;
-       struct prefix nht_p;
+       pim_addr nht_p;
        struct route_node *rn;
        bool was_plist = false;
        struct rp_info *trp_info;
@@ -753,9 +747,9 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
        /* Deregister addr with Zebra NHT */
        nht_p = rp_info->rp.rpf_addr;
        if (PIM_DEBUG_PIM_NHT_RP)
-               zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__,
+               zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
                           &nht_p);
-       pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+       pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
 
        if (!pim_get_all_mcast_group(&g_all))
                return PIM_RP_BAD_ADDRESS;
@@ -769,7 +763,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                         */
                        pim_addr rpf_addr;
 
-                       rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+                       rpf_addr = rp_info->rp.rpf_addr;
                        if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
                            pim_addr_is_any(up->sg.src)) {
                                struct prefix grp;
@@ -782,7 +776,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                                }
                        }
                }
-               pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
+               rp_all->rp.rpf_addr = PIMADDR_ANY;
                rp_all->i_am_rp = 0;
                return PIM_SUCCESS;
        }
@@ -817,7 +811,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
                 */
                pim_addr rpf_addr;
 
-               rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+               rpf_addr = rp_info->rp.rpf_addr;
                if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
                    pim_addr_is_any(up->sg.src)) {
                        struct prefix grp;
@@ -851,7 +845,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
 int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
                  struct prefix group, enum rp_source rp_src_flag)
 {
-       struct prefix nht_p;
+       pim_addr nht_p;
        struct route_node *rn;
        int result = 0;
        struct rp_info *rp_info = NULL;
@@ -873,7 +867,7 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
                return result;
        }
 
-       old_rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+       old_rp_addr = rp_info->rp.rpf_addr;
        if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
                if (rp_info->rp_src != rp_src_flag) {
                        rp_info->rp_src = rp_src_flag;
@@ -882,24 +876,21 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
                }
        }
 
-       nht_p.family = PIM_AF;
-       nht_p.prefixlen = PIM_MAX_BITLEN;
-
        /* Deregister old RP addr with Zebra NHT */
 
        if (!pim_addr_is_any(old_rp_addr)) {
                nht_p = rp_info->rp.rpf_addr;
                if (PIM_DEBUG_PIM_NHT_RP)
-                       zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
+                       zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
                                   __func__, &nht_p);
-               pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+               pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
        }
 
        pim_rp_nexthop_del(rp_info);
        listnode_delete(pim->rp_list, rp_info);
        /* Update the new RP address*/
 
-       pim_addr_to_prefix(&rp_info->rp.rpf_addr, new_rp_addr);
+       rp_info->rp.rpf_addr = new_rp_addr;
        rp_info->rp_src = rp_src_flag;
        rp_info->i_am_rp = 0;
 
@@ -926,11 +917,11 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
        /* Register new RP addr with Zebra NHT */
        nht_p = rp_info->rp.rpf_addr;
        if (PIM_DEBUG_PIM_NHT_RP)
-               zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
+               zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
                           __func__, &nht_p, &rp_info->group);
 
-       pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
-       if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+       pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+       if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
                                     &rp_info->group, 1)) {
                route_unlock_node(rn);
                return PIM_RP_NO_PATH;
@@ -949,7 +940,7 @@ void pim_rp_setup(struct pim_instance *pim)
 {
        struct listnode *node;
        struct rp_info *rp_info;
-       struct prefix nht_p;
+       pim_addr nht_p;
 
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
                if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
@@ -957,9 +948,9 @@ void pim_rp_setup(struct pim_instance *pim)
 
                nht_p = rp_info->rp.rpf_addr;
 
-               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+               pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
                if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
-                                            &nht_p, &rp_info->group, 1))
+                                            nht_p, &rp_info->group, 1))
                        if (PIM_DEBUG_PIM_NHT_RP)
                                zlog_debug(
                                        "Unable to lookup nexthop for rp specified");
@@ -994,12 +985,9 @@ void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
                if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
                        i_am_rp_changed = true;
                        rp_info->i_am_rp = 1;
-                       if (PIM_DEBUG_PIM_NHT_RP) {
-                               char rp[PREFIX_STRLEN];
-                               pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
-                                             rp, sizeof(rp));
-                               zlog_debug("%s: %s: i am rp", __func__, rp);
-                       }
+                       if (PIM_DEBUG_PIM_NHT_RP)
+                               zlog_debug("%s: %pPA: i am rp", __func__,
+                                          &rp_info->rp.rpf_addr);
                }
        }
 
@@ -1032,16 +1020,15 @@ void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
                if (old_i_am_rp != rp_info->i_am_rp) {
                        i_am_rp_changed = true;
                        if (PIM_DEBUG_PIM_NHT_RP) {
-                               char rp[PREFIX_STRLEN];
-                               pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
-                                             rp, sizeof(rp));
-                               if (rp_info->i_am_rp) {
-                                       zlog_debug("%s: %s: i am rp", __func__,
-                                                  rp);
-                               } else {
-                                       zlog_debug("%s: %s: i am no longer rp",
-                                                  __func__, rp);
-                               }
+                               if (rp_info->i_am_rp)
+                                       zlog_debug("%s: %pPA: i am rp",
+                                                  __func__,
+                                                  &rp_info->rp.rpf_addr);
+                               else
+                                       zlog_debug(
+                                               "%s: %pPA: i am no longer rp",
+                                               __func__,
+                                               &rp_info->rp.rpf_addr);
                        }
                }
        }
@@ -1088,18 +1075,18 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
        rp_info = pim_rp_find_match_group(pim, &g);
 
        if (rp_info) {
-               struct prefix nht_p;
+               pim_addr nht_p;
 
                /* Register addr with Zebra NHT */
                nht_p = rp_info->rp.rpf_addr;
                if (PIM_DEBUG_PIM_NHT_RP)
                        zlog_debug(
-                               "%s: NHT Register RP addr %pFX grp %pFX with Zebra",
+                               "%s: NHT Register RP addr %pPA grp %pFX with Zebra",
                                __func__, &nht_p, &rp_info->group);
-               pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+               pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
                pim_rpf_set_refresh_time(pim);
                (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
-                                             &nht_p, &rp_info->group, 1);
+                                             nht_p, &rp_info->group, 1);
                return (&rp_info->rp);
        }
 
@@ -1137,7 +1124,7 @@ int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
        }
 
        if (pim_addr_is_any(source))
-               *up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+               *up = rp_info->rp.rpf_addr;
        else
                *up = source;
 
@@ -1159,7 +1146,7 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
                if (rp_info->rp_src == RP_SRC_BSR)
                        continue;
 
-               rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+               rp_addr = rp_info->rp.rpf_addr;
                if (rp_info->plist)
                        vty_out(vty,
                                "%s" PIM_AF_NAME
@@ -1180,14 +1167,25 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
        struct rp_info *rp_info;
        struct rp_info *prev_rp_info = NULL;
        struct listnode *node;
+       struct ttable *tt = NULL;
+       char *table = NULL;
        char source[7];
+       char grp[INET6_ADDRSTRLEN];
 
        json_object *json_rp_rows = NULL;
        json_object *json_row = NULL;
 
-       if (!json)
-               vty_out(vty,
-                       "RP address       group/prefix-list   OIF               I am RP    Source   Group-Type\n");
+       if (!json) {
+               /* Prepare table. */
+               tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+               ttable_add_row(
+                       tt,
+                       "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
+               tt->style.cell.rpad = 2;
+               tt->style.corner = '+';
+               ttable_restyle(tt);
+       }
+
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
                if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
                        continue;
@@ -1215,10 +1213,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
                         * entry for the previous RP
                         */
                        if (prev_rp_info &&
-                           prefix_cmp(&prev_rp_info->rp.rpf_addr,
-                                      &rp_info->rp.rpf_addr)) {
+                           (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
+                                         rp_info->rp.rpf_addr))) {
                                json_object_object_addf(
-                                       json, json_rp_rows, "%pFXh",
+                                       json, json_rp_rows, "%pPA",
                                        &prev_rp_info->rp.rpf_addr);
                                json_rp_rows = NULL;
                        }
@@ -1227,7 +1225,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
                                json_rp_rows = json_object_new_array();
 
                        json_row = json_object_new_object();
-                       json_object_string_addf(json_row, "rpAddress", "%pFXh",
+                       json_object_string_addf(json_row, "rpAddress", "%pPA",
                                                &rp_info->rp.rpf_addr);
                        if (rp_info->rp.source_nexthop.interface)
                                json_object_string_add(
@@ -1257,34 +1255,33 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
 
                        json_object_array_add(json_rp_rows, json_row);
                } else {
-                       vty_out(vty, "%-15pFXh  ", &rp_info->rp.rpf_addr);
-
-                       if (rp_info->plist)
-                               vty_out(vty, "%-18s  ", rp_info->plist);
-                       else
-                               vty_out(vty, "%-18pFX  ", &rp_info->group);
-
-                       if (rp_info->rp.source_nexthop.interface)
-                               vty_out(vty, "%-16s  ",
-                                       rp_info->rp.source_nexthop
-                                               .interface->name);
-                       else
-                               vty_out(vty, "%-16s  ", "(Unknown)");
-
-                       if (rp_info->i_am_rp)
-                               vty_out(vty, "yes");
-                       else
-                               vty_out(vty, "no");
-
-                       vty_out(vty, "%14s", source);
-                       vty_out(vty, "%6s\n", group_type);
+                       prefix2str(&rp_info->group, grp, sizeof(grp));
+                       ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
+                                         &rp_info->rp.rpf_addr,
+                                         rp_info->plist
+                                         ? rp_info->plist
+                                         : grp,
+                                         rp_info->rp.source_nexthop.interface
+                                         ? rp_info->rp.source_nexthop
+                                               .interface->name
+                                               : "Unknown",
+                                         rp_info->i_am_rp
+                                         ? "yes"
+                                         : "no",
+                                         source, group_type);
                }
                prev_rp_info = rp_info;
        }
 
-       if (json) {
+       /* Dump the generated table. */
+       if (!json) {
+               table = ttable_dump(tt, "\n");
+               vty_out(vty, "%s\n", table);
+               XFREE(MTYPE_TMP, table);
+               ttable_del(tt);
+       } else {
                if (prev_rp_info && json_rp_rows)
-                       json_object_object_addf(json, json_rp_rows, "%pFXh",
+                       json_object_object_addf(json, json_rp_rows, "%pPA",
                                                &prev_rp_info->rp.rpf_addr);
        }
 }
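
The hunks above swap the hand-padded vty_out() columns for FRR's termtable helper: each row is added with a single pipe-separated format string and the finished table is rendered and freed once after the loop. A minimal sketch of that pattern, assuming the ttable_new()/TTSTYLE_BLANK constructor from lib/termtable.h (the allocation itself sits above the lines quoted here):

	#include "termtable.h"
	#include "vty.h"
	#include "memory.h"

	/* Hypothetical helper, not part of this commit; it only illustrates the
	 * add-row / dump / free sequence the converted function now follows. */
	static void show_rp_table_sketch(struct vty *vty)
	{
		struct ttable *tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		char *out;

		/* same restyle as done before the loop above: '+' corners */
		tt->style.corner = '+';
		ttable_restyle(tt);

		/* '|' separates columns inside one printf-style row */
		ttable_add_row(tt, "RP address|Group|Prefix-list");
		ttable_add_row(tt, "%s|%s|%s", "192.0.2.1", "224.0.0.0/4", "-");

		out = ttable_dump(tt, "\n");	/* render once, after all rows */
		vty_out(vty, "%s\n", out);
		XFREE(MTYPE_TMP, out);		/* ttable_dump() allocates MTYPE_TMP */
		ttable_del(tt);
	}
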
@@ -1294,7 +1291,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
        struct listnode *node = NULL;
        struct rp_info *rp_info = NULL;
        struct nexthop *nh_node = NULL;
-       struct prefix nht_p;
+       pim_addr nht_p;
        struct pim_nexthop_cache pnc;
 
        for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
@@ -1303,8 +1300,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
 
                nht_p = rp_info->rp.rpf_addr;
                memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
-               if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
-                                              &pnc))
+               if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
                        continue;
 
                for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
@@ -1329,7 +1325,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
 #endif
                        if (PIM_DEBUG_PIM_NHT_RP)
                                zlog_debug(
-                                       "%s: addr %pFXh new nexthop addr %pPAs interface %s",
+                                       "%s: addr %pPA new nexthop addr %pPAs interface %s",
                                        __func__, &nht_p, &nbr->source_addr,
                                        ifp1->name);
                }
index bd4dd31a2c58a499cea56bdddba16c62ec8687b4..a28278c5812634ca33cc06abf309bb06964270ba 100644 (file)
@@ -203,11 +203,10 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
 {
        struct pim_rpf *rpf = &up->rpf;
        struct pim_rpf saved;
-       struct prefix nht_p;
-       struct prefix src, grp;
+       pim_addr src;
+       struct prefix grp;
        bool neigh_needed = true;
        uint32_t saved_mrib_route_metric;
-       pim_addr rpf_addr;
 
        if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
                return PIM_RPF_OK;
@@ -226,25 +225,22 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
                old->rpf_addr = saved.rpf_addr;
        }
 
-       pim_addr_to_prefix(&nht_p, up->upstream_addr);
-
-       pim_addr_to_prefix(&src, up->upstream_addr); // RP or Src address
+       src = up->upstream_addr; // RP or Src address
        pim_addr_to_prefix(&grp, up->sg.grp);
 
        if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) ||
            PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
                neigh_needed = false;
-       pim_find_or_track_nexthop(pim, &nht_p, up, NULL, NULL);
-       if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
-                               neigh_needed)) {
+       pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL);
+       if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp,
+                                    neigh_needed)) {
                /* Route is Deleted in Zebra, reset the stored NH data */
                pim_upstream_rpf_clear(pim, up);
                pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
                return PIM_RPF_FAILURE;
        }
 
-       rpf_addr = pim_rpf_find_rpf_addr(up);
-       pim_addr_to_prefix(&rpf->rpf_addr, rpf_addr);
+       rpf->rpf_addr = pim_rpf_find_rpf_addr(up);
 
        if (pim_rpf_addr_is_inaddr_any(rpf) && PIM_DEBUG_ZEBRA) {
                /* RPF'(S,G) not found */
@@ -287,7 +283,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
        }
 
        /* detect change in RPF'(S,G) */
-       if (!prefix_same(&saved.rpf_addr, &rpf->rpf_addr) ||
+       if (pim_addr_cmp(saved.rpf_addr, rpf->rpf_addr) ||
            saved.source_nexthop.interface != rpf->source_nexthop.interface) {
                pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
                return PIM_RPF_CHANGED;
@@ -321,7 +317,7 @@ void pim_upstream_rpf_clear(struct pim_instance *pim,
                        router->infinite_assert_metric.metric_preference;
                up->rpf.source_nexthop.mrib_route_metric =
                        router->infinite_assert_metric.route_metric;
-               pim_addr_to_prefix(&up->rpf.rpf_addr, PIMADDR_ANY);
+               up->rpf.rpf_addr = PIMADDR_ANY;
                pim_upstream_mroute_iif_update(up->channel_oil, __func__);
        }
 }
@@ -375,15 +371,7 @@ static pim_addr pim_rpf_find_rpf_addr(struct pim_upstream *up)
 
 int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf)
 {
-       pim_addr rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
-
-       switch (rpf->rpf_addr.family) {
-       case AF_INET:
-       case AF_INET6:
-               return pim_addr_is_any(rpf_addr);
-       default:
-               return 0;
-       }
+       return pim_addr_is_any(rpf->rpf_addr);
 }
 
 int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
@@ -399,10 +387,10 @@ unsigned int pim_rpf_hash_key(const void *arg)
        const struct pim_nexthop_cache *r = arg;
 
 #if PIM_IPV == 4
-       return jhash_1word(r->rpf.rpf_addr.u.prefix4.s_addr, 0);
+       return jhash_1word(r->rpf.rpf_addr.s_addr, 0);
 #else
-       return jhash2(r->rpf.rpf_addr.u.prefix6.s6_addr32,
-                     array_size(r->rpf.rpf_addr.u.prefix6.s6_addr32), 0);
+       return jhash2(r->rpf.rpf_addr.s6_addr32,
+                     array_size(r->rpf.rpf_addr.s6_addr32), 0);
 #endif
 }
 
@@ -413,5 +401,5 @@ bool pim_rpf_equal(const void *arg1, const void *arg2)
        const struct pim_nexthop_cache *r2 =
                (const struct pim_nexthop_cache *)arg2;
 
-       return prefix_same(&r1->rpf.rpf_addr, &r2->rpf.rpf_addr);
+       return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr));
 }
index a2289b4cc5592be5e93998d7431e6b6f8437b5d7..2ddb9832f602cc70a9555fbb9b1a68eb369bdd30 100644 (file)
@@ -49,7 +49,7 @@ struct pim_nexthop {
 
 struct pim_rpf {
        struct pim_nexthop source_nexthop;
-       struct prefix rpf_addr; /* RPF'(S,G) */
+       pim_addr rpf_addr; /* RPF'(S,G) */
 };
 
 enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE };
index afc5d471183c3a978ffced4a5b962814e127b14d..b5a055c6aa94a7f4e620ffbdf76abb1f638408ed 100644 (file)
@@ -179,6 +179,46 @@ static inline int pim_setsockopt(int protocol, int fd, struct interface *ifp)
 }
 #endif
 
+int pim_reg_sock(void)
+{
+       int fd;
+       long flags;
+
+       frr_with_privs (&pimd_privs) {
+               fd = socket(PIM_AF, SOCK_RAW, IPPROTO_RAW);
+       }
+
+       if (fd < 0) {
+               zlog_warn("Could not create raw socket: errno=%d: %s", errno,
+                         safe_strerror(errno));
+               return PIM_SOCK_ERR_SOCKET;
+       }
+
+       if (sockopt_reuseaddr(fd)) {
+               close(fd);
+               return PIM_SOCK_ERR_REUSE;
+       }
+
+       flags = fcntl(fd, F_GETFL, 0);
+       if (flags < 0) {
+               zlog_warn(
+                       "Could not get fcntl(F_GETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
+                       fd, errno, safe_strerror(errno));
+               close(fd);
+               return PIM_SOCK_ERR_NONBLOCK_GETFL;
+       }
+
+       if (fcntl(fd, F_SETFL, flags | O_NONBLOCK)) {
+               zlog_warn(
+                       "Could not set fcntl(F_SETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
+                       fd, errno, safe_strerror(errno));
+               close(fd);
+               return PIM_SOCK_ERR_NONBLOCK_SETFL;
+       }
+
+       return fd;
+}
+
 int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp,
                     uint8_t loop)
 {
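
The new pim_reg_sock() opens a raw PIM_AF/IPPROTO_RAW socket inside a frr_with_privs block, sets SO_REUSEADDR and O_NONBLOCK, and returns either the descriptor or a PIM_SOCK_ERR_* code. A hypothetical caller sketch (not from this commit; it assumes the PIM_SOCK_ERR_* constants are negative, as the fd < 0 handling above implies):

	#include "log.h"	/* zlog_warn() */
	#include "pim_sock.h"	/* pim_reg_sock(), PIM_SOCK_ERR_* */

	/* Hypothetical caller, for illustration only. */
	static int open_register_socket(void)
	{
		int fd = pim_reg_sock();

		if (fd < 0) {
			/* fd carries a PIM_SOCK_ERR_* code here, not errno */
			zlog_warn("register socket setup failed (%d)", fd);
			return -1;
		}

		/* fd is a raw, non-blocking socket with SO_REUSEADDR set */
		return fd;
	}
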
index ea9f7009bc7f0d747d05c8e34631764f6ac1765f..4dcb9e6466cdf687e5f794ac2b1de4f9df486cb4 100644 (file)
@@ -51,4 +51,6 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
 
 int pim_socket_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
 
+int pim_reg_sock(void);
+
 #endif /* PIM_SOCK_H */
index 581b855f924246543bdcb6b30f453f95a65dd4b6..f5449d28242094d2ba955b813bb206feb2318e66 100644 (file)
@@ -97,10 +97,11 @@ int pim_static_add(struct pim_instance *pim, struct interface *iif,
        }
 
        for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
-               if (!pim_addr_cmp(s_route->group, group)
-                   && !pim_addr_cmp(s_route->source, source)) {
-                       if (s_route->iif == iif_index
-                           && s_route->oif_ttls[oif_index]) {
+               if (!pim_addr_cmp(s_route->group, group) &&
+                   !pim_addr_cmp(s_route->source, source) &&
+                   (s_route->iif == iif_index)) {
+
+                       if (s_route->oif_ttls[oif_index]) {
                                zlog_warn(
                                        "%s %s: Unable to add static route: Route already exists (iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
                                        __FILE__, __func__, iif_index,
@@ -122,42 +123,11 @@ int pim_static_add(struct pim_instance *pim, struct interface *iif,
 
                        /* Route exists and has the same input interface, but
                         * adding a new output interface */
-                       if (s_route->iif == iif_index) {
-                               s_route->oif_ttls[oif_index] = 1;
-                               oil_if_set(&s_route->c_oil, oif_index, 1);
-                               s_route->c_oil.oif_creation[oif_index] =
-                                       pim_time_monotonic_sec();
-                               ++s_route->c_oil.oil_ref_count;
-                       } else {
-                               /* input interface changed */
-                               s_route->iif = iif_index;
-                               pim_static_mroute_iif_update(
-                                       &s_route->c_oil, iif_index, __func__);
-
-#ifdef PIM_ENFORCE_LOOPFREE_MFC
-                               /* check to make sure the new input was not an
-                                * old output */
-                               if (s_route->oif_ttls[iif_index]) {
-                                       s_route->oif_ttls[iif_index] = 0;
-                                       s_route->c_oil.oif_creation[iif_index] =
-                                               0;
-                                       oil_if_set(&s_route->c_oil, iif_index,
-                                                  0);
-                                       --s_route->c_oil.oil_ref_count;
-                               }
-#endif
-
-                               /* now add the new output, if it is new */
-                               if (!s_route->oif_ttls[oif_index]) {
-                                       s_route->oif_ttls[oif_index] = 1;
-                                       s_route->c_oil.oif_creation[oif_index] =
-                                               pim_time_monotonic_sec();
-                                       oil_if_set(&s_route->c_oil, oif_index,
-                                                  1);
-                                       ++s_route->c_oil.oil_ref_count;
-                               }
-                       }
-
+                       s_route->oif_ttls[oif_index] = 1;
+                       oil_if_set(&s_route->c_oil, oif_index, 1);
+                       s_route->c_oil.oif_creation[oif_index] =
+                               pim_time_monotonic_sec();
+                       ++s_route->c_oil.oil_ref_count;
                        break;
                }
        }
index be8b6a9f4f94178bcc2b82d92e53a7801d405079..44817760977c81d1740c68be831ff8df73c29081 100644 (file)
 #include "prefix.h"
 #include "pim_addr.h"
 
-#include "pim_addr.h"
-
+#if PIM_IPV == 4
 /*
- * Longest possible length of a (S,G) string is 36 bytes
+ * Longest possible length of an IPv4 (S,G) string is 34 bytes
  * 123.123.123.123 = 16 * 2
  * (,) = 3
  * NULL Character at end = 1
- * (123.123.123.123,123,123,123,123)
+ * (123.123.123.123,123.123.123.123)
  */
 #define PIM_SG_LEN PREFIX_SG_STR_LEN
+#else
+/*
+ * Longest possible length of an IPv6 (S,G) string is 96 bytes
+ * INET6_ADDRSTRLEN * 2 = 46 * 2
+ * (,) = 3
+ * NULL Character at end = 1
+ */
+#define PIM_SG_LEN 96
+#endif
+
 #define pim_inet4_dump prefix_mcast_inet4_dump
 
 void pim_addr_dump(const char *onfail, struct prefix *p, char *buf,
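
As a quick cross-check of the IPv6 sizing comment above (a hypothetical standalone assertion, not part of the commit; it relies on the usual INET6_ADDRSTRLEN value of 46):

	#include <netinet/in.h>		/* INET6_ADDRSTRLEN */

	/* two address strings + "(" + "," + ")" + trailing NUL = 96 */
	_Static_assert(INET6_ADDRSTRLEN * 2 + 3 + 1 == 96,
		       "PIM_SG_LEN must cover the longest IPv6 (S,G) string");
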
index a69177a6e5492a0b703604dc89fa476bca936ea4..3b73c430b03336d24945ca271a5ad28679942a90 100644 (file)
@@ -34,7 +34,7 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
        struct pim_interface *pim_oif = oif->info;
        int input_iface_vif_index = 0;
        pim_addr vif_source;
-       struct prefix src, grp;
+       struct prefix grp;
        struct pim_nexthop nexthop;
        struct pim_upstream *up = NULL;
 
@@ -43,20 +43,19 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
                return pim_channel_oil_add(pim, &sg, __func__);
        }
 
-       pim_addr_to_prefix(&src, vif_source); // RP or Src addr
        pim_addr_to_prefix(&grp, sg.grp);
 
        up = pim_upstream_find(pim, &sg);
        if (up) {
                memcpy(&nexthop, &up->rpf.source_nexthop,
                       sizeof(struct pim_nexthop));
-               pim_ecmp_nexthop_lookup(pim, &nexthop, &src, &grp, 0);
+               pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp, 0);
                if (nexthop.interface)
                        input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
                                pim, nexthop.interface->ifindex);
        } else
                input_iface_vif_index =
-                       pim_ecmp_fib_lookup_if_vif_index(pim, &src, &grp);
+                       pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp);
 
        if (PIM_DEBUG_ZEBRA)
                zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
index 90bf7990931883a8f63863861d4a5c57026cf35b..65975ce14640bca825fc723a93374315ac92c497 100644 (file)
@@ -218,7 +218,7 @@ int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope,
        *buf++ = PIM_MSG_ADDRESS_FAMILY;
        *buf++ = 0;
        *buf++ = flags;
-       *buf++ = sizeof(group) / 8;
+       *buf++ = sizeof(group) * 8;
        memcpy(buf, &group, sizeof(group));
        buf += sizeof(group);
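
The one-character fix above matters because the Encoded-Group Address header carries its mask length in bits: assuming group is a pim_addr here (struct in_addr for PIM_IPV=4, struct in6_addr for PIM_IPV=6), the field must read 32 or 128, whereas the old division by eight yielded 0 or 2. A hypothetical standalone check of that arithmetic:

	#include <netinet/in.h>

	/* the mask length is expressed in bits on the wire */
	_Static_assert(sizeof(struct in_addr) * 8 == 32, "IPv4 group mask length");
	_Static_assert(sizeof(struct in6_addr) * 8 == 128, "IPv6 group mask length");
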
 
index 54a7f59ca44074d6b75fa7d15bafb8481a846c9f..0742daa4de6d1a55be3d7303d30410a9ec00c517 100644 (file)
@@ -192,7 +192,6 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
        struct listnode *node, *nnode;
        struct pim_ifchannel *ch;
        bool notify_msdp = false;
-       struct prefix nht_p;
 
        if (PIM_DEBUG_PIM_TRACE)
                zlog_debug(
@@ -267,12 +266,11 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
         */
        if (!pim_addr_is_any(up->upstream_addr)) {
                /* Deregister addr with Zebra NHT */
-               pim_addr_to_prefix(&nht_p, up->upstream_addr);
                if (PIM_DEBUG_PIM_TRACE)
                        zlog_debug(
-                               "%s: Deregister upstream %s addr %pFX with Zebra NHT",
-                               __func__, up->sg_str, &nht_p);
-               pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+                               "%s: Deregister upstream %s addr %pPA with Zebra NHT",
+                               __func__, up->sg_str, &up->upstream_addr);
+               pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL);
        }
 
        XFREE(MTYPE_PIM_UPSTREAM, up);
@@ -290,16 +288,13 @@ void pim_upstream_send_join(struct pim_upstream *up)
        }
 
        if (PIM_DEBUG_PIM_TRACE) {
-               char rpf_str[PREFIX_STRLEN];
-               pim_addr_dump("<rpf?>", &up->rpf.rpf_addr, rpf_str,
-                             sizeof(rpf_str));
-               zlog_debug("%s: RPF'%s=%s(%s) for Interface %s", __func__,
-                          up->sg_str, rpf_str,
+               zlog_debug("%s: RPF'%s=%pPA(%s) for Interface %s", __func__,
+                          up->sg_str, &up->rpf.rpf_addr,
                           pim_upstream_state2str(up->join_state),
                           up->rpf.source_nexthop.interface->name);
                if (pim_rpf_addr_is_inaddr_any(&up->rpf)) {
-                       zlog_debug("%s: can't send join upstream: RPF'%s=%s",
-                                  __func__, up->sg_str, rpf_str);
+                       zlog_debug("%s: can't send join upstream: RPF'%s=%pPA",
+                                  __func__, up->sg_str, &up->rpf.rpf_addr);
                        /* warning only */
                }
        }
@@ -345,8 +340,8 @@ static void join_timer_stop(struct pim_upstream *up)
        THREAD_OFF(up->t_join_timer);
 
        if (up->rpf.source_nexthop.interface)
-               nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
-                                              &up->rpf.rpf_addr);
+               nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+                                       up->rpf.rpf_addr);
 
        if (nbr)
                pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
@@ -359,8 +354,8 @@ void join_timer_start(struct pim_upstream *up)
        struct pim_neighbor *nbr = NULL;
 
        if (up->rpf.source_nexthop.interface) {
-               nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
-                                              &up->rpf.rpf_addr);
+               nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+                                       up->rpf.rpf_addr);
 
                if (PIM_DEBUG_PIM_EVENTS) {
                        zlog_debug(
@@ -428,7 +423,7 @@ void pim_update_suppress_timers(uint32_t suppress_time)
        }
 }
 
-void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
                                int holdtime)
 {
        long t_joinsuppress_msec;
@@ -451,23 +446,19 @@ void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
                        pim_time_timer_remain_msec(up->t_join_timer);
        else {
                /* Remove it from jp agg from the nbr for suppression */
-               nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
-                                              &up->rpf.rpf_addr);
+               nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+                                       up->rpf.rpf_addr);
                if (nbr) {
                        join_timer_remain_msec =
                                pim_time_timer_remain_msec(nbr->jp_timer);
                }
        }
 
-       if (PIM_DEBUG_PIM_TRACE) {
-               char rpf_str[INET_ADDRSTRLEN];
-
-               pim_addr_dump("<rpf?>", &rpf, rpf_str, sizeof(rpf_str));
+       if (PIM_DEBUG_PIM_TRACE)
                zlog_debug(
-                       "%s %s: detected Join%s to RPF'(S,G)=%s: join_timer=%ld msec t_joinsuppress=%ld msec",
-                       __FILE__, __func__, up->sg_str, rpf_str,
+                       "%s %s: detected Join%s to RPF'(S,G)=%pPA: join_timer=%ld msec t_joinsuppress=%ld msec",
+                       __FILE__, __func__, up->sg_str, &rpf,
                        join_timer_remain_msec, t_joinsuppress_msec);
-       }
 
        if (join_timer_remain_msec < t_joinsuppress_msec) {
                if (PIM_DEBUG_PIM_TRACE) {
@@ -507,8 +498,8 @@ void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
                /* upstream join tracked with neighbor jp timer */
                struct pim_neighbor *nbr;
 
-               nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
-                                              &up->rpf.rpf_addr);
+               nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+                                       up->rpf.rpf_addr);
                if (nbr)
                        join_timer_remain_msec =
                                pim_time_timer_remain_msec(nbr->jp_timer);
@@ -517,17 +508,11 @@ void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
                        join_timer_remain_msec = t_override_msec + 1;
        }
 
-       if (PIM_DEBUG_PIM_TRACE) {
-               char rpf_str[INET_ADDRSTRLEN];
-
-               pim_addr_dump("<rpf?>", &up->rpf.rpf_addr, rpf_str,
-                             sizeof(rpf_str));
-
+       if (PIM_DEBUG_PIM_TRACE)
                zlog_debug(
-                       "%s: to RPF'%s=%s: join_timer=%ld msec t_override=%d msec",
-                       debug_label, up->sg_str, rpf_str,
+                       "%s: to RPF'%s=%pPA: join_timer=%ld msec t_override=%d msec",
+                       debug_label, up->sg_str, &up->rpf.rpf_addr,
                        join_timer_remain_msec, t_override_msec);
-       }
 
        if (join_timer_remain_msec > t_override_msec) {
                if (PIM_DEBUG_PIM_TRACE) {
@@ -842,9 +827,7 @@ void pim_upstream_fill_static_iif(struct pim_upstream *up,
        up->rpf.source_nexthop.mrib_metric_preference =
                ZEBRA_CONNECT_DISTANCE_DEFAULT;
        up->rpf.source_nexthop.mrib_route_metric = 0;
-       up->rpf.rpf_addr.family = AF_INET;
-       up->rpf.rpf_addr.u.prefix4.s_addr = PIM_NET_INADDR_ANY;
-
+       up->rpf.rpf_addr = PIMADDR_ANY;
 }
 
 static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
@@ -903,7 +886,7 @@ static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
                router->infinite_assert_metric.metric_preference;
        up->rpf.source_nexthop.mrib_route_metric =
                router->infinite_assert_metric.route_metric;
-       pim_addr_to_prefix(&up->rpf.rpf_addr, PIMADDR_ANY);
+       up->rpf.rpf_addr = PIMADDR_ANY;
        up->ifchannels = list_new();
        up->ifchannels->cmp = (int (*)(void *, void *))pim_ifchannel_compare;
 
@@ -947,7 +930,16 @@ static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
                                        __func__, up->sg_str);
                }
 
-               if (up->rpf.source_nexthop.interface) {
+               /* Consider the case where an (S,G,rpt) prune is received and
+                * this upstream is created because of it: per the RFC we must
+                * behave as in NOINFO state until the prune-pending timer
+                * expires, so do not install the mroute while the OIF is NULL.
+                * This is for the PIM conformance PIM-SM 16.3 fix.  When the
+                * prune-pending timer pops, this mroute gets installed with an
+                * empty OIF list. */
+               if (up->rpf.source_nexthop.interface &&
+                   !(pim_upstream_empty_inherited_olist(up) && (ch != NULL) &&
+                     PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))) {
                        pim_upstream_mroute_iif_update(up->channel_oil,
                                        __func__);
                }
@@ -1069,15 +1061,13 @@ struct pim_upstream *pim_upstream_add(struct pim_instance *pim, pim_sgaddr *sg,
        }
 
        if (PIM_DEBUG_PIM_TRACE) {
-               if (up)
-                       zlog_debug("%s(%s): %s, iif %pFX (%s) found: %d: ref_count: %d",
-                  __func__, name,
-                  up->sg_str, &up->rpf.rpf_addr, up->rpf.source_nexthop.interface ?
-                   up->rpf.source_nexthop.interface->name : "Unknown" ,
-                  found, up->ref_count);
-               else
-                       zlog_debug("%s(%s): (%pSG) failure to create", __func__,
-                                  name, sg);
+               zlog_debug(
+                       "%s(%s): %s, iif %pPA (%s) found: %d: ref_count: %d",
+                       __func__, name, up->sg_str, &up->rpf.rpf_addr,
+                       up->rpf.source_nexthop.interface ? up->rpf.source_nexthop
+                                                                  .interface->name
+                                                        : "Unknown",
+                       found, up->ref_count);
        }
 
        return up;
@@ -1274,7 +1264,7 @@ void pim_upstream_rpf_genid_changed(struct pim_instance *pim,
        frr_each (rb_pim_upstream, &pim->upstream_head, up) {
                pim_addr rpf_addr;
 
-               rpf_addr = pim_addr_from_prefix(&up->rpf.rpf_addr);
+               rpf_addr = up->rpf.rpf_addr;
 
                if (PIM_DEBUG_PIM_TRACE)
                        zlog_debug(
index 8feffb8fdbb630e106572f7434be36e0bbb15f63..3841d1af7b7b227b0f1d975dda82b609bd7e6bab 100644 (file)
@@ -317,7 +317,7 @@ void pim_upstream_update_join_desired(struct pim_instance *pim,
                                      struct pim_upstream *up);
 
 void pim_update_suppress_timers(uint32_t suppress_time);
-void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
                                int holdtime);
 
 void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
index 76b230bbf1eb1ec037e88fcceb80d6c8845da641..cfbd436981f200dd6fc3b54ac986af7478d11429 100644 (file)
@@ -78,11 +78,11 @@ int pim_debug_config_write(struct vty *vty)
 
        /* PIM_DEBUG_MROUTE catches _DETAIL too */
        if (router->debugs & PIM_MASK_MROUTE) {
-               vty_out(vty, "debug mroute\n");
+               vty_out(vty, "debug " PIM_MROUTE_DBG "\n");
                ++writes;
        }
        if (PIM_DEBUG_MROUTE_DETAIL) {
-               vty_out(vty, "debug mroute detail\n");
+               vty_out(vty, "debug " PIM_MROUTE_DBG " detail\n");
                ++writes;
        }
 
@@ -92,34 +92,34 @@ int pim_debug_config_write(struct vty *vty)
        }
 
        if (PIM_DEBUG_PIM_EVENTS) {
-               vty_out(vty, "debug pim events\n");
+               vty_out(vty, "debug " PIM_AF_DBG " events\n");
                ++writes;
        }
        if (PIM_DEBUG_PIM_PACKETS) {
-               vty_out(vty, "debug pim packets\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packets\n");
                ++writes;
        }
        if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
-               vty_out(vty, "debug pim packet-dump send\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packet-dump send\n");
                ++writes;
        }
        if (PIM_DEBUG_PIM_PACKETDUMP_RECV) {
-               vty_out(vty, "debug pim packet-dump receive\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packet-dump receive\n");
                ++writes;
        }
 
        /* PIM_DEBUG_PIM_TRACE catches _DETAIL too */
        if (router->debugs & PIM_MASK_PIM_TRACE) {
-               vty_out(vty, "debug pim trace\n");
+               vty_out(vty, "debug " PIM_AF_DBG " trace\n");
                ++writes;
        }
        if (PIM_DEBUG_PIM_TRACE_DETAIL) {
-               vty_out(vty, "debug pim trace detail\n");
+               vty_out(vty, "debug " PIM_AF_DBG " trace detail\n");
                ++writes;
        }
 
        if (PIM_DEBUG_ZEBRA) {
-               vty_out(vty, "debug pim zebra\n");
+               vty_out(vty, "debug " PIM_AF_DBG " zebra\n");
                ++writes;
        }
 
@@ -129,12 +129,12 @@ int pim_debug_config_write(struct vty *vty)
         }
 
        if (PIM_DEBUG_BSM) {
-               vty_out(vty, "debug pim bsm\n");
+               vty_out(vty, "debug " PIM_AF_DBG " bsm\n");
                ++writes;
        }
 
        if (PIM_DEBUG_VXLAN) {
-               vty_out(vty, "debug pim vxlan\n");
+               vty_out(vty, "debug " PIM_AF_DBG " vxlan\n");
                ++writes;
        }
 
@@ -144,17 +144,17 @@ int pim_debug_config_write(struct vty *vty)
        }
 
        if (PIM_DEBUG_PIM_HELLO) {
-               vty_out(vty, "debug pim packets hello\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packets hello\n");
                ++writes;
        }
 
        if (PIM_DEBUG_PIM_J_P) {
-               vty_out(vty, "debug pim packets joins\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packets joins\n");
                ++writes;
        }
 
        if (PIM_DEBUG_PIM_REG) {
-               vty_out(vty, "debug pim packets register\n");
+               vty_out(vty, "debug " PIM_AF_DBG " packets register\n");
                ++writes;
        }
 
@@ -164,7 +164,7 @@ int pim_debug_config_write(struct vty *vty)
        }
 
        if (PIM_DEBUG_PIM_NHT) {
-               vty_out(vty, "debug pim nht\n");
+               vty_out(vty, "debug " PIM_AF_DBG " nht\n");
                ++writes;
        }
 
@@ -174,7 +174,7 @@ int pim_debug_config_write(struct vty *vty)
        }
 
        if (PIM_DEBUG_PIM_NHT_DETAIL) {
-               vty_out(vty, "debug pim nht detail\n");
+               vty_out(vty, "debug " PIM_AF_DBG " nht detail\n");
                ++writes;
        }
 
@@ -260,9 +260,14 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
                ++writes;
        }
 
-       if (pim->igmp_watermark_limit != 0) {
-               vty_out(vty, "%sip igmp watermark-warn %u\n", spaces,
-                       pim->igmp_watermark_limit);
+       if (pim->gm_watermark_limit != 0) {
+#if PIM_IPV == 4
+               vty_out(vty, "%s" PIM_AF_NAME " igmp watermark-warn %u\n",
+                       spaces, pim->gm_watermark_limit);
+#else
+               vty_out(vty, "%s" PIM_AF_NAME " mld watermark-warn %u\n",
+                       spaces, pim->gm_watermark_limit);
+#endif
                ++writes;
        }
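
Throughout the pim_vty.c hunks above, the literal "pim"/"igmp" words in the vty_out() strings are replaced by the PIM_MROUTE_DBG, PIM_AF_DBG and PIM_AF_NAME macros, so the same config writer can emit the IPv4 wording when built as pimd and the IPv6 wording when built as pim6d. The mechanism is plain compile-time concatenation of adjacent string literals; a minimal sketch with a hypothetical macro value (the real definitions live outside the lines quoted here):

	#include "vty.h"

	/* Hypothetical value for illustration; the real PIM_AF_DBG differs
	 * between the PIM_IPV=4 and PIM_IPV=6 builds. */
	#define EXAMPLE_AF_DBG "pim"

	static void write_debug_line_sketch(struct vty *vty)
	{
		/* adjacent literals concatenate: emits "debug pim events\n" */
		vty_out(vty, "debug " EXAMPLE_AF_DBG " events\n");
	}
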
 
index 3565be35bde556d3a7dde62348b62288f66f0918..5b63d0493694ef738eecc925d09122c2170f44f8 100644 (file)
@@ -303,7 +303,6 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
        struct pim_upstream *up;
        struct pim_interface *term_ifp;
        int flags = 0;
-       struct prefix nht_p;
        struct pim_instance *pim = vxlan_sg->pim;
 
        if (vxlan_sg->up) {
@@ -353,9 +352,8 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
                 * iif
                 */
                if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
-                       pim_addr_to_prefix(&nht_p, up->upstream_addr);
-                       pim_delete_tracked_nexthop(vxlan_sg->pim, &nht_p, up,
-                                                  NULL);
+                       pim_delete_tracked_nexthop(vxlan_sg->pim,
+                                                  up->upstream_addr, up, NULL);
                }
                /* We are acting FHR; clear out use_rpt setting if any */
                pim_upstream_update_use_rpt(up, false /*update_mroute*/);
index 7f217d9c2eb5465e9adfd380dded6f49fa172c50..2c76fd6868f0ae0285368187dfb9569e0cc998bf 100644 (file)
@@ -255,7 +255,7 @@ void pim_zebra_update_all_interfaces(struct pim_instance *pim)
                        struct pim_rpf rpf;
 
                        rpf.source_nexthop.interface = ifp;
-                       pim_addr_to_prefix(&rpf.rpf_addr, us->address);
+                       rpf.rpf_addr = us->address;
                        pim_joinprune_send(&rpf, us->us);
                        pim_jp_agg_clear_group(us->us);
                }
@@ -269,8 +269,8 @@ void pim_zebra_upstream_rpf_changed(struct pim_instance *pim,
        if (old->source_nexthop.interface) {
                struct pim_neighbor *nbr;
 
-               nbr = pim_neighbor_find_prefix(old->source_nexthop.interface,
-                                              &old->rpf_addr);
+               nbr = pim_neighbor_find(old->source_nexthop.interface,
+                                       old->rpf_addr);
                if (nbr)
                        pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
 
index 5879cdefb09784f4ec3793e4e39477e42aa928ab..6710f19995f2a9b8dfe80d2553d458aa06142222 100644 (file)
@@ -28,7 +28,6 @@
 void pim_zebra_init(void);
 void pim_zebra_zclient_update(struct vty *vty);
 
-void pim_scan_individual_oil(struct channel_oil *c_oil, int in_vif_index);
 void pim_scan_oil(struct pim_instance *pim_matcher);
 
 void pim_forward_start(struct pim_ifchannel *ch);
index 1424b2a45b20b59ce4c71ff280dbce64625da80d..aa06b86479d941d13b22061d86714de4e3cfcdd0 100644 (file)
@@ -22,6 +22,7 @@ pim_common = \
        pimd/pim_assert.c \
        pimd/pim_bfd.c \
        pimd/pim_br.c \
+       pimd/pim_bsm.c \
        pimd/pim_cmd_common.c \
        pimd/pim_errors.c \
        pimd/pim_hello.c \
@@ -64,7 +65,6 @@ pim_common = \
 
 pimd_pimd_SOURCES = \
        $(pim_common) \
-       pimd/pim_bsm.c \
        pimd/pim_cmd.c \
        pimd/pim_igmp.c \
        pimd/pim_igmp_mtrace.c \
@@ -90,7 +90,6 @@ pimd_pim6d_SOURCES = \
        $(pim_common) \
        pimd/pim6_main.c \
        pimd/pim6_mld.c \
-       pimd/pim6_stubs.c \
        pimd/pim6_cmd.c \
        # end
 
@@ -171,17 +170,11 @@ clippy_scan += \
 pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
 pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP)
 
-if PIMD
-if DEV_BUILD
-#
-# pim6d is only enabled for --enable-dev-build, and NOT installed currently
-# (change noinst_ to sbin_ below to install it.)
-#
-noinst_PROGRAMS += pimd/pim6d
+if PIM6D
+sbin_PROGRAMS += pimd/pim6d
 pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6
 pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)
 endif
-endif
 
 pimd_test_igmpv3_join_LDADD = lib/libfrr.la
 pimd_test_igmpv3_join_SOURCES = pimd/test_igmpv3_join.c
index 9aeb7d01d4b8402c457df90921bb080605543ac9..45690fb4acb5bc8dcf9cbf309e4010d5a8283835 100644 (file)
--- a/qpb/qpb.h
+++ b/qpb/qpb.h
@@ -121,11 +121,13 @@ static inline int qpb__l3_prefix__get(const Qpb__L3Prefix *pb_prefix,
        switch (family) {
 
        case AF_INET:
-               memset(prefix, 0, sizeof(struct prefix_ipv4));
+               memset((struct prefix_ipv4 *)prefix, 0,
+                      sizeof(struct prefix_ipv4));
                break;
 
        case AF_INET6:
-               memset(prefix, 0, sizeof(struct prefix_ipv6));
+               memset((struct prefix_ipv6 *)prefix, 0,
+                      sizeof(struct prefix_ipv6));
                break;
 
        default:
index 04ae96b654ee008a6fb1381fef4d5cf885c7c830..31061e3ae0fe50adcb1b7d61ac8f70dbaf68d01a 100644 (file)
     endscript
 }
 
+/var/log/frr/pim6d.log {
+    notifempty
+    missingok
+    postrotate
+        /bin/kill -USR1 `cat /var/run/frr/pim6d.pid 2> /dev/null` 2> /dev/null || true
+    endscript
+}
+
 /var/log/frr/sharpd.log {
     notifempty
     missingok
index 9d3d2b0643815139bf4ba96f3c9edcc7ce0f95d4..962541405f9f57e12b64cfa4e0a55a413f4b5fb2 100644 (file)
@@ -24,6 +24,7 @@
 %{!?with_pam:           %global  with_pam           0 }
 %{!?with_pbrd:          %global  with_pbrd          1 }
 %{!?with_pimd:          %global  with_pimd          1 }
+%{!?with_pim6d:         %global  with_pim6d         0 }
 %{!?with_vrrpd:         %global  with_vrrpd         1 }
 %{!?with_rtadv:         %global  with_rtadv         1 }
 %{!?with_watchfrr:      %global  with_watchfrr      1 }
@@ -81,6 +82,7 @@
 # if CentOS / RedHat and version < 7, then disable PIMd (too old, won't work)
 %if 0%{?rhel} && 0%{?rhel} < 7
     %global  with_pimd  0
+    %global  with_pim6d 0
 %endif
 
 # misc internal defines
     %define daemon_pimd ""
 %endif
 
+%if %{with_pim6d}
+    %define daemon_pim6d pim6d
+%else
+    %define daemon_pim6d ""
+%endif
+
 %if %{with_pbrd}
     %define daemon_pbrd pbrd
 %else
     %define daemon_pathd ""
 %endif
 
-%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} %{daemon_pathd}
+%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_pim6d} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} %{daemon_pathd}
 
 #release sub-revision (the two digits after the CONFDATE)
 %{!?release_rev:        %global  release_rev        01 }
@@ -342,6 +350,11 @@ routing state through standard SNMP MIBs.
 %else
     --disable-pimd \
 %endif
+%if %{with_pim6d}
+    --enable-pim6d \
+%else
+    --disable-pim6d \
+%endif
 %if %{with_pbrd}
     --enable-pbrd \
 %else
@@ -396,9 +409,6 @@ routing state through standard SNMP MIBs.
     --disable-bgp-vnc \
 %endif
     --enable-isisd \
-%if "%{initsystem}" == "systemd"
-    --enable-systemd \
-%endif
     --enable-rpki \
 %if %{with_bfdd}
     --enable-bfdd \
@@ -666,6 +676,9 @@ fi
 %if %{with_pimd}
     %{_sbindir}/pimd
 %endif
+%if %{with_pim6d}
+    %{_sbindir}/pim6d
+%endif
 %if %{with_pbrd}
     %{_sbindir}/pbrd
 %endif
@@ -779,7 +792,128 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
 
 
 %changelog
-* Tue Jun  7 2022 Donatas Abraitis <donatas@opensourcerouting.org> - %{version}
+
+* Wed Jul 20 2022 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+
+* Wed Jul 13 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.3
+- General:
+-    Add camelCase JSON keys in addition to PascalCase (wrong JSON keys will be deprecated)
+-    Fix corruption when route-map delete/add sequence happens (fast re-add)
+-    Reworked gRPC
+-    RFC5424 & journald extended syslog target
+- bfdd:
+-    Fix broken FSM in active/passive modes
+- bgpd:
+-    Notification Message Support for BGP Graceful Restart (rfc8538)
+-    BGP Cease Notification Subcode For BFD
+-    Send Hold Timer for BGP (own implementation without an additional knob)
+-    New `set as-path replace` command for BGP route-map
+-    New `match peer` command for BGP route-map
+-    New `ead-es-frag evi-limit` command for EVPN
+-    New `match evpn route-type` command for EVPN route-map to match Type-1/Type-4
+-    JSON outputs for all RPKI show commands
+-    Set attributes via route-map for BGP conditional advertisements
+-    Pass non-transitive extended communities between RS and RS-clients
+-    Send MED attribute when aggregate prefix is created
+-    Fix aspath memory leak in aggr_suppress_map_test
+-    Fix crash for `show ip bgp vrf all all neighbors 192.168.0.1 ...`
+-    Fix crash for `show ip bgp vrf all all`
+-    Fix memory leak for BGP Community Alias in CLI
+-    Fix memory leak when setting BGP community at egress
+-    Fix memory leak when setting BGP large-community at egress
+-    Fix SR color nexthop processing in BGP
+-    Fix setting local-preference in route-map using +/-
+-    Fix crash using Lua and route-map to set attributes via scripts
+-    Fix crash when issuing various forms of `bgp no-rib`
+- isisd:
+-    JSON output for show summary command
+-    Fix crash when MTU mismatch occurs
+-    Fix crash with xfrm interface type
+-    Fix infinite loop when parsing LSPs
+-    Fix router capability TLV parsing issues
+- vtysh:
+-    New `show thread timers` command
+- ospf6d:
+-    Add LSA statistics to LSA database
+-    Add LSA stats to `show area json` output
+-    Show time left in hello timer for `show ipv6 ospf6 int`
+-    Restart SPF when distance is updated
+-    Support keychain for ospf6 authentication
+- ospfd:
+-    New `show ip ospf reachable-routers` command
+-    Restart SPF when distance is updated
+-    Use consistent JSON keys for `show ip ospf neighbor` and detail version
+- pimd:
+-    Add additional IGMP stats
+-    Add IGMP join sent/failed statistics
+-    Add IGMP total groups and total source groups to statistics
+-    New `debug igmp trace detail` command
+-    New `ip pim passive` command
+-    JSON support added for command `show ip igmp sources`
+-    Allow the LPM match to work properly with prefix lists and normal RPs
+-    Do not allow 224.0.0.0/24 range in IGMP join
+-    Fix IGMP packet/query check
+-    Handle PIM join/prune receive flow for IPv6
+-    Handle receive of (*,G) register stop with source address as 0
+-    Handle exclude mode IGMPv3 report messages for SSM-aware groups
+-    Handle IGMPv2 report messages for the SSM-aware group range
+-    Send immediate join with possible sg rpt prune bit set
+-    Show group-type under `show ip pim rp-info`
+-    Show total received messages IGMP stats
+- staticd:
+-    Capture zebra advertised ECMP limit
+-    Do not register existing nexthop to Zebra
+-    Reject route config with too many nexthops
+-    Track nexthops per-safi
+- watchfrr:
+-    Add some more information to `show watchfrr`
+-    Send operational state to systemd
+- zebra:
+-    Add ability to know when FRR is not ASIC offloaded
+-    Add command for setting protodown bit
+-    Add dplane type for netconf data
+-    Add ECMP supported to `show zebra`
+-    Add EVPN status to `show zebra`
+-    Add if v4/v6 forwarding is turned on/off to `show zebra`
+-    Add initial zebra tracepoint support
+-    Add kernel nexthop group support to `show zebra`
+-    Add knowledge about ra and rfc 5549 to `show zebra`
+-    Add mpls status to `show zebra`
+-    Add netlink debug dump for netconf messages
+-    Add netlink debugs for ip rules
+-    Add OS and version to `show zebra`
+-    Add support for end.dt4
+-    Add to `show zebra` the type of vrf devices being used
+-    Allow *BSD to specify a receive buffer size
+-    Allow multiple connected routes to be chosen for kernel routes
+-    Allow system routes to recurse through themselves
+-    Do not send RAs w/o link-local v6 or on bridge-ports
+-    Evpn disable remove l2vni from l3vni list
+-    Evpn-mh bonds protodown check for set
+-    Evpn-mh use protodown update reason api
+-    Fix cleanup of meta queues on vrf disable
+-    Fix crash in evpn neigh cleanup all
+-    Fix missing delete vtep during vni transition
+-    Fix missing vrf change of l2vni on vxlan interface
+-    Fix rtadv startup when config read in is before interface up
+-    Fix use after deletion event in FreeBSD
+-    Fix v6 route replace failure turned into success
+-    Get zebra graceful restart working when restarting on *BSD
+-    Handle FreeBSD routing socket enobufs
+-    Handle protodown netlink for vxlan device
+-    Include mpls enabled status in interface output
+-    Include old reason in evpn-mh bond update
+-    Keep the interface flags safe on multiple ioctl calls
+-    Let /32 host route with same ip cross vrf
+-    Make router advertisement warnings show up once every 6 hours
+-    Prevent crash if zebra_route_all is used for a route type
+-    Prevent installation of connected multiple times
+-    Protodown-up event trigger interface up
+-    Register nht nexthops with proper safi
+-    Update advertise-svi-ip macips w/ new mac
+-    When handling unprocessed messages from kernel print usable string
+-    New `show ip nht mrib` command
+-    Handle ENOBUFS errors for FreeBSD
 
 * Tue Mar  1 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.2
 - The FRRouting community would like to announce FRR Release 8.2.
index 7ac37b7ee2e8315df474ed3d1c869a03ec6c29e1..02da56e4f3ba454d7fd5554f39a893858c5c41d3 100644 (file)
@@ -428,7 +428,7 @@ static void rip_interface_clean(struct rip_interface *ri)
        ri->enable_interface = 0;
        ri->running = 0;
 
-       thread_cancel(&ri->t_wakeup);
+       THREAD_OFF(ri->t_wakeup);
 }
 
 void rip_interfaces_clean(struct rip *rip)
index 52f2985cb3f5af0aa8d780b106b9942031b06957..25641f2f798e984bb402ead73cfede39035a1cb4 100644 (file)
@@ -64,8 +64,8 @@ static void clear_rip_route(struct rip *rip)
                }
 
                if (rinfo) {
-                       RIP_TIMER_OFF(rinfo->t_timeout);
-                       RIP_TIMER_OFF(rinfo->t_garbage_collect);
+                       THREAD_OFF(rinfo->t_timeout);
+                       THREAD_OFF(rinfo->t_garbage_collect);
                        listnode_delete(list, rinfo);
                        rip_info_free(rinfo);
                }
index 8febb436e7e8c9aad4bfa914ab57d80da79c6f9e..a52914bcf9f908cb0913c8ba6c8d78ad3c555f92 100644 (file)
@@ -38,7 +38,7 @@ static struct rip_peer *rip_peer_new(void)
 
 static void rip_peer_free(struct rip_peer *peer)
 {
-       RIP_TIMER_OFF(peer->t_timeout);
+       THREAD_OFF(peer->t_timeout);
        XFREE(MTYPE_RIP_PEER, peer);
 }
 
@@ -84,7 +84,7 @@ static struct rip_peer *rip_peer_get(struct rip *rip, struct in_addr *addr)
        peer = rip_peer_lookup(rip, addr);
 
        if (peer) {
-               thread_cancel(&peer->t_timeout);
+               THREAD_OFF(peer->t_timeout);
        } else {
                peer = rip_peer_new();
                peer->rip = rip;
index cc21c0bd69166e4ba93fd7d8b12c8c9557617f2b..9798186036d9815f07824bab841606fa254ddb7d 100644 (file)
@@ -144,7 +144,7 @@ static void rip_garbage_collect(struct thread *t)
        rinfo = THREAD_ARG(t);
 
        /* Off timeout timer. */
-       RIP_TIMER_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_timeout);
 
        /* Get route_node pointer. */
        rp = rinfo->rp;
@@ -226,14 +226,14 @@ struct rip_info *rip_ecmp_replace(struct rip *rip, struct rip_info *rinfo_new)
                if (tmp_rinfo == rinfo)
                        continue;
 
-               RIP_TIMER_OFF(tmp_rinfo->t_timeout);
-               RIP_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+               THREAD_OFF(tmp_rinfo->t_timeout);
+               THREAD_OFF(tmp_rinfo->t_garbage_collect);
                list_delete_node(list, node);
                rip_info_free(tmp_rinfo);
        }
 
-       RIP_TIMER_OFF(rinfo->t_timeout);
-       RIP_TIMER_OFF(rinfo->t_garbage_collect);
+       THREAD_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_garbage_collect);
        memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
 
        if (rip_route_rte(rinfo)) {
@@ -262,12 +262,12 @@ struct rip_info *rip_ecmp_delete(struct rip *rip, struct rip_info *rinfo)
        struct route_node *rp = rinfo->rp;
        struct list *list = (struct list *)rp->info;
 
-       RIP_TIMER_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_timeout);
 
        if (listcount(list) > 1) {
                /* Some other ECMP entries still exist. Just delete this entry.
                 */
-               RIP_TIMER_OFF(rinfo->t_garbage_collect);
+               THREAD_OFF(rinfo->t_garbage_collect);
                listnode_delete(list, rinfo);
                if (rip_route_rte(rinfo)
                    && CHECK_FLAG(rinfo->flags, RIP_RTF_FIB))
@@ -313,7 +313,7 @@ static void rip_timeout(struct thread *t)
 static void rip_timeout_update(struct rip *rip, struct rip_info *rinfo)
 {
        if (rinfo->metric != RIP_METRIC_INFINITY) {
-               RIP_TIMER_OFF(rinfo->t_timeout);
+               THREAD_OFF(rinfo->t_timeout);
                thread_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
                                 &rinfo->t_timeout);
        }
@@ -659,8 +659,8 @@ static void rip_rte_process(struct rte *rte, struct sockaddr_in *from,
                                        assert(newinfo.metric
                                               != RIP_METRIC_INFINITY);
 
-                                       RIP_TIMER_OFF(rinfo->t_timeout);
-                                       RIP_TIMER_OFF(rinfo->t_garbage_collect);
+                                       THREAD_OFF(rinfo->t_timeout);
+                                       THREAD_OFF(rinfo->t_garbage_collect);
                                        memcpy(rinfo, &newinfo,
                                               sizeof(struct rip_info));
                                        rip_timeout_update(rip, rinfo);
@@ -1614,7 +1614,7 @@ void rip_redistribute_delete(struct rip *rip, int type, int sub_type,
                                RIP_TIMER_ON(rinfo->t_garbage_collect,
                                             rip_garbage_collect,
                                             rip->garbage_time);
-                               RIP_TIMER_OFF(rinfo->t_timeout);
+                               THREAD_OFF(rinfo->t_timeout);
                                rinfo->flags |= RIP_RTF_CHANGED;
 
                                if (IS_RIP_DEBUG_EVENT)
@@ -2506,7 +2506,7 @@ static void rip_update(struct thread *t)
 
        /* Triggered updates may be suppressed if a regular update is due by
           the time the triggered update would be sent. */
-       RIP_TIMER_OFF(rip->t_triggered_interval);
+       THREAD_OFF(rip->t_triggered_interval);
        rip->trigger = 0;
 
        /* Register myself. */
@@ -2553,7 +2553,7 @@ static void rip_triggered_update(struct thread *t)
        int interval;
 
        /* Cancel interval timer. */
-       RIP_TIMER_OFF(rip->t_triggered_interval);
+       THREAD_OFF(rip->t_triggered_interval);
        rip->trigger = 0;
 
        /* Logging triggered update. */
@@ -2603,7 +2603,7 @@ void rip_redistribute_withdraw(struct rip *rip, int type)
                rinfo->metric = RIP_METRIC_INFINITY;
                RIP_TIMER_ON(rinfo->t_garbage_collect, rip_garbage_collect,
                             rip->garbage_time);
-               RIP_TIMER_OFF(rinfo->t_timeout);
+               THREAD_OFF(rinfo->t_timeout);
                rinfo->flags |= RIP_RTF_CHANGED;
 
                if (IS_RIP_DEBUG_EVENT) {
@@ -2785,7 +2785,7 @@ void rip_event(struct rip *rip, enum rip_event event, int sock)
                thread_add_read(master, rip_read, rip, sock, &rip->t_read);
                break;
        case RIP_UPDATE_EVENT:
-               RIP_TIMER_OFF(rip->t_update);
+               THREAD_OFF(rip->t_update);
                jitter = rip_update_jitter(rip->update_time);
                thread_add_timer(master, rip_update, rip,
                                 sock ? 2 : rip->update_time + jitter,
@@ -2915,8 +2915,8 @@ void rip_ecmp_disable(struct rip *rip)
                        if (tmp_rinfo == rinfo)
                                continue;
 
-                       RIP_TIMER_OFF(tmp_rinfo->t_timeout);
-                       RIP_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+                       THREAD_OFF(tmp_rinfo->t_timeout);
+                       THREAD_OFF(tmp_rinfo->t_garbage_collect);
                        list_delete_node(list, node);
                        rip_info_free(tmp_rinfo);
                }
@@ -3508,8 +3508,8 @@ static void rip_instance_disable(struct rip *rip)
                        rip_zebra_ipv4_delete(rip, rp);
 
                for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
-                       RIP_TIMER_OFF(rinfo->t_timeout);
-                       RIP_TIMER_OFF(rinfo->t_garbage_collect);
+                       THREAD_OFF(rinfo->t_timeout);
+                       THREAD_OFF(rinfo->t_garbage_collect);
                        rip_info_free(rinfo);
                }
                list_delete(&list);
@@ -3521,12 +3521,12 @@ static void rip_instance_disable(struct rip *rip)
        rip_redistribute_disable(rip);
 
        /* Cancel RIP related timers. */
-       RIP_TIMER_OFF(rip->t_update);
-       RIP_TIMER_OFF(rip->t_triggered_update);
-       RIP_TIMER_OFF(rip->t_triggered_interval);
+       THREAD_OFF(rip->t_update);
+       THREAD_OFF(rip->t_triggered_update);
+       THREAD_OFF(rip->t_triggered_interval);
 
        /* Cancel read thread. */
-       thread_cancel(&rip->t_read);
+       THREAD_OFF(rip->t_read);
 
        /* Close RIP socket. */
        close(rip->sock);
index f26dcd87754d87b9ffef3d30d7f5a3e72117c67b..d26592dac24de1f13f212a772d61829287e4fa9b 100644 (file)
@@ -404,9 +404,6 @@ enum rip_event {
 /* Macro for timer turn on. */
 #define RIP_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
 
-/* Macro for timer turn off. */
-#define RIP_TIMER_OFF(X) thread_cancel(&(X))
-
 #define RIP_OFFSET_LIST_IN  0
 #define RIP_OFFSET_LIST_OUT 1
 #define RIP_OFFSET_LIST_MAX 2
index 5159a9825bfe416f23892ebfe1aa1a957d5f7edc..3068d04b739c99606291c61c51b8a600acc7554a 100644 (file)
@@ -316,7 +316,7 @@ void ripng_interface_clean(struct ripng *ripng)
                ri->enable_interface = 0;
                ri->running = 0;
 
-               thread_cancel(&ri->t_wakeup);
+               THREAD_OFF(ri->t_wakeup);
        }
 }
 
index 4dfe9d96409b64d2dcf887d230ae67122eaf4334..57a5f266884f78888310417e6d7a2ebff99c1de5 100644 (file)
@@ -66,8 +66,8 @@ static void clear_ripng_route(struct ripng *ripng)
                }
 
                if (rinfo) {
-                       RIPNG_TIMER_OFF(rinfo->t_timeout);
-                       RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+                       THREAD_OFF(rinfo->t_timeout);
+                       THREAD_OFF(rinfo->t_garbage_collect);
                        listnode_delete(list, rinfo);
                        ripng_info_free(rinfo);
                }
index 010fdda89ebb33e134682a352b19de0b96334e2e..37cfe9833fb722d9e274e197ab1526dbc46aedb4 100644 (file)
@@ -43,7 +43,7 @@ static struct ripng_peer *ripng_peer_new(void)
 
 static void ripng_peer_free(struct ripng_peer *peer)
 {
-       RIPNG_TIMER_OFF(peer->t_timeout);
+       THREAD_OFF(peer->t_timeout);
        XFREE(MTYPE_RIPNG_PEER, peer);
 }
 
@@ -93,7 +93,7 @@ static struct ripng_peer *ripng_peer_get(struct ripng *ripng,
        peer = ripng_peer_lookup(ripng, addr);
 
        if (peer) {
-               thread_cancel(&peer->t_timeout);
+               THREAD_OFF(peer->t_timeout);
        } else {
                peer = ripng_peer_new();
                peer->ripng = ripng;
index e7d292869782042f547156c47bc87754216ba34a..1e7a13d7dc61e1f2d03a8257c37f1f8c13e3e936 100644 (file)
@@ -431,7 +431,7 @@ static void ripng_garbage_collect(struct thread *t)
        rinfo = THREAD_ARG(t);
 
        /* Off timeout timer. */
-       RIPNG_TIMER_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_timeout);
 
        /* Get route_node pointer. */
        rp = rinfo->rp;
@@ -518,14 +518,14 @@ struct ripng_info *ripng_ecmp_replace(struct ripng *ripng,
        /* Re-use the first entry, and delete the others. */
        for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
                if (tmp_rinfo != rinfo) {
-                       RIPNG_TIMER_OFF(tmp_rinfo->t_timeout);
-                       RIPNG_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+                       THREAD_OFF(tmp_rinfo->t_timeout);
+                       THREAD_OFF(tmp_rinfo->t_garbage_collect);
                        list_delete_node(list, node);
                        ripng_info_free(tmp_rinfo);
                }
 
-       RIPNG_TIMER_OFF(rinfo->t_timeout);
-       RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+       THREAD_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_garbage_collect);
        memcpy(rinfo, rinfo_new, sizeof(struct ripng_info));
 
        if (ripng_route_rte(rinfo)) {
@@ -557,7 +557,7 @@ struct ripng_info *ripng_ecmp_delete(struct ripng *ripng,
        struct agg_node *rp = rinfo->rp;
        struct list *list = (struct list *)rp->info;
 
-       RIPNG_TIMER_OFF(rinfo->t_timeout);
+       THREAD_OFF(rinfo->t_timeout);
 
        if (rinfo->metric != RIPNG_METRIC_INFINITY)
                ripng_aggregate_decrement(rp, rinfo);
@@ -565,7 +565,7 @@ struct ripng_info *ripng_ecmp_delete(struct ripng *ripng,
        if (listcount(list) > 1) {
                /* Some other ECMP entries still exist. Just delete this entry.
                 */
-               RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+               THREAD_OFF(rinfo->t_garbage_collect);
                listnode_delete(list, rinfo);
                if (ripng_route_rte(rinfo)
                    && CHECK_FLAG(rinfo->flags, RIPNG_RTF_FIB))
@@ -611,7 +611,7 @@ static void ripng_timeout(struct thread *t)
 static void ripng_timeout_update(struct ripng *ripng, struct ripng_info *rinfo)
 {
        if (rinfo->metric != RIPNG_METRIC_INFINITY) {
-               RIPNG_TIMER_OFF(rinfo->t_timeout);
+               THREAD_OFF(rinfo->t_timeout);
                thread_add_timer(master, ripng_timeout, rinfo,
                                 ripng->timeout_time, &rinfo->t_timeout);
        }
@@ -1022,7 +1022,7 @@ void ripng_redistribute_delete(struct ripng *ripng, int type, int sub_type,
                                RIPNG_TIMER_ON(rinfo->t_garbage_collect,
                                               ripng_garbage_collect,
                                               ripng->garbage_time);
-                               RIPNG_TIMER_OFF(rinfo->t_timeout);
+                               THREAD_OFF(rinfo->t_timeout);
 
                                /* Aggregate count decrement. */
                                ripng_aggregate_decrement(rp, rinfo);
@@ -1061,7 +1061,7 @@ void ripng_redistribute_withdraw(struct ripng *ripng, int type)
                                RIPNG_TIMER_ON(rinfo->t_garbage_collect,
                                               ripng_garbage_collect,
                                               ripng->garbage_time);
-                               RIPNG_TIMER_OFF(rinfo->t_timeout);
+                               THREAD_OFF(rinfo->t_timeout);
 
                                /* Aggregate count decrement. */
                                ripng_aggregate_decrement(rp, rinfo);
@@ -1445,7 +1445,7 @@ static void ripng_update(struct thread *t)
 
        /* Triggered updates may be suppressed if a regular update is due by
           the time the triggered update would be sent. */
-       thread_cancel(&ripng->t_triggered_interval);
+       THREAD_OFF(ripng->t_triggered_interval);
        ripng->trigger = 0;
 
        /* Reset flush event. */
@@ -1472,7 +1472,7 @@ void ripng_triggered_update(struct thread *t)
        int interval;
 
        /* Cancel interval timer. */
-       thread_cancel(&ripng->t_triggered_interval);
+       THREAD_OFF(ripng->t_triggered_interval);
        ripng->trigger = 0;
 
        /* Logging triggered update. */
@@ -1917,7 +1917,7 @@ void ripng_event(struct ripng *ripng, enum ripng_event event, int sock)
                                &ripng->t_read);
                break;
        case RIPNG_UPDATE_EVENT:
-               thread_cancel(&ripng->t_update);
+               THREAD_OFF(ripng->t_update);
 
                /* Update timer jitter. */
                jitter = ripng_update_jitter(ripng->update_time);
@@ -2209,8 +2209,8 @@ void ripng_ecmp_disable(struct ripng *ripng)
                        /* Drop all other entries, except the first one. */
                        for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
                                if (tmp_rinfo != rinfo) {
-                                       RIPNG_TIMER_OFF(tmp_rinfo->t_timeout);
-                                       RIPNG_TIMER_OFF(
+                                       THREAD_OFF(tmp_rinfo->t_timeout);
+                                       THREAD_OFF(
                                                tmp_rinfo->t_garbage_collect);
                                        list_delete_node(list, node);
                                        ripng_info_free(tmp_rinfo);
@@ -2528,8 +2528,8 @@ static void ripng_instance_disable(struct ripng *ripng)
                                ripng_zebra_ipv6_delete(ripng, rp);
 
                        for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
-                               RIPNG_TIMER_OFF(rinfo->t_timeout);
-                               RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+                               THREAD_OFF(rinfo->t_timeout);
+                               THREAD_OFF(rinfo->t_garbage_collect);
                                ripng_info_free(rinfo);
                        }
                        list_delete(&list);
@@ -2548,12 +2548,12 @@ static void ripng_instance_disable(struct ripng *ripng)
        ripng_redistribute_disable(ripng);
 
        /* Cancel the RIPng timers */
-       RIPNG_TIMER_OFF(ripng->t_update);
-       RIPNG_TIMER_OFF(ripng->t_triggered_update);
-       RIPNG_TIMER_OFF(ripng->t_triggered_interval);
+       THREAD_OFF(ripng->t_update);
+       THREAD_OFF(ripng->t_triggered_update);
+       THREAD_OFF(ripng->t_triggered_interval);
 
        /* Cancel the read thread */
-       thread_cancel(&ripng->t_read);
+       THREAD_OFF(ripng->t_read);
 
        /* Close the RIPng socket */
        if (ripng->sock >= 0) {
index 6bf687b02a6d86b92e0f36565bc72095e2780c12..ac2edc5b2c81879b91d21cf405def69a1a5a4740 100644 (file)
@@ -312,8 +312,6 @@ enum ripng_event {
 /* RIPng timer on/off macro. */
 #define RIPNG_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
 
-#define RIPNG_TIMER_OFF(T)  thread_cancel(&(T))
-
 #define RIPNG_OFFSET_LIST_IN  0
 #define RIPNG_OFFSET_LIST_OUT 1
 #define RIPNG_OFFSET_LIST_MAX 2
index bf3902d9fabd3c2f415f7f68a53e9112c1c1adc6..9729be7b92e0a327a384ba59cb863625007a90ed 100644 (file)
@@ -1,5 +1,5 @@
 name: frr
-version: @VERSION@
+version: '@VERSION@'
 summary: FRRouting BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
 description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
  FRRouting (FRR) is free software which manages TCP/IP based routing
index 9ccffe53d9afdc935432f749a0185f1ba7901586..5a7044e9f9feadebc508fb275c539d4d9697d428 100644 (file)
@@ -263,6 +263,7 @@ nexthop_mpls_label_stack_entry_destroy(struct nb_cb_destroy_args *args)
        struct static_nexthop *nh;
        uint32_t pos;
        uint8_t index;
+       uint old_num_labels;
 
        switch (args->event) {
        case NB_EV_VALIDATE:
@@ -278,8 +279,12 @@ nexthop_mpls_label_stack_entry_destroy(struct nb_cb_destroy_args *args)
                        return NB_ERR;
                }
                index = pos - 1;
+               old_num_labels = nh->snh_label.num_labels;
                nh->snh_label.label[index] = 0;
                nh->snh_label.num_labels--;
+
+               if (old_num_labels != nh->snh_label.num_labels)
+                       nh->state = STATIC_START;
                break;
        }
 
@@ -291,6 +296,7 @@ static int static_nexthop_mpls_label_modify(struct nb_cb_modify_args *args)
        struct static_nexthop *nh;
        uint32_t pos;
        uint8_t index;
+       mpls_label_t old_label;
 
        nh = nb_running_get_entry(args->dnode, NULL, true);
        pos = yang_get_list_pos(lyd_parent(args->dnode));
@@ -301,8 +307,13 @@ static int static_nexthop_mpls_label_modify(struct nb_cb_modify_args *args)
        }
        /* Mapping to array = list-index -1 */
        index = pos - 1;
+
+       old_label = nh->snh_label.label[index];
        nh->snh_label.label[index] = yang_dnode_get_uint32(args->dnode, NULL);
 
+       if (old_label != nh->snh_label.label[index])
+               nh->state = STATIC_START;
+
        return NB_OK;
 }
 
@@ -310,6 +321,7 @@ static int static_nexthop_onlink_modify(struct nb_cb_modify_args *args)
 {
        struct static_nexthop *nh;
        enum static_nh_type nh_type;
+       bool old_onlink;
 
        switch (args->event) {
        case NB_EV_VALIDATE:
@@ -327,7 +339,11 @@ static int static_nexthop_onlink_modify(struct nb_cb_modify_args *args)
                break;
        case NB_EV_APPLY:
                nh = nb_running_get_entry(args->dnode, NULL, true);
+               old_onlink = nh->onlink;
                nh->onlink = yang_dnode_get_bool(args->dnode, NULL);
+
+               if (old_onlink != nh->onlink)
+                       nh->state = STATIC_START;
                break;
        }
 
@@ -337,20 +353,30 @@ static int static_nexthop_onlink_modify(struct nb_cb_modify_args *args)
 static int static_nexthop_color_modify(struct nb_cb_modify_args *args)
 {
        struct static_nexthop *nh;
+       uint32_t old_color;
 
        nh = nb_running_get_entry(args->dnode, NULL, true);
+       old_color = nh->color;
        nh->color = yang_dnode_get_uint32(args->dnode, NULL);
 
+       if (old_color != nh->color)
+               nh->state = STATIC_START;
+
        return NB_OK;
 }
 
 static int static_nexthop_color_destroy(struct nb_cb_destroy_args *args)
 {
        struct static_nexthop *nh;
+       uint32_t old_color;
 
        nh = nb_running_unset_entry(args->dnode);
+       old_color = nh->color;
        nh->color = 0;
 
+       if (old_color != nh->color)
+               nh->state = STATIC_START;
+
        return NB_OK;
 }
 
index bdb2e56ee1d80b24913363a54c0138a96c68278f..108bd1b49adfc9d26aab216439752c3388506638 100755 (executable)
@@ -115,6 +115,9 @@ def dump_testcase(testcase):
     for key, val in testcase.items():
         if isinstance(val, str) or isinstance(val, float) or isinstance(val, int):
             s += "{}: {}\n".format(key, val)
+        elif isinstance(val, list):
+            for k2, v2 in enumerate(val):
+                s += "{}: {}\n".format(k2, v2)
         else:
             for k2, v2 in val.items():
                 s += "{}: {}\n".format(k2, v2)
index fcea5d48fcc697a05601ffd50047e0e8b657d717..373a0c5b94f3dd65bb74273abd63fdd809115375 100644 (file)
@@ -2,6 +2,7 @@ interface r1-eth1
  ip ospf area 0
  ip ospf hello-interval 2
  ip ospf dead-interval 10
+ ip ospf bfd
  ip ospf bfd profile slowtx
 !
 router ospf
index 8fca099686859ebbabb5ebb36091fc65d7bf133e..0476df740aa67680d1497cb67ed3b37967207475 100644 (file)
@@ -5,7 +5,11 @@
 !
 bfd
  peer 192.168.0.2 vrf r1-bfd-cust1
+  receive-interval 1000
+  transmit-interval 1000
   echo-mode
+  echo transmit-interval 1000
+  echo receive-interval 1000
   no shutdown
  !
 !
index f49768ff759b9436a7625cf296c84fbf6d96f6a0..57cea71e539b5eeb83219297e37bc8b6eaa8fff7 100644 (file)
@@ -1,7 +1,7 @@
 [
   {
     "remote-receive-interval": 1000,
-    "remote-transmit-interval": 500,
+    "remote-transmit-interval": 1000,
     "peer": "192.168.0.2",
     "status": "up"
   }
index 4490090ec62f108feaf2815a48608157e73fd684..69edd1536b5886b744ec56569222b603e702eb45 100644 (file)
@@ -6,12 +6,18 @@
 bfd
  peer 192.168.0.1 vrf r2-bfd-cust1
   receive-interval 1000
-  transmit-interval 500
+  transmit-interval 1000
   echo-mode
+  echo transmit-interval 1000
+  echo receive-interval 1000
   no shutdown
  !
  peer 192.168.1.1 vrf r2-bfd-cust1
+  receive-interval 1000
+  transmit-interval 1000
   echo-mode
+  echo transmit-interval 1000
+  echo receive-interval 1000
   no shutdown
  !
 !
index 267459c7a87e9ad9fc4d24851e9215db78b9ef68..0a1c34224bcb4bb0d57ba7bb94211ea7ea956cda 100644 (file)
@@ -4,7 +4,7 @@
     "status": "up"
   },
   {
-    "remote-echo-receive-interval": 100,
+    "remote-echo-receive-interval": 1000,
     "peer": "192.168.1.1",
     "status": "up"
   },
index 0333320898c89cc2fc9e3fa67b908fe9b972a079..00162b524715290bf4e9c86538382d5d88c0285b 100644 (file)
@@ -5,7 +5,9 @@
 !
 bfd
  peer 192.168.1.2 vrf r3-bfd-cust1
-  echo-interval 100
+  receive-interval 1000
+  transmit-interval 1000
+  echo-interval 1000
   echo-mode
   no shutdown
  !
diff --git a/tests/topotests/bfd_vrflite_topo1/__init__.py b/tests/topotests/bfd_vrflite_topo1/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json b/tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json
new file mode 100644 (file)
index 0000000..07e96d7
--- /dev/null
@@ -0,0 +1,96 @@
+[
+   {
+      "multihop":false,
+      "peer":"192.168.0.2",
+      "vrf":"vrf1",
+      "passive-mode":false,
+      "status":"up",
+      "diagnostic":"ok",
+      "remote-diagnostic":"ok",
+      "receive-interval":1000,
+      "transmit-interval":1000,
+      "echo-receive-interval":50,
+      "echo-transmit-interval":0,
+      "detect-multiplier":3,
+      "remote-receive-interval":1000,
+      "remote-transmit-interval":1000,
+      "remote-echo-receive-interval":50,
+      "remote-detect-multiplier":3
+   },
+   {
+      "multihop":true,
+      "peer":"192.0.2.2",
+      "local":"192.0.2.1",
+      "vrf":"vrf2",
+      "passive-mode":false,
+      "minimum-ttl":254,
+      "status":"up",
+      "diagnostic":"ok",
+      "remote-diagnostic":"ok",
+      "receive-interval":1000,
+      "transmit-interval":1000,
+      "echo-receive-interval":50,
+      "echo-transmit-interval":0,
+      "detect-multiplier":3,
+      "remote-receive-interval":1000,
+      "remote-transmit-interval":1000,
+      "remote-echo-receive-interval":50,
+      "remote-detect-multiplier":3
+   },
+   {
+      "multihop":false,
+      "peer":"192.168.0.2",
+      "vrf":"vrf2",
+      "passive-mode":false,
+      "status":"up",
+      "diagnostic":"ok",
+      "remote-diagnostic":"ok",
+      "receive-interval":1000,
+      "transmit-interval":1000,
+      "echo-receive-interval":50,
+      "echo-transmit-interval":0,
+      "detect-multiplier":3,
+      "remote-receive-interval":1000,
+      "remote-transmit-interval":1000,
+      "remote-echo-receive-interval":50,
+      "remote-detect-multiplier":3
+   },
+   {
+      "multihop":false,
+      "peer":"192.168.0.2",
+      "vrf":"default",
+      "passive-mode":false,
+      "status":"up",
+      "diagnostic":"ok",
+      "remote-diagnostic":"ok",
+      "receive-interval":1000,
+      "transmit-interval":1000,
+      "echo-receive-interval":50,
+      "echo-transmit-interval":0,
+      "detect-multiplier":3,
+      "remote-receive-interval":1000,
+      "remote-transmit-interval":1000,
+      "remote-echo-receive-interval":50,
+      "remote-detect-multiplier":3
+   },
+   {
+      "multihop":true,
+      "peer":"192.0.2.2",
+      "local":"192.0.2.1",
+      "vrf":"vrf1",
+      "passive-mode":false,
+      "minimum-ttl":254,
+      "status":"up",
+      "diagnostic":"ok",
+      "remote-diagnostic":"ok",
+      "receive-interval":1000,
+      "transmit-interval":1000,
+      "echo-receive-interval":50,
+      "echo-transmit-interval":0,
+      "detect-multiplier":3,
+      "remote-receive-interval":1000,
+      "remote-transmit-interval":1000,
+      "remote-echo-receive-interval":50,
+      "remote-detect-multiplier":3
+   }
+]
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf b/tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf
new file mode 100644 (file)
index 0000000..96e8ff4
--- /dev/null
@@ -0,0 +1,26 @@
+!
+! debug bfd network
+! debug bfd peer
+! debug bfd zebra
+!
+bfd
+ peer 192.168.0.2 vrf vrf1
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.168.0.2 vrf vrf2
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.168.0.2
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.0.2.2 multihop local-address 192.0.2.1 vrf vrf1
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.0.2.2 multihop local-address 192.0.2.1 vrf vrf2
+  transmit-interval 1000
+  receive-interval 1000
+ !
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/zebra.conf b/tests/topotests/bfd_vrflite_topo1/r1/zebra.conf
new file mode 100644 (file)
index 0000000..ebb4e63
--- /dev/null
@@ -0,0 +1,24 @@
+vrf vrf1
+ ip route 192.0.2.2/32 192.168.0.2
+!
+vrf vrf2
+ ip route 192.0.2.2/32 192.168.0.2
+!
+interface r1-eth0 vrf vrf3
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.100 vrf vrf1
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.200 vrf vrf2
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.300
+ ip address 192.168.0.1/24
+!
+interface r1-loop1 vrf vrf1
+ ip address 192.0.2.1/32
+!
+interface r1-loop2 vrf vrf2
+ ip address 192.0.2.1/32
+!
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf b/tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf
new file mode 100644 (file)
index 0000000..7b11a47
--- /dev/null
@@ -0,0 +1,26 @@
+!
+! debug bfd network
+! debug bfd peer
+! debug bfd zebra
+!
+bfd
+ peer 192.168.0.1 vrf vrf1
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.168.0.1 vrf vrf2
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.168.0.1
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.0.2.1 multihop local-address 192.0.2.2 vrf vrf1
+  transmit-interval 1000
+  receive-interval 1000
+ !
+ peer 192.0.2.1 multihop local-address 192.0.2.2 vrf vrf2
+  transmit-interval 1000
+  receive-interval 1000
+ !
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r2/zebra.conf b/tests/topotests/bfd_vrflite_topo1/r2/zebra.conf
new file mode 100644 (file)
index 0000000..d8b996e
--- /dev/null
@@ -0,0 +1,24 @@
+vrf vrf1
+ ip route 192.0.2.1/32 192.168.0.1
+!
+vrf vrf2
+ ip route 192.0.2.1/32 192.168.0.1
+!
+interface r2-eth0 vrf vrf3
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.100 vrf vrf1
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.200 vrf vrf2
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.300
+ ip address 192.168.0.2/24
+!
+interface r2-loop1 vrf vrf1
+ ip address 192.0.2.2/32
+!
+interface r2-loop2 vrf vrf2
+ ip address 192.0.2.2/32
+!
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py b/tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py
new file mode 100644 (file)
index 0000000..b7afb8e
--- /dev/null
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+#
+# test_bfd_vrflite_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2018 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+# Copyright (c) 2022 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bfd_vrflite_topo1.py: Test the FRR BFD daemon.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    # Create 2 routers
+    for routern in range(1, 3):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r1"])
+
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    logger.info("Testing with Linux VRF support and udp_l3mdev=0")
+    if os.system("echo 0 > /proc/sys/net/ipv4/udp_l3mdev_accept") != 0:
+        return pytest.skip(
+            "Skipping BFD vrflite Topo1 Test. Linux VRF not available on System"
+        )
+
+    for rname, router in router_list.items():
+        router.net.add_l3vrf("vrf1", 10)
+        router.net.add_l3vrf("vrf2", 20)
+        router.net.add_l3vrf("vrf3", 30)
+        router.net.add_vlan(rname + "-eth0.100", rname + "-eth0", 100)
+        router.net.add_vlan(rname + "-eth0.200", rname + "-eth0", 200)
+        router.net.add_vlan(rname + "-eth0.300", rname + "-eth0", 300)
+        router.net.attach_iface_to_l3vrf(rname + "-eth0.100", "vrf1")
+        router.net.attach_iface_to_l3vrf(rname + "-eth0.200", "vrf2")
+        router.net.add_loop(rname + "-loop1")
+        router.net.add_loop(rname + "-loop2")
+        router.net.attach_iface_to_l3vrf(rname + "-loop1", "vrf1")
+        router.net.attach_iface_to_l3vrf(rname + "-loop2", "vrf2")
+        router.net.attach_iface_to_l3vrf(rname + "-eth0", "vrf3")
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA,
+            os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    # Move interfaces out of vrf namespace and delete the namespace
+    router_list = tgen.routers()
+    for rname, router in router_list.items():
+        router.net.del_iface(rname + "-eth0.100")
+        router.net.del_iface(rname + "-eth0.200")
+        router.net.del_iface(rname + "-eth0.300")
+        router.net.del_iface(rname + "-loop1")
+        router.net.del_iface(rname + "-loop2")
+
+    tgen.stop_topology()
+
+
+def test_bfd_connection():
+    "Assert that the BFD peers can find themselves."
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("waiting for bfd peers to go up")
+    router = tgen.gears['r1']
+    json_file = "{}/{}/bfd_peers_status.json".format(CWD, 'r1')
+    expected = json.loads(open(json_file).read())
+
+    test_func = partial(
+        topotest.router_json_cmp, router, "show bfd peers json", expected
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=16, wait=1)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
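test_bfd_connection() above follows the usual topotest polling idiom: build a comparison callable with functools.partial and hand it to topotest.run_and_expect(), which retries until the callable returns the expected value (None here, meaning the JSON matched). A simplified sketch of that helper's contract, assuming its retry semantics and return shape; this is not the library code itself:

    import time

    def run_and_expect(func, what, count=20, wait=3):
        # Call func() up to `count` more times, sleeping `wait` seconds
        # between attempts, until it returns `what`; report
        # (success, last result), mirroring the topotest helper's shape.
        result = func()
        while count > 0 and result != what:
            time.sleep(wait)
            result = func()
            count -= 1
        return (result == what, result)

With count=16 and wait=1 the test therefore gives the BFD sessions roughly 16 seconds to come up before the assertion fails.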
diff --git a/tests/topotests/bgp_as_override/__init__.py b/tests/topotests/bgp_as_override/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_as_override/r1/bgpd.conf b/tests/topotests/bgp_as_override/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..3cfb7a2
--- /dev/null
@@ -0,0 +1,10 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ address-family ipv4
+  redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_as_override/r1/zebra.conf b/tests/topotests/bgp_as_override/r1/zebra.conf
new file mode 100644 (file)
index 0000000..63728eb
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.2/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r2/bgpd.conf b/tests/topotests/bgp_as_override/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..5e3b0c7
--- /dev/null
@@ -0,0 +1,10 @@
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+!
diff --git a/tests/topotests/bgp_as_override/r2/zebra.conf b/tests/topotests/bgp_as_override/r2/zebra.conf
new file mode 100644 (file)
index 0000000..5bdfd02
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface r2-eth0
+ ip address 192.168.1.1/30
+!
+interface r2-eth1
+ ip address 192.168.2.1/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r3/bgpd.conf b/tests/topotests/bgp_as_override/r3/bgpd.conf
new file mode 100644 (file)
index 0000000..6bbe56b
--- /dev/null
@@ -0,0 +1,13 @@
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+ neighbor 192.168.3.1 remote-as external
+ neighbor 192.168.3.1 timers 1 3
+ neighbor 192.168.3.1 timers connect 1
+ address-family ipv4 unicast
+  neighbor 192.168.3.1 as-override
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_as_override/r3/zebra.conf b/tests/topotests/bgp_as_override/r3/zebra.conf
new file mode 100644 (file)
index 0000000..77782be
--- /dev/null
@@ -0,0 +1,9 @@
+!
+interface r3-eth0
+ ip address 192.168.2.2/30
+!
+interface r3-eth1
+ ip address 192.168.3.2/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r4/bgpd.conf b/tests/topotests/bgp_as_override/r4/bgpd.conf
new file mode 100644 (file)
index 0000000..1bdee08
--- /dev/null
@@ -0,0 +1,7 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.3.2 remote-as external
+ neighbor 192.168.3.2 timers 1 3
+ neighbor 192.168.3.2 timers connect 1
+!
diff --git a/tests/topotests/bgp_as_override/r4/zebra.conf b/tests/topotests/bgp_as_override/r4/zebra.conf
new file mode 100644 (file)
index 0000000..71dc595
--- /dev/null
@@ -0,0 +1,6 @@
+!
+interface r4-eth0
+ ip address 192.168.3.1/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/test_bgp_as_override.py b/tests/topotests/bgp_as_override/test_bgp_as_override.py
new file mode 100644 (file)
index 0000000..40085cd
--- /dev/null
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    for routern in range(1, 7):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r2"])
+    switch.add_link(tgen.gears["r3"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r3"])
+    switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_as_override():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    r3 = tgen.gears["r3"]
+    r4 = tgen.gears["r4"]
+
+    def _bgp_converge():
+        output = json.loads(r3.vtysh_cmd("show ip bgp neighbor 192.168.2.1 json"))
+        expected = {
+            "192.168.2.1": {
+                "bgpState": "Established",
+                "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    def _bgp_as_override():
+        output = json.loads(r4.vtysh_cmd("show bgp ipv4 unicast json"))
+        expected = {
+            "routes": {
+                "172.16.255.1/32": [{"valid": True, "path": "65003 65002 65003"}]
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    step("Initial BGP converge")
+    test_func = functools.partial(_bgp_converge)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP convergence on R4"
+
+    step("Check if BGP as-override from R3 works")
+    test_func = functools.partial(_bgp_as_override)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see overriden ASN (65001) from R3"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
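Both this test and the conditional-advertisement helpers that follow rely on topotest.json_cmp() doing a partial match: only keys named in the expected dictionary are checked, nested objects are compared recursively, a value of None asserts that the key is absent from the output, and a return value of None means the output matched. A rough illustration of those rules; json_subset_cmp is a hypothetical stand-in, not the real helper, which additionally matches lists element by element:

    def json_subset_cmp(output, expected):
        # Return None when every expected key matches; otherwise describe
        # the first mismatch. Simplified: lists are compared with plain ==.
        for key, want in expected.items():
            got = output.get(key)
            if want is None:
                if got is not None:
                    return "key {} should be absent".format(key)
            elif isinstance(want, dict):
                if not isinstance(got, dict):
                    return "key {} should be an object".format(key)
                diff = json_subset_cmp(got, want)
                if diff is not None:
                    return diff
            elif got != want:
                return "key {}: {!r} != {!r}".format(key, got, want)
        return None

Returning None on success is what lets these comparison functions be fed straight into run_and_expect(test_func, None, ...).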
index 6153aee4181874e14199791a83f5cfa5d0925fc0..1097be3d709c62de523796c1a39a41d78439cf2f 100644 (file)
@@ -197,11 +197,199 @@ def teardown_module(mod):
     logger.info("=" * 40)
 
 
-def test_bgp_conditional_advertisement():
-    """
-    Test BGP conditional advertisement functionality.
-    """
+def all_routes_advertised(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": [{"protocol": "bgp"}],
+        "192.0.2.1/32": [{"protocol": "bgp"}],
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": [{"protocol": "bgp"}],
+        "203.0.113.1/32": [{"protocol": "bgp"}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def all_routes_withdrawn(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": None,
+        "192.0.2.5/32": None,
+        "10.139.224.0/20": None,
+        "203.0.113.1/32": None,
+    }
+    return topotest.json_cmp(output, expected)
+
+
+# BGP conditional advertisement with route-maps
+# EXIST-MAP, ADV-MAP-1 and RMAP-1
+def exist_map_routes_present(router):
+    return all_routes_advertised(router)
+
+
+def exist_map_routes_not_present(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": None,
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": None,
+        "203.0.113.1/32": [{"protocol": "bgp"}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def non_exist_map_routes_present(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": [{"protocol": "bgp"}],
+        "192.0.2.1/32": None,
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": None,
+        "203.0.113.1/32": [{"protocol": "bgp"}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def non_exist_map_routes_not_present(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": [{"protocol": "bgp"}],
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": [{"protocol": "bgp"}],
+        "203.0.113.1/32": [{"protocol": "bgp"}],
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def exist_map_no_condition_route_map(router):
+    return non_exist_map_routes_present(router)
+
+
+def non_exist_map_no_condition_route_map(router):
+    return all_routes_advertised(router)
+
+
+def exist_map_routes_present_rmap_filter(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": [{"protocol": "bgp"}],
+        "192.0.2.5/32": None,
+        "10.139.224.0/20": [{"protocol": "bgp"}],
+        "203.0.113.1/32": None,
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def exist_map_routes_present_no_rmap_filter(router):
+    return all_routes_advertised(router)
+
+
+def non_exist_map_routes_present_rmap_filter(router):
+    return all_routes_withdrawn(router)
+
+
+def non_exist_map_routes_present_no_rmap_filter(router):
+    return non_exist_map_routes_present(router)
+
+
+def exist_map_routes_not_present_rmap_filter(router):
+    return all_routes_withdrawn(router)
+
+
+def exist_map_routes_not_present_no_rmap_filter(router):
+    return exist_map_routes_not_present(router)
+
+
+def non_exist_map_routes_not_present_rmap_filter(router):
+    return exist_map_routes_present_rmap_filter(router)
+
+
+def non_exist_map_routes_not_present_no_rmap_filter(router):
+    return non_exist_map_routes_not_present(router)
+
+
+# BGP conditional advertisement with route-maps
+# EXIST-MAP, ADV-MAP-2 and RMAP-2
+def exist_map_routes_not_present_rmap2_filter(router):
+    return all_routes_withdrawn(router)
+
+
+def exist_map_routes_not_present_no_rmap2_filter(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": [{"protocol": "bgp"}],
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": [{"protocol": "bgp"}],
+        "203.0.113.1/32": None,
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def non_exist_map_routes_not_present_rmap2_filter(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": None,
+        "192.0.2.1/32": None,
+        "192.0.2.5/32": None,
+        "10.139.224.0/20": None,
+        "203.0.113.1/32": [{"protocol": "bgp", "metric": 911}],
+    }
+    return topotest.json_cmp(output, expected)
 
+
+def non_exist_map_routes_not_present_no_rmap2_filter(router):
+    return non_exist_map_routes_not_present(router)
+
+
+def exist_map_routes_present_rmap2_filter(router):
+    return non_exist_map_routes_not_present_rmap2_filter(router)
+
+
+def exist_map_routes_present_no_rmap2_filter(router):
+    return all_routes_advertised(router)
+
+
+def non_exist_map_routes_present_rmap2_filter(router):
+    return all_routes_withdrawn(router)
+
+
+def non_exist_map_routes_present_no_rmap2_filter(router):
+    output = json.loads(router.vtysh_cmd("show ip route json"))
+    expected = {
+        "0.0.0.0/0": [{"protocol": "bgp"}],
+        "192.0.2.1/32": [{"protocol": "bgp"}],
+        "192.0.2.5/32": [{"protocol": "bgp"}],
+        "10.139.224.0/20": [{"protocol": "bgp"}],
+        "203.0.113.1/32": None,
+    }
+    return topotest.json_cmp(output, expected)
+
+
+def exist_map_routes_present_rmap2_network(router):
+    return non_exist_map_routes_not_present_rmap2_filter(router)
+
+
+def exist_map_routes_present_rmap2_no_network(router):
+    return all_routes_withdrawn(router)
+
+
+def non_exist_map_routes_not_present_rmap2_network(router):
+    return non_exist_map_routes_not_present_rmap2_filter(router)
+
+
+def non_exist_map_routes_not_present_rmap2_no_network(router):
+    return all_routes_withdrawn(router)
+
+
+passed = "PASSED!!!"
+failed = "FAILED!!!"
+
+
+def test_bgp_conditional_advertisement_step1():
     tgen = get_topogen()
     if tgen.routers_have_failure():
         pytest.skip(tgen.errors)
@@ -210,172 +398,9 @@ def test_bgp_conditional_advertisement():
     router2 = tgen.gears["r2"]
     router3 = tgen.gears["r3"]
 
-    passed = "PASSED!!!"
-    failed = "FAILED!!!"
-
-    def _all_routes_advertised(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": [{"protocol": "bgp"}],
-            "192.0.2.1/32": [{"protocol": "bgp"}],
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": [{"protocol": "bgp"}],
-            "203.0.113.1/32": [{"protocol": "bgp"}],
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _all_routes_withdrawn(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": None,
-            "192.0.2.5/32": None,
-            "10.139.224.0/20": None,
-            "203.0.113.1/32": None,
-        }
-        return topotest.json_cmp(output, expected)
-
-    # BGP conditional advertisement with route-maps
-    # EXIST-MAP, ADV-MAP-1 and RMAP-1
-    def _exist_map_routes_present(router):
-        return _all_routes_advertised(router)
-
-    def _exist_map_routes_not_present(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": None,
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": None,
-            "203.0.113.1/32": [{"protocol": "bgp"}],
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _non_exist_map_routes_present(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": [{"protocol": "bgp"}],
-            "192.0.2.1/32": None,
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": None,
-            "203.0.113.1/32": [{"protocol": "bgp"}],
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _non_exist_map_routes_not_present(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": [{"protocol": "bgp"}],
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": [{"protocol": "bgp"}],
-            "203.0.113.1/32": [{"protocol": "bgp"}],
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _exist_map_no_condition_route_map(router):
-        return _non_exist_map_routes_present(router)
-
-    def _non_exist_map_no_condition_route_map(router):
-        return _all_routes_advertised(router)
-
-    def _exist_map_routes_present_rmap_filter(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": [{"protocol": "bgp"}],
-            "192.0.2.5/32": None,
-            "10.139.224.0/20": [{"protocol": "bgp"}],
-            "203.0.113.1/32": None,
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _exist_map_routes_present_no_rmap_filter(router):
-        return _all_routes_advertised(router)
-
-    def _non_exist_map_routes_present_rmap_filter(router):
-        return _all_routes_withdrawn(router)
-
-    def _non_exist_map_routes_present_no_rmap_filter(router):
-        return _non_exist_map_routes_present(router)
-
-    def _exist_map_routes_not_present_rmap_filter(router):
-        return _all_routes_withdrawn(router)
-
-    def _exist_map_routes_not_present_no_rmap_filter(router):
-        return _exist_map_routes_not_present(router)
-
-    def _non_exist_map_routes_not_present_rmap_filter(router):
-        return _exist_map_routes_present_rmap_filter(router)
-
-    def _non_exist_map_routes_not_present_no_rmap_filter(router):
-        return _non_exist_map_routes_not_present(router)
-
-    # BGP conditional advertisement with route-maps
-    # EXIST-MAP, ADV-MAP-2 and RMAP-2
-    def _exist_map_routes_not_present_rmap2_filter(router):
-        return _all_routes_withdrawn(router)
-
-    def _exist_map_routes_not_present_no_rmap2_filter(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": [{"protocol": "bgp"}],
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": [{"protocol": "bgp"}],
-            "203.0.113.1/32": None,
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _non_exist_map_routes_not_present_rmap2_filter(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": None,
-            "192.0.2.1/32": None,
-            "192.0.2.5/32": None,
-            "10.139.224.0/20": None,
-            "203.0.113.1/32": [{"protocol": "bgp", "metric": 911}],
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _non_exist_map_routes_not_present_no_rmap2_filter(router):
-        return _non_exist_map_routes_not_present(router)
-
-    def _exist_map_routes_present_rmap2_filter(router):
-        return _non_exist_map_routes_not_present_rmap2_filter(router)
-
-    def _exist_map_routes_present_no_rmap2_filter(router):
-        return _all_routes_advertised(router)
-
-    def _non_exist_map_routes_present_rmap2_filter(router):
-        return _all_routes_withdrawn(router)
-
-    def _non_exist_map_routes_present_no_rmap2_filter(router):
-        output = json.loads(router.vtysh_cmd("show ip route json"))
-        expected = {
-            "0.0.0.0/0": [{"protocol": "bgp"}],
-            "192.0.2.1/32": [{"protocol": "bgp"}],
-            "192.0.2.5/32": [{"protocol": "bgp"}],
-            "10.139.224.0/20": [{"protocol": "bgp"}],
-            "203.0.113.1/32": None,
-        }
-        return topotest.json_cmp(output, expected)
-
-    def _exist_map_routes_present_rmap2_network(router):
-        return _non_exist_map_routes_not_present_rmap2_filter(router)
-
-    def _exist_map_routes_present_rmap2_no_network(router):
-        return _all_routes_withdrawn(router)
-
-    def _non_exist_map_routes_not_present_rmap2_network(router):
-        return _non_exist_map_routes_not_present_rmap2_filter(router)
-
-    def _non_exist_map_routes_not_present_rmap2_no_network(router):
-        return _all_routes_withdrawn(router)
-
     # TC11: R3 BGP convergence, without advertise-map configuration.
     # All routes are advertised to R3.
-    test_func = functools.partial(_all_routes_advertised, router3)
+    test_func = functools.partial(all_routes_advertised, router3)
     success, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
 
     msg = 'TC11: "router3" BGP convergence - '
@@ -383,6 +408,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step2():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC21: exist-map routes present in R2's BGP table.
     # advertise-map routes present in R2's BGP table are advertised to R3.
     router2.vtysh_cmd(
@@ -394,7 +429,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present, router3)
+    test_func = functools.partial(exist_map_routes_present, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC21: exist-map routes present in "router2" BGP table - '
@@ -402,6 +437,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step3():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC22: exist-map routes not present in R2's BGP table
     # advertise-map routes present in R2's BGP table are withdrawn from R3.
     router1.vtysh_cmd(
@@ -413,7 +458,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_not_present, router3)
+    test_func = functools.partial(exist_map_routes_not_present, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC22: exist-map routes not present in "router2" BGP table - '
@@ -421,6 +466,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step4():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC31: non-exist-map routes not present in R2's BGP table
     # advertise-map routes present in R2's BGP table are advertised to R3.
     router2.vtysh_cmd(
@@ -432,7 +487,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_routes_not_present, router3)
+    test_func = functools.partial(non_exist_map_routes_not_present, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC31: non-exist-map routes not present in "router2" BGP table - '
@@ -440,6 +495,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step5():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC32: non-exist-map routes present in R2's BGP table
     # advertise-map routes present in R2's BGP table are withdrawn from R3.
     router1.vtysh_cmd(
@@ -451,7 +516,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_routes_present, router3)
+    test_func = functools.partial(non_exist_map_routes_present, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC32: non-exist-map routes present in "router2" BGP table - '
@@ -459,6 +524,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step6():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC41: non-exist-map route-map configuration removed in R2.
     # advertise-map routes present in R2's BGP table are advertised to R3.
     router2.vtysh_cmd(
@@ -468,7 +543,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_no_condition_route_map, router3)
+    test_func = functools.partial(non_exist_map_no_condition_route_map, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC41: non-exist-map route-map removed in "router2" - '
@@ -476,6 +551,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step7():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC42: exist-map route-map configuration removed in R2
     # advertise-map routes present in R2's BGP table are withdrawn from R3.
     router2.vtysh_cmd(
@@ -487,7 +572,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_no_condition_route_map, router3)
+    test_func = functools.partial(exist_map_no_condition_route_map, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = 'TC42: exist-map route-map removed in "router2" - '
@@ -495,6 +580,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step8():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC51: exist-map routes present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -510,7 +605,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_rmap_filter, router3)
+    test_func = functools.partial(exist_map_routes_present_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC51: exist-map routes present with route-map filter - "
@@ -518,6 +613,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step9():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC52: exist-map routes present in R2's BGP table, no route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -529,7 +634,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_no_rmap_filter, router3)
+    test_func = functools.partial(exist_map_routes_present_no_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC52: exist-map routes present, no route-map filter - "
@@ -537,6 +642,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step10():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC53: non-exist-map routes present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -549,7 +664,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_routes_present_rmap_filter, router3)
+    test_func = functools.partial(non_exist_map_routes_present_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC53: non-exist-map routes present, with route-map filter - "
@@ -557,6 +672,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step11():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC54: non-exist-map routes present in R2's BGP table, no route-map filter.
     # All routes are advertised to R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -568,7 +693,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_routes_present_no_rmap_filter, router3)
+    test_func = functools.partial(non_exist_map_routes_present_no_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC54: non-exist-map routes present, no route-map filter - "
@@ -576,6 +701,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step12():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC61: exist-map routes not present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 including advertise-map routes.
     router1.vtysh_cmd(
@@ -596,7 +731,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_not_present_rmap_filter, router3)
+    test_func = functools.partial(exist_map_routes_not_present_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC61: exist-map routes not present, route-map filter - "
@@ -604,6 +739,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step13():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC62: exist-map routes not present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -615,7 +760,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_not_present_no_rmap_filter, router3)
+    test_func = functools.partial(exist_map_routes_not_present_no_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC62: exist-map routes not present, no route-map filter - "
@@ -623,6 +768,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step14():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC63: non-exist-map routes not present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -635,9 +790,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(
-        _non_exist_map_routes_not_present_rmap_filter, router3
-    )
+    test_func = functools.partial(non_exist_map_routes_not_present_rmap_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC63: non-exist-map routes not present, route-map filter - "
@@ -645,6 +798,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step15():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC64: non-exist-map routes not present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -657,7 +820,7 @@ def test_bgp_conditional_advertisement():
     )
 
     test_func = functools.partial(
-        _non_exist_map_routes_not_present_no_rmap_filter, router3
+        non_exist_map_routes_not_present_no_rmap_filter, router3
     )
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
@@ -666,6 +829,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step16():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC71: exist-map routes present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 except advertise-map routes.
     router1.vtysh_cmd(
@@ -686,7 +859,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_rmap2_filter, router3)
+    test_func = functools.partial(exist_map_routes_present_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC71: exist-map routes present, route-map filter - "
@@ -694,6 +867,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step17():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC72: exist-map routes present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -705,7 +888,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_no_rmap2_filter, router3)
+    test_func = functools.partial(exist_map_routes_present_no_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC72: exist-map routes present, no route-map filter - "
@@ -713,6 +896,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step18():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC73: non-exist-map routes present in R2's BGP table, with route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -725,7 +918,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_non_exist_map_routes_present_rmap2_filter, router3)
+    test_func = functools.partial(non_exist_map_routes_present_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC73: non-exist-map routes present, route-map filter - "
@@ -733,6 +926,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step19():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC74: non-exist-map routes present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -744,9 +947,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(
-        _non_exist_map_routes_present_no_rmap2_filter, router3
-    )
+    test_func = functools.partial(non_exist_map_routes_present_no_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC74: non-exist-map routes present, no route-map filter - "
@@ -754,6 +955,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step20():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC81: exist-map routes not present in R2's BGP table, with route-map filter.
     # All routes are withdrawn from R3 including advertise-map routes.
     router1.vtysh_cmd(
@@ -774,7 +985,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_not_present_rmap2_filter, router3)
+    test_func = functools.partial(exist_map_routes_not_present_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC81: exist-map routes not present, route-map filter - "
@@ -782,6 +993,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step21():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC82: exist-map routes not present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -793,9 +1014,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(
-        _exist_map_routes_not_present_no_rmap2_filter, router3
-    )
+    test_func = functools.partial(exist_map_routes_not_present_no_rmap2_filter, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC82: exist-map routes not present, no route-map filter - "
@@ -803,6 +1022,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step22():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC83: non-exist-map routes not present in R2's BGP table, with route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -816,7 +1045,7 @@ def test_bgp_conditional_advertisement():
     )
 
     test_func = functools.partial(
-        _non_exist_map_routes_not_present_rmap2_filter, router3
+        non_exist_map_routes_not_present_rmap2_filter, router3
     )
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
@@ -825,6 +1054,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step23():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC84: non-exist-map routes not present in R2's BGP table, without route-map filter.
     # All routes are advertised to R3 including advertise-map routes.
     router2.vtysh_cmd(
@@ -837,7 +1076,7 @@ def test_bgp_conditional_advertisement():
     )
 
     test_func = functools.partial(
-        _non_exist_map_routes_not_present_no_rmap2_filter, router3
+        non_exist_map_routes_not_present_no_rmap2_filter, router3
     )
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
@@ -846,6 +1085,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step24():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC91: exist-map routes present in R2's BGP table, with route-map filter and network.
     # All routes are advertised to R3 including advertise-map routes.
     router1.vtysh_cmd(
@@ -866,7 +1115,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_rmap2_network, router3)
+    test_func = functools.partial(exist_map_routes_present_rmap2_network, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC91: exist-map routes present, route-map filter and network - "
@@ -874,6 +1123,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step25():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC92: exist-map routes present in R2's BGP table, with route-map filter and no network.
     # All routes are advertised to R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -885,7 +1144,7 @@ def test_bgp_conditional_advertisement():
         """
     )
 
-    test_func = functools.partial(_exist_map_routes_present_rmap2_no_network, router3)
+    test_func = functools.partial(exist_map_routes_present_rmap2_no_network, router3)
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
     msg = "TC92: exist-map routes present, route-map filter and no network - "
@@ -893,6 +1152,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step26():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC93: non-exist-map routes not present in R2's BGP table, with route-map filter and network.
     # All routes are advertised to R3 including advertise-map routes.
     router1.vtysh_cmd(
@@ -914,7 +1183,7 @@ def test_bgp_conditional_advertisement():
     )
 
     test_func = functools.partial(
-        _non_exist_map_routes_not_present_rmap2_network, router3
+        non_exist_map_routes_not_present_rmap2_network, router3
     )
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
 
@@ -923,6 +1192,16 @@ def test_bgp_conditional_advertisement():
 
     logger.info(msg + passed)
 
+
+def test_bgp_conditional_advertisement_step27():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router1 = tgen.gears["r1"]
+    router2 = tgen.gears["r2"]
+    router3 = tgen.gears["r3"]
+
     # TC94: non-exist-map routes not present in R2's BGP table, with route-map filter and no network.
     # All routes are advertised to R3 except advertise-map routes.
     router2.vtysh_cmd(
@@ -935,7 +1214,7 @@ def test_bgp_conditional_advertisement():
     )
 
     test_func = functools.partial(
-        _non_exist_map_routes_not_present_rmap2_no_network, router3
+        non_exist_map_routes_not_present_rmap2_no_network, router3
     )
     success, result = topotest.run_and_expect(test_func, None, count=90, wait=1)
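
The hunks above all follow the same polling pattern: functools.partial binds the router argument into a zero-argument callable, and topotest.run_and_expect retries it (here up to 90 times, 1 second apart) until it returns the expected value (None). Below is a minimal, stdlib-only sketch of that retry idea; check_routes and the local run_and_expect are hypothetical stand-ins, not the topotest implementation.

import functools
import time


def run_and_expect(func, expected, count=90, wait=1):
    # Call func() up to `count` times, `wait` seconds apart, until it
    # returns `expected`; report success plus the last result seen.
    result = func()
    for _ in range(count - 1):
        if result == expected:
            return True, result
        time.sleep(wait)
        result = func()
    return result == expected, result


def check_routes(router_name):
    # Hypothetical check; the real tests compare "show ip bgp json" output
    # and return None when it matches the expected routes.
    return None


test_func = functools.partial(check_routes, "r3")
success, result = run_and_expect(test_func, None, count=3, wait=0)
print(success, result)
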
 
diff --git a/tests/topotests/bgp_default_originate/bgp_default_orginate_vrf.json b/tests/topotests/bgp_default_originate/bgp_default_orginate_vrf.json
new file mode 100644 (file)
index 0000000..1a3d66b
--- /dev/null
@@ -0,0 +1,325 @@
+{
+    "address_types": [
+        "ipv4",
+        "ipv6"
+    ],
+    "ipv4base": "192.168.0.0",
+    "ipv4mask": 3024,
+    "ipv6base": "fd00::",
+    "ipv6mask": 64,
+    "link_ip_start": {
+        "ipv4": "192.168.0.0",
+        "v4mask": 24,
+        "ipv6": "fd00::",
+        "v6mask": 64
+    },
+    "lo_prefix": {
+        "ipv4": "1.0.",
+        "v4mask": 32,
+        "ipv6": "2001:db8:f::",
+        "v6mask": 128
+    },
+    "routers": {
+        "r0": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "bgp": {
+                "local_as": "100",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r1": {
+                                    "dest_link": {
+                                        "r0": {}
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r1": {
+                                    "dest_link": {
+                                        "r0": {}
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "r1": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r0": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r2": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "bgp": [
+                {
+                    "local_as": "1000",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r0": {
+                                        "dest_link": {
+                                            "r1": {}
+                                        }
+                                    },
+                                    "r2": {
+                                        "dest_link": {
+                                            "r1": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r0": {
+                                        "dest_link": {
+                                            "r1": {}
+                                        }
+                                    },
+                                    "r2": {
+                                        "dest_link": {
+                                            "r1": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            ]
+        },
+        "r2": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r3": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "vrf": "RED"
+                }
+            },
+            "vrfs": [
+                {
+                    "name": "RED",
+                    "id": "1"
+                }
+            ],
+            "bgp": [
+                {
+                    "local_as": "2000",
+                    "vrf": "RED",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r3": {
+                                        "dest_link": {
+                                            "r2": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r3": {
+                                        "dest_link": {
+                                            "r2": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                },
+                {
+                    "local_as": "1000",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r1": {
+                                        "dest_link": {
+                                            "r2": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r1": {
+                                        "dest_link": {
+                                            "r2": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            ]
+        },
+        "r3": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback",
+                    "vrf": "RED"
+                },
+                "r2": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "vrf": "RED"
+                },
+                "r4": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "vrfs": [
+                {
+                    "name": "RED",
+                    "id": "1"
+                }
+            ],
+            "bgp": [
+                {
+                    "local_as": "3000",
+                    "vrf": "RED",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r2": {
+                                        "dest_link": {
+                                            "r3": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r2": {
+                                        "dest_link": {
+                                            "r3": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                },
+                {
+                    "local_as": "400",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r4": {
+                                        "dest_link": {
+                                            "r3": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r4": {
+                                        "dest_link": {
+                                            "r3": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            ]
+        },
+        "r4": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r3": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "bgp": [
+                {
+                    "local_as": "500",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r3": {
+                                        "dest_link": {
+                                            "r4": {}
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        "ipv6": {
+                            "unicast": {
+                                "neighbor": {
+                                    "r3": {
+                                        "dest_link": {
+                                            "r4": {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            ]
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/topotests/bgp_default_originate/bgp_default_originate_2links.json b/tests/topotests/bgp_default_originate/bgp_default_originate_2links.json
new file mode 100644 (file)
index 0000000..9e98235
--- /dev/null
@@ -0,0 +1,136 @@
+{
+    "address_types": ["ipv4", "ipv6"],
+    "ipv4base": "192.168.0.0",
+    "ipv4mask": 3024,
+    "ipv6base": "fd00::",
+    "ipv6mask": 64,
+    "link_ip_start": {
+        "ipv4": "192.168.0.0",
+        "v4mask": 24,
+        "ipv6": "fd00::",
+        "v6mask": 64
+    },
+    "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:db8:f::", "v6mask": 128},
+    "routers": {
+        "r0": {
+            "links": {
+                "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+                "r1": {"ipv4": "auto", "ipv6": "auto"}
+            },
+            "bgp": {
+                "local_as": "100",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"neighbor": {"r1": {"dest_link": {"r0": {}}}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"neighbor": {"r1": {"dest_link": {"r0": {}}}}}
+                    }
+                }
+            }
+        },
+        "r1": {
+            "links": {
+                "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+                "r0": {"ipv4": "auto", "ipv6": "auto"},
+                "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+                "r2-link2": {"ipv4": "auto", "ipv6": "auto"}
+            },
+            "bgp": {
+                "local_as": "200",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r0": {"dest_link": {"r1": {}}},
+                                "r2": {"dest_link": {"r1-link1": {}, "r1-link2": {}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r0": {"dest_link": {"r1": {}}},
+                                "r2": {"dest_link": {"r1-link1": {}, "r1-link2": {}}}
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "r2": {
+            "links": {
+                "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+                "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+                "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+                "r3": {"ipv4": "auto", "ipv6": "auto"}
+            },
+            "bgp": {
+                "local_as": "300",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r1": {"dest_link": {"r2-link1": {}, "r2-link2": {}}},
+                                "r3": {"dest_link": {"r2": {}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r1": {"dest_link": {"r2-link1": {}, "r2-link2": {}}},
+                                "r3": {"dest_link": {"r2": {}}}
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "r3": {
+            "links": {
+                "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+                "r2": {"ipv4": "auto", "ipv6": "auto"},
+                "r4": {"ipv4": "auto", "ipv6": "auto"}
+            },
+            "bgp": {
+                "local_as": "400",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {}}},
+                                "r4": {"dest_link": {"r3": {}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {}}},
+                                "r4": {"dest_link": {"r3": {}}}
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "r4": {
+            "links": {
+                "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+                "r3": {"ipv4": "auto", "ipv6": "auto"}
+            },
+            "bgp": {
+                "local_as": "500",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"neighbor": {"r3": {"dest_link": {"r4": {}}}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"neighbor": {"r3": {"dest_link": {"r4": {}}}}}
+                    }
+                }
+            }
+        }
+    }
+}
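
The JSON above is the usual topotests topology description: per-router links (with "auto" addressing driven by link_ip_start/lo_prefix) plus a "bgp" section that is either a single instance or a list of instances (as in the VRF file earlier). As a rough, stdlib-only illustration of that structure (the hard-coded file path is an assumption; the tests themselves load this file through Topogen and build_config_from_json):

import json

with open("bgp_default_originate_2links.json") as f:
    topo = json.load(f)

for name, router in topo["routers"].items():
    bgp = router.get("bgp")
    if not bgp:
        continue
    # "bgp" may be a single dict or a list of instances (e.g. one per VRF).
    for inst in (bgp if isinstance(bgp, list) else [bgp]):
        neighbors = (
            inst.get("address_family", {})
            .get("ipv4", {})
            .get("unicast", {})
            .get("neighbor", {})
        )
        print(
            name,
            "AS", inst["local_as"],
            "vrf", inst.get("vrf", "default"),
            "ipv4 neighbors:", sorted(neighbors),
        )
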
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
new file mode 100644 (file)
index 0000000..c8cdc7e
--- /dev/null
@@ -0,0 +1,1414 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+1. Verify default-originate route with default static and network command
+2. Verify default-originate route with aggregate summary command
+"""
+import os
+import sys
+import time
+import pytest
+import datetime
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+
+from lib.bgp import (
+    verify_bgp_convergence,
+    create_router_bgp,
+    verify_bgp_rib,
+    get_dut_as_number,
+    verify_rib_default_route,
+    verify_fib_default_route,
+)
+from lib.common_config import (
+    verify_fib_routes,
+    step,
+    run_frr_cmd,
+    get_frr_ipv6_linklocal,
+    start_topology,
+    apply_raw_config,
+    write_test_header,
+    check_address_types,
+    write_test_footer,
+    reset_config_on_routers,
+    create_static_routes,
+    check_router_status,
+)
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+NETWORK1_2 = {"ipv4": "198.51.1.2/32", "ipv6": "2001:DB8::1:2/128"}
+NETWORK1_3 = {"ipv4": "198.51.1.3/32", "ipv6": "2001:DB8::1:3/128"}
+NETWORK1_4 = {"ipv4": "198.51.1.4/32", "ipv6": "2001:DB8::1:4/128"}
+NETWORK1_5 = {"ipv4": "198.51.1.5/32", "ipv6": "2001:DB8::1:5/128"}
+
+ipv4_uptime_dict = {
+    "r2": {
+        "static_routes": [
+            {"network": "0.0.0.0/0"},
+        ]
+    }
+}
+
+ipv6_uptime_dict = {
+    "r2": {
+        "static_routes": [
+            {"network": "::/0"},
+        ]
+    }
+}
+
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/bgp_default_originate_2links.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    global ADDR_TYPES
+    global BGP_CONVERGENCE
+    global DEFAULT_ROUTES
+    global DEFAULT_ROUTE_NXT_HOP_LINK1, DEFAULT_ROUTE_NXT_HOP_LINK2
+    ADDR_TYPES = check_address_types()
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    interface = topo["routers"]["r1"]["links"]["r2-link1"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_LINK1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    interface = topo["routers"]["r1"]["links"]["r2-link2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_LINK2 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#                      Local API's
+#
+#####################################################
+
+
+def get_rib_route_uptime(tgen, addr_type, dut, input_dict):
+    """
+    Get route uptime(s) from the RIB using "show ip route"
+
+    Parameters
+    ----------
+    * `tgen` : topogen object
+    * `addr_type` : ip type, ipv4/ipv6
+    * `dut`: Device Under Test, for which user wants to test the data
+    * `input_dict` : input dict, has details of static routes
+
+    Usage
+    -----
+    # Creating static routes for r1
+     input_dict_r1 = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": "147.10.13.4/32"
+                    },
+                   {
+                       "network": "147.10.12.0/24"
+                   },
+                    {
+                        "network": "147.10.13.4/32"
+                    },
+                   {
+                       "network": "147.10.13.4/32"
+                   },
+                   {
+                       "network": "147.10.13.4/32"
+                   }
+                ]
+            }
+    }
+
+
+    Returns
+    -------
+    errormsg(str) or list of uptime strings
+    """
+
+    logger.info("Entering lib API: get_rib_route_uptime()")
+    route_time = []
+    out_route_dict = {}
+    router_list = tgen.routers()
+    for routerInput in input_dict.keys():
+        for router, rnode in router_list.items():
+            if router != dut:
+                continue
+
+            logger.info("Checking router %s RIB:", router)
+
+            # Verifying RIB routes
+            if addr_type == "ipv4":
+                command = "show ip route"
+            else:
+                command = "show ipv6 route"
+
+            if "static_routes" in input_dict[routerInput]:
+                static_routes = input_dict[routerInput]["static_routes"]
+
+                for static_route in static_routes:
+                    if "vrf" in static_route and static_route["vrf"] is not None:
+
+                        logger.info(
+                            "[DUT: {}]: Verifying routes for VRF:"
+                            " {}".format(router, static_route["vrf"])
+                        )
+                        cmd = "{} vrf {}".format(command, static_route["vrf"])
+
+                    else:
+                        cmd = "{}".format(command)
+
+                    cmd = "{} json".format(cmd)
+
+                    rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+                    if bool(rib_routes_json) is False:
+                        errormsg = "No route found in rib of router {}..".format(router)
+                        return errormsg
+                    network = static_route["network"]
+                    route_time.append(rib_routes_json[network][0]["uptime"])
+
+    logger.info("Exiting lib API: get_rib_route_uptime()")
+    return route_time
+
+
+def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None):
+    """
+    time_stamp_before : list of uptime strings captured before the change
+    time_stamp_after : list of uptime strings captured after the change
+    """
+    uptime_before = datetime.datetime.strptime(time_stamp_before[0], "%H:%M:%S")
+    uptime_after = datetime.datetime.strptime(time_stamp_after[0], "%H:%M:%S")
+
+    if incremented is True:
+        if uptime_before < uptime_after:
+            logger.info(
+                "  The uptime has incremented from [{}] to [{}] .......PASSED ".format(
+                    time_stamp_before, time_stamp_after
+                )
+            )
+            return True
+        else:
+            logger.error(
+                "  The uptime was expected to increment from [{}] to [{}] .......FAILED ".format(
+                    time_stamp_before, time_stamp_after
+                )
+            )
+            return False
+    else:
+        logger.info(
+            "  The uptime has not incremented from [{}] to [{}] ".format(
+                time_stamp_before, time_stamp_after
+            )
+        )
+        return True
+
+
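
verify_the_uptime() above compares the "%H:%M:%S" uptime strings that get_rib_route_uptime() collects from the RIB JSON. A tiny self-contained sketch of that comparison, using made-up timestamps (not part of the test flow): a route whose uptime keeps growing across two snapshots was not withdrawn and re-installed, i.e. not reset.

import datetime

before = ["00:00:42"]  # snapshot taken before a config change
after = ["00:01:10"]   # snapshot taken after the change

t_before = datetime.datetime.strptime(before[0], "%H:%M:%S")
t_after = datetime.datetime.strptime(after[0], "%H:%M:%S")

# Larger uptime afterwards => the route kept aging, so it was not reset.
print("route kept its uptime (not reset):", t_before < t_after)
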
+#####################################################
+#
+#                      Testcases
+#
+#####################################################
+
+
+def test_verify_bgp_default_originate_with_default_static_route_p1(request):
+    """
+    Summary: "Verify default-originate route with default static and network command  "
+
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE, DEFAULT_ROUTE_NXT_HOP_LINK1, DEFAULT_ROUTE_NXT_HOP_LINK2, DEFAULT_ROUTES
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    step("Configure 2 links between R1 and R2")
+    step("Configure IPv4 and IPv6 eBGP between R1 and R2 on both the links")
+    step("Configure default-originate on R1 IPv4 and IPv6 BGP session on link-1 only")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {"r2": {"dest-link": "r1-link1"}}
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {"r2": {"dest-link": "r1-link1"}}
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify IPv4/IPv6 default-originate routes are present on R2 with nexthop as link-1")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+                    }
+                ]
+            }
+        }
+
+        result = verify_fib_routes(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure network command on R1 (0.0.0.0/0 and 0::0/0) for IPv4 and IPv6 address family"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        input_advertise = {
+            "r1": {
+                "bgp": {
+                    "address_family": {
+                        addr_type: {
+                            "unicast": {
+                                "advertise_networks": [
+                                    {"network": [DEFAULT_ROUTES[addr_type]]}
+                                ]
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        result = create_router_bgp(tgen, topo, input_advertise)
+        assert result is True, "Testcase {} :Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Verify no change on IPv4/IPv6 default-originate route advertised from link-1")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 routes also get advertised from link-2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Take an uptime snapshot before removing default-originate from R1 link-1 IPv4 and IPv6 address family"
+    )
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove default-originate from R1 link-1 IPv4 and IPv6 address family")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Routes must be learned from network command")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot after removing the default-originate on R1")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After removing the default-originate, the uptime should get reset for the route learned on link-1"
+    )
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot before configuring default-originate")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "Configure default-originate on R1 link-1 again for IPv4 and IPv6 address family"
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify no change in R2 routing and BGP tables for both the links")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot after configuring default-originate")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After configuring the default-originate, the uptime should not get reset for the route learned on link-1"
+    )
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=True)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=True)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot before removing network 0.0.0.0/0 and 0::0/0")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove network command from R1 IPv4/IPv6 address family ")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        input_advertise = {
+            "r1": {
+                "bgp": {
+                    "address_family": {
+                        addr_type: {
+                            "unicast": {
+                                "advertise_networks": [
+                                    {
+                                        "network": [DEFAULT_ROUTES[addr_type]],
+                                        "delete": True,
+                                    }
+                                ]
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        result = create_router_bgp(tgen, topo, input_advertise)
+        assert result is True, "Testcase {} :Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Verify 0.0.0.0/0 and 0::0/0 routes get removed from link-2 and default-originate IPv4/IPv6 routes are learned on link-1"
+    )
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Route from link2 is not expected \n Error: {}".format(
+        tc_name, result
+    )
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed\n Route from link2 is not expected \n  Error: {}".format(
+        tc_name, result
+    )
+
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After removing the network command on R1, verify that the uptime got reset on R2"
+    )
+
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot before configuring the static default routes")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "Configure static default route for IPv4 and IPv6 (0.0.0.0/0 next-hop Null0 and 0::0/0 next-hop Null0) on R1"
+    )
+    static_routes_input = {
+        "r1": {
+            "static_routes": [
+                {
+                    "network": "0.0.0.0/0",
+                    "next_hop": NEXT_HOP_IP["ipv4"],
+                },
+                {
+                    "network": "0::0/0",
+                    "next_hop": NEXT_HOP_IP["ipv6"],
+                },
+            ]
+        }
+    }
+    result = create_static_routes(tgen, static_routes_input)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify IPv4 and IPv6 static routes are configured and up on R1")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": "0.0.0.0/0",
+                        "next_hop": NEXT_HOP_IP["ipv4"],
+                    },
+                    {
+                        "network": "0::0/0",
+                        "next_hop": NEXT_HOP_IP["ipv6"],
+                    },
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure redistribute static on IPv4 and IPv6 address family")
+    redistribute_static = {
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify no change on IPv4/IPv6 default-originate route advertised from link-1")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 routes also get advertised from link-2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed\n Best path should be advertised in routes\n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Take an uptime snapshot before removing default-originate")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove default-originate from link-1 for IPv4 and IPv6 neighbors")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot after removing default-originate")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Verify the uptime; it should get reset")
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify no change on IPv4/IPv6 default-originate route advertised from link-1")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Take an uptime snapshot before configuring default-originate")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        " Configure default-originate on link-1 again for IPv4 and IPv6 address family"
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify No change on IPv4/IPv6  default-originate route advertised from link1")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Taking uptime snapshot after  configuring  default originate")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("After configuring the default originate the uptime should not get reset ")
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Taking uptime snapshot before  removing   redisctribute static ")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove redistribute static from IPv4 and IPv6 address family ")
+    input_dict_1 = {
+        "r1": {
+            "bgp": {
+                "local_as": get_dut_as_number(tgen, dut="r1"),
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [{"redist_type": "static", "delete": True}]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [{"redist_type": "static", "delete": True}]
+                        }
+                    },
+                },
+            }
+        }
+    }
+
+    result = create_router_bgp(tgen, topo, input_dict_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify No change on IPv4/IPv6  default-originate route advertised from link1")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Taking uptime snapshot before  removing   redisctribute static ")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("After removing default originate the route uptime should get reset ")
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=True)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=True)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_with_aggregate_summary_p1(request):
+    """
+    Summary: "Verify default-originate route with aggregate summary command"
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Configure default-originate on R1 IPv4 and IPv6 BGP session link-1 only")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {"r2": {"dest-link": "r1-link1"}}
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {"r2": {"dest-link": "r1-link1"}}
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify IPv4/IPv6 default originate routes present on R2 nexthop as link-1,on R2"
+    )
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Configure 5 static  route for IPv4 and IPv6  on R0")
+    for addr_type in ADDR_TYPES:
+        input_advertise = {
+            "r1": {
+                "bgp": {
+                    "address_family": {
+                        addr_type: {
+                            "unicast": {
+                                "advertise_networks": [
+                                    {
+                                        "network": [NETWORK1_1[addr_type]],
+                                        "next_hop": NEXT_HOP_IP[addr_type],
+                                    },
+                                    {
+                                        "network": [NETWORK1_2[addr_type]],
+                                        "next_hop": NEXT_HOP_IP[addr_type],
+                                    },
+                                    {
+                                        "network": [NETWORK1_3[addr_type]],
+                                        "next_hop": NEXT_HOP_IP[addr_type],
+                                    },
+                                    {
+                                        "network": [NETWORK1_4[addr_type]],
+                                        "next_hop": NEXT_HOP_IP[addr_type],
+                                    },
+                                    {
+                                        "network": [NETWORK1_5[addr_type]],
+                                        "next_hop": NEXT_HOP_IP[addr_type],
+                                    },
+                                ]
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        result = create_router_bgp(tgen, topo, input_advertise)
+        assert result is True, "Testcase {} :Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Before configuring the aggregate route taking uptime snapshot  ")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Configure aggregate summary command  for IPv4 and IPv6 address family ")
+    local_as = get_dut_as_number(tgen, dut="r1")
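+    # 'aggregate-address 0.0.0.0/0 summary-only' (and '0::0/0' for IPv6) originates a
+    # summary default route toward the BGP peers while suppressing the more-specific
+    # component routes from being advertised.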
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "router bgp {}".format(local_as),
+                "address-family ipv4 unicast",
+                "aggregate-address {} summary-only".format("0.0.0.0/0 "),
+                "exit-address-family",
+                "address-family ipv6 unicast",
+                "aggregate-address {} summary-only".format("0::0/0"),
+                "exit-address-family",
+            ]
+        }
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "verify that no change on IPv4/IPv6  default-originate route advertised from link1  0.0.0.0/0 and 0::0/0 route also get advertised from link-2    on R2"
+    )
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("After configuring the aggregate route taking uptime snapshot  ")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After Configuring  the aggregate route uptime should get reset for link-1 learn route"
+    )
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Before removing default originate  taking uptime snapshot ")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove default originate from R1 link -1 IPv4 and IPv6 address family")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {"dest-link": "r1-link1", "delete": True}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "verify that no change on IPv4/IPv6  default-originate route advertised from link1  0.0.0.0/0 and 0::0/0 route also get advertised from link-2    on R2"
+    )
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("After removing default origin taking uptime snapshot  ")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After removing the default-originate uptime should get reset for link-1 learn route"
+    )
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Before Configuring default origin taking uptime snapshot ")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "Configure default-originate on R1 link-1 again for IPv4 and IPv6 address family"
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "default_originate": {
+                                "r2": {
+                                    "dest-link": "r1-link1",
+                                }
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("After Configuring  default originate  taking uptime snapshot")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step(
+        "After Configuring  the default-originate uptime should get reset for link-1 learn route"
+    )
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Before removing aggregate -summary command taking the uptime snapshot ")
+    uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("remove aggregate summary command  for IPv4 and IPv6 address family ")
+    local_as = get_dut_as_number(tgen, dut="r1")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "router bgp {}".format(local_as),
+                "address-family ipv4 unicast",
+                "no aggregate-address {} summary-only".format("0.0.0.0/0"),
+                "exit-address-family",
+                "address-family ipv6 unicast",
+                "no aggregate-address {} summary-only".format("0::0/0"),
+                "exit-address-family",
+            ]
+        }
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify Default-originate IPv4/IPv6 route learn on link-1 ")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 route get removed from link-2 ")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("After removing aggregate -summary command taking the uptime snapshot ")
+    uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+    uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("After removing aggregate command uptime should get reset ")
+    result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index ee71ae16e0ad8595f86f9d2273c576165c1e6e4c..814272374a8e7cb28ab5b1f8ae309e523da4cb26 100644 (file)
@@ -82,6 +82,8 @@ from lib.common_config import (
 )
 
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Save the Current Working Directory to find configuration files.
 CWD = os.path.dirname(os.path.realpath(__file__))
 sys.path.append(os.path.join(CWD, "../"))
index a9987a8f963c08b193b90e698470c0cd60e9e859..8e6f9306335d0aec10f4bb49822fdba7441360c9 100644 (file)
@@ -81,6 +81,8 @@ from lib.common_config import (
 )
 
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Save the Current Working Directory to find configuration files.
 CWD = os.path.dirname(os.path.realpath(__file__))
 sys.path.append(os.path.join(CWD, "../"))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
new file mode 100644 (file)
index 0000000..9e52504
--- /dev/null
@@ -0,0 +1,2540 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+10. Verify default-originate route after BGP and FRR process restart
+11. Verify default-originate route after shut/no shut and clear BGP neighbor
+"""
+import os
+import sys
+import time
+import pytest
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+    verify_bgp_convergence,
+    create_router_bgp,
+    modify_as_number,
+    clear_bgp,
+    verify_bgp_rib,
+    get_dut_as_number,
+    verify_rib_default_route,
+    verify_fib_default_route,
+)
+from lib.common_config import (
+    interface_status,
+    verify_prefix_lists,
+    verify_fib_routes,
+    kill_router_daemons,
+    start_router_daemons,
+    shutdown_bringup_interface,
+    step,
+    required_linux_kernel_version,
+    stop_router,
+    start_router,
+    create_route_maps,
+    create_prefix_lists,
+    get_frr_ipv6_linklocal,
+    start_topology,
+    write_test_header,
+    check_address_types,
+    write_test_footer,
+    reset_config_on_routers,
+    create_static_routes,
+    check_router_status,
+)
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+# Global variables
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+NETWORK2_1 = {"ipv4": "198.51.1.2/32", "ipv6": "2001:DB8::1:2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.15")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls micronet initialization functions.
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    global ADDR_TYPES
+    global BGP_CONVERGENCE
+    global DEFAULT_ROUTES
+    global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+    global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK
+    global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+    global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK
+    global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+    ADDR_TYPES = check_address_types()
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    # These are the global variables used throughout the file; they can be populated only after the topology is built.
+
+    r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+    r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv4"
+    ].split("/")[0]
+    r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+    r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv6"
+    ].split("/")[0]
+
+    r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+    r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+    r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+    r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv4"
+    ].split("/")[0]
+    r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+    r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv6"
+    ].split("/")[0]
+
+    r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+    r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+    r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    R0_NETWORK_LOOPBACK = {
+        "ipv4": r0_loopback_address_ipv4,
+        "ipv6": r0_loopback_address_ipv6,
+    }
+    R0_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r0_loopback_address_ipv4_nxt_hop,
+        "ipv6": r0_loopback_address_ipv6_nxt_hop,
+    }
+
+    R1_NETWORK_LOOPBACK = {
+        "ipv4": r1_loopback_address_ipv4,
+        "ipv6": r1_loopback_address_ipv6,
+    }
+
+    R0_NETWORK_CONNECTED = {
+        "ipv4": r0_connected_address_ipv4,
+        "ipv6": r0_connected_address_ipv6,
+    }
+    R0_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r0_loopback_address_ipv4_nxt_hop,
+        "ipv6": r0_loopback_address_ipv6_nxt_hop,
+    }
+
+    R1_NETWORK_CONNECTED = {
+        "ipv4": r1_connected_address_ipv4,
+        "ipv6": r1_connected_address_ipv6,
+    }
+    R1_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r1_loopback_address_ipv4_nxt_hop,
+        "ipv6": r1_loopback_address_ipv6_nxt_hop,
+    }
+
+    R4_NETWORK_LOOPBACK = {
+        "ipv4": r4_loopback_address_ipv4,
+        "ipv6": r4_loopback_address_ipv6,
+    }
+    R4_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r4_loopback_address_ipv4_nxt_hop,
+        "ipv6": r4_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_LOOPBACK = {
+        "ipv4": r3_loopback_address_ipv4,
+        "ipv6": r3_loopback_address_ipv6,
+    }
+    R4_NETWORK_CONNECTED = {
+        "ipv4": r4_connected_address_ipv4,
+        "ipv6": r4_connected_address_ipv6,
+    }
+    R4_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r4_loopback_address_ipv4_nxt_hop,
+        "ipv6": r4_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_CONNECTED = {
+        "ipv4": r3_connected_address_ipv4,
+        "ipv6": r3_connected_address_ipv6,
+    }
+    R3_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r3_loopback_address_ipv4_nxt_hop,
+        "ipv6": r3_loopback_address_ipv6_nxt_hop,
+    }
+
+    # populating the nexthop for default routes
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
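+    # On R2 the IPv6 default routes are expected with the peer's link-local address on the
+    # R2-facing interface as next-hop, which is why get_frr_ipv6_linklocal() is used above
+    # instead of the global IPv6 address.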
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#                      Testcases
+#
+#####################################################
+
+
+def test_verify_default_originate_after_BGP_and_FRR_restart_p2(request):
+    """
+    Summary: "Verify default-originate route after BGP and FRR process restart "
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+    global topo
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+    step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+    input_dict = {
+        "r0": {
+            "bgp": {
+                "local_as": 999,
+            }
+        },
+        "r1": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r2": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+        "r4": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+    try:
+        assert result is True
+    except AssertionError:
+        logger.info("Expected behaviour: {}".format(result))
+        logger.info("BGP config is not created because of invalid ASNs")
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert (
+        BGP_CONVERGENCE is True
+    ), " Failed convergence after chaning the AS number  :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Configure IPv4 and IPv6 static route (Sv4 , Sv6) on R0 and (S1v4, S1v6)on R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed to configure the static route on R0 \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} :  Failed to configure the static route on R4  \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify IPv4 and IPv6 static route are configured and up on R0")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed  Route {} not found in R0 FIB  \n Error: {}".format(
+            tc_name, NETWORK1_1, result
+        )
+
+    step("verify IPv4 and IPv6 static route are configured and up on R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed  Route {} not found in R4 FIB \n Error: {}".format(
+            tc_name, NETWORK2_1, result
+        )
+
+    step(
+        "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family"
+    )
+    redistribute_static = {
+        "r0": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the static route  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("verify IPv4 and IPv6 static route are configured and up on R1")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed : Redistributed routes from R0 is not learned in Router R1 RIB \n Error: {}".format(
+            tc_name, result
+        )
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed : Redistributed routes from R0 is not learned in Router R1 FIB \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify IPv4 and IPv6 static route are configured and up on R3")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed : Redistributed routes from R4 is not learned in Router R3 RIB \n Error: {}".format(
+            tc_name, result
+        )
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R3_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R3_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R3_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Redistributed routes from R4 is not learned in Router R3 FIB \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure IPv4 and IPv6 prefix-list on R1 for (Sv4 , Sv6) route")
+    input_dict_3 = {
+        "r1": {
+            "prefix_lists": {
+                "ipv4": {
+                    "Pv4": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv4"],
+                            "action": "permit",
+                        }
+                    ]
+                },
+                "ipv6": {
+                    "Pv6": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv6"],
+                            "action": "permit",
+                        }
+                    ]
+                },
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the prefix lists \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Verify the Prefix - lists")
+    input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
+    result = verify_prefix_lists(tgen, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to verify the prefix lists in router R3 \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Configure IPv4 (RMv4)  and IPv6 (RMv6)  route-map on R1")
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route-map \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        " Configure default originate with route-map RMv4 and RMv6 for IPv4 and IPv6 bgp neighbors on R1 ( R1-R2) "
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the default-originate in R1 towards R2 \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("verify IPv4 and IPv6 default route received on R2 with R1 nexthop ")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Failed : Default routes are not learned in  R2 FIB \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Failed : Default routes are not learned in  R2 RIB\n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Configure redistribute connected and static on R1 IPv4 and IPv6 address family"
+    )
+    redistribute_static = {
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"redistribute": [{"redist_type": "connected"}]}
+                    },
+                    "ipv6": {
+                        "unicast": {"redistribute": [{"redist_type": "connected"}]}
+                    },
+                }
+            }
+        },
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify IPv4 and IPv6 static and loopback route advertised from R4 and R0  are received on R2"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(" Configure default-originate on R3 for R3 to R2 IPv4 and IPv6 BGP neighbors ")
+    local_as = get_dut_as_number(tgen, dut="r3")
+    default_originate_config = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    STEP = """After configuring the Default Originate From R3 --> R2
+        Both Default routes from R1 and R3 Should present in  R2 BGP RIB.
+        'The Deafult Route from iBGP is preffered over EBGP' thus
+        Default Route From R1->r2  should only present in R2 FIB  """
+    step(STEP)
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n IBGP default route should be preffered over EBGP default-originate \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify the default route from R1 is  recieved both on RIB and FIB on R2")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=False,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify the static and loopback route advertised from R0  and R4 are received on R2 "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(" BGP Daemon restart operation")
+    routers = ["r1", "r2"]
+    for dut in routers:
+        step(
+            "Restart BGPD process on {}, when all the processes are running use watchfrr ".format(
+                dut
+            )
+        )
+        kill_router_daemons(tgen, dut, ["bgpd"])
+        start_router_daemons(tgen, dut, ["bgpd"])
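+        # Restarting only bgpd exercises recovery of the default-originate state while
+        # zebra and the other daemons keep running.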
+
+        step("After restarting the BGP daomon Verify the default originate ")
+        DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+        result = verify_fib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+            expected=False,
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n IBGP default route should be prefeered over EBGP \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+            expected=True,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        step(
+            "Verify the default route from R1 is  is recieved both on RIB and FIB on R2"
+        )
+
+        DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+        result = verify_fib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+            expected=False,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+            expected=True,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        step(
+            "Verify the static and loopback route advertised from R0  and R4 are received on R2 "
+        )
+        for addr_type in ADDR_TYPES:
+            static_routes_input = {
+                "r2": {
+                    "static_routes": [
+                        {
+                            "network": [NETWORK1_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [NETWORK2_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                    ]
+                }
+            }
+
+            result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+            result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+    step(" Restarting  FRR routers  operation")
+    """
+    NOTE: Verify that the iBGP default route is preferred over the eBGP default route
+    """
+    routers = ["r1", "r2"]
+    for dut in routers:
+        step(
+            "Restart FRR router process on {}, when all the processes are running use watchfrr ".format(
+                dut
+            )
+        )
+
+        stop_router(tgen, dut)
+        start_router(tgen, dut)
+
+        result = verify_bgp_convergence(tgen, topo)
+        assert (
+            result is True
+        ), " Testcase {} : After Restarting {} Convergence Failed".format(tc_name, dut)
+
+        step("After restarting the FRR Router Verify the default originate ")
+        DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+        result = verify_fib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+            expected=True,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+            expected=True,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        step(
+            "Verify the default route from R1 is  is recieved both on RIB and FIB on R2"
+        )
+
+        DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+        result = verify_rib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+            expected=True,
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_fib_default_route(
+            tgen,
+            topo,
+            dut="r2",
+            routes=DEFAULT_ROUTES,
+            expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+            expected=False,
+        )
+        assert (
+            result is not True
+        ), "Testcase {} :  Failed\n IBGP default route should be preffered over EBGP default route \n Error: {}".format(
+            tc_name, result
+        )
+
+        step(
+            "Verify the static and loopback route advertised from R0  and R4 are received on R2 "
+        )
+        for addr_type in ADDR_TYPES:
+            static_routes_input = {
+                "r2": {
+                    "static_routes": [
+                        {
+                            "network": [NETWORK1_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [NETWORK2_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                    ]
+                }
+            }
+
+            result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+            result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+    write_test_footer(tc_name)
+
+
+def test_verify_default_originate_after_shut_no_shut_bgp_neighbor_p1(request):
+    """
+    Summary: "Verify default-originate route after shut/no shut and clear BGP neighbor  "
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+    global topo
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    if BGP_CONVERGENCE is not True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+    step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+    input_dict = {
+        "r0": {
+            "bgp": {
+                "local_as": 999,
+            }
+        },
+        "r1": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r2": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+        "r4": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+    try:
+        assert result is True
+    except AssertionError:
+        logger.info("Expected behaviour: {}".format(result))
+        logger.info("BGP config is not created because of invalid ASNs")
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "BGP convergence failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Configure one IPv4 and one IPv6 static route on R0 and R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Verify IPv4 and IPv6 static route configured on R0 and R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family"
+    )
+    redistribute_static = {
+        "r0": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                },
+                                {
+                                    "redist_type": "connected",
+                                },
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                },
+                                {
+                                    "redist_type": "connected",
+                                },
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                },
+                                {
+                                    "redist_type": "connected",
+                                },
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                },
+                                {
+                                    "redist_type": "connected",
+                                },
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "connected",
+                                }
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {
+                                    "redist_type": "connected",
+                                }
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
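+    # For reference (illustrative only), the redistribution block above roughly
+    # corresponds to FRR CLI of the form:
+    #   router bgp <local-as>
+    #    address-family ipv4 unicast
+    #     redistribute static
+    #     redistribute connected
+    #    address-family ipv6 unicast
+    #     redistribute static
+    #     redistribute connected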
+
+    step("Verify IPv4 and IPv6 static route configured on R1 from R0")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Verify IPv4 and IPv6 static route configured on R3 from R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure default-originate on R1 for R1 to R2 neighbor  for IPv4 and IPv6 peer "
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
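+    # For reference (illustrative only), the per-neighbor default-originate
+    # above roughly corresponds to FRR CLI of the form:
+    #   router bgp <local-as>
+    #    address-family ipv4 unicast
+    #     neighbor <r2-peer-address> default-originate
+    #    address-family ipv6 unicast
+    #     neighbor <r2-peer-address> default-originate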
+
+    step("Verify IPv4 and IPv6 bgp default route received on R2 nexthop as R1")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    step(
+        "After configuring default-originate command , verify default  routes are advertised on R2 from R0 and R4"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure default-originate on R3 for R3 to R2 neighbor  for IPv4 and IPv6 peer"
+    )
+    local_as = get_dut_as_number(tgen, dut="r3")
+    default_originate_config = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    STEP = """After configuring the Default Originate From R3 --> R2
+        Both Default routes from R1 and R3 Should present in  R2 BGP RIB.
+        'The Deafult Route from iBGP is preffered over EBGP' thus
+        Default Route From R1->r2  should only present in R2 FIB  """
+    step(STEP)
+
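+    # Likely reason (an assumption, not asserted by the test): the default
+    # originated by R1 reaches R2 over iBGP with an empty AS path, while the
+    # default from R3 arrives over eBGP carrying R3's AS, so the shorter AS
+    # path wins best-path selection before the eBGP-over-iBGP rule is reached.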
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n IBGP default route should be preffered over EBGP \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify the default route from R1 is  recieved both on RIB and FIB on R2")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "After configuring default-originate command , verify static ,connected and loopback  routes are advertised on R2 from R0 and R4"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    # Update the topology with the modified AS numbers to avoid conflicts when configuring the AS
+    updated_topo = topo
+    updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
+    updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
+    updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
+    updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
+    updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")
+
+    step(
+        "Shut  R1 to R2 IPv4 and IPv6 BGP neighbor from R1 IPv4 and IPv6 address family "
+    )
+
+    local_as = get_dut_as_number(tgen, dut="r1")
+    shut_neighbor = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r1": {"shutdown": True}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r1": {"shutdown": True}}}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo["routers"]["r2"]["links"]["r1"]["interface"]
+    input_dict = {"r2": {"interface_list": [interface], "status": "down"}}
+
+    result = interface_status(tgen, topo, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Bring down interface failed ! \n Error: {}".format(
+        tc_name, result
+    )
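+    # Both the BGP neighbor shutdown on R1 and the R2-side interface are taken
+    # down here, so every prefix learned over the R1-R2 session (default,
+    # static, connected and loopback routes) should be withdrawn from R2.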
+
+    step(
+        "Verify IPv4 and IPv6 default static and loopback route which received from R1 are deleted from R2"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n after shutting down interface routes are not expected \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_fib_routes(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n after shutting down interface routes are not expected  \n  Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify that  No impact on IPv4 IPv6 and default route received from R3 ")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        "No-Shut  R1 to R2 IPv4 and IPv6 BGP neighbor from R1 IPv4 and IPv6 address family "
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    shut_neighbor = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r1": {"shutdown": False}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r1": {"shutdown": False}}}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo["routers"]["r2"]["links"]["r1"]["interface"]
+    input_dict = {"r2": {"interface_list": [interface], "status": "up"}}
+
+    result = interface_status(tgen, topo, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+    step(
+        "After no shut Verify IPv4 and IPv6 bgp default route next hop as R1 , static ,connected and loopback received on R2  from r0  and r4 "
+    )
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        "Shut  R3 to R2 IPv4 and IPv6 BGP neighbor from R2 IPv4 and IPv6 address family"
+    )
+    local_as = get_dut_as_number(tgen, dut="r3")
+    shut_neighbor = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {"shutdown": True}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {"shutdown": True}}}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo["routers"]["r2"]["links"]["r3"]["interface"]
+    input_dict = {"r2": {"interface_list": [interface], "status": "down"}}
+
+    result = interface_status(tgen, topo, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Bring down interface failed ! \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Verify IPv4 and IPv6 default static and loopback route which received from R3 are deleted from R2 "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed\n After shutting down the interface routes are not expected  \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_fib_routes(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n  After shutting down the interface routes are not expected \n  Error: {}".format(
+            tc_name, result
+        )
+
+    step("Verify that Default route is removed  i.e advertised from R3")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n  After shutting down the interface Default route are not expected \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n After shutting down the interface Default route are not expected \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Verify that No impact on IPv4 IPv6 and default route received from R1")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "No-Shut  R3 to R2 IPv4 and IPv6 BGP neighbor from R2 IPv4 and IPv6 address family"
+    )
+    local_as = get_dut_as_number(tgen, dut="r3")
+    shut_neighbor = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {"shutdown": False}}}
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {"dest_link": {"r3": {"shutdown": False}}}
+                            }
+                        }
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo["routers"]["r2"]["links"]["r3"]["interface"]
+    input_dict = {"r2": {"interface_list": [interface], "status": "up"}}
+
+    result = interface_status(tgen, topo, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that a static ,connected and loopback  routes are received from R0 and R4 on R2 "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step("verify that default route  is received on R2 from R1")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("verify that default route  is received on R2 from R3")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, result
+    )
+    step("Clear IPv4 and IP6 BGP session from R2 and R1 one by one ")
+    routers = ["r1", "r2"]
+    for dut in routers:
+        for addr_type in ADDR_TYPES:
+
+            clear_bgp(tgen, addr_type, dut)
+
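+            # clear_bgp() resets the BGP session for this address family on
+            # the DUT (roughly "clear ip bgp *" / "clear bgp ipv6 *"); once the
+            # sessions re-establish, default-originate should be re-advertised
+            # and the checks below must still hold.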
+            DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+            result = verify_rib_default_route(
+                tgen,
+                topo,
+                dut="r2",
+                routes=DEFAULT_ROUTES,
+                expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+                expected=True,
+            )
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+            result = verify_fib_default_route(
+                tgen,
+                topo,
+                dut="r2",
+                routes=DEFAULT_ROUTES,
+                expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+                expected=True,
+            )
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+            step("verify that default route  is received on R2 from R3")
+
+            interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+            ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+            ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+            DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+            DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
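+            # For IPv6 eBGP over a directly connected link, the next hop seen
+            # in the RIB is typically R3's link-local address on that
+            # interface, hence the get_frr_ipv6_linklocal() lookup above.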
+
+            result = verify_rib_default_route(
+                tgen,
+                topo,
+                dut="r2",
+                routes=DEFAULT_ROUTES,
+                expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+                expected=True,
+            )
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+            result = verify_fib_default_route(
+                tgen,
+                topo,
+                dut="r2",
+                routes=DEFAULT_ROUTES,
+                expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+                expected=False,
+            )
+            assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+        step(
+            "Verify the static , loopback and connected routes received from r0 and r4"
+        )
+        for addr_type in ADDR_TYPES:
+            static_routes_input = {
+                "r2": {
+                    "static_routes": [
+                        {
+                            "network": [NETWORK1_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [NETWORK2_1[addr_type]],
+                            "next_hop": NEXT_HOP_IP[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                        },
+                        {
+                            "network": [R0_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                            "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                        },
+                        {
+                            "network": [R4_NETWORK_CONNECTED[addr_type]],
+                            "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        },
+                    ]
+                }
+            }
+
+            result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+            result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+    step("Shut BGP neighbor interface R2 (R2 to R1) link ")
+    intf_r2_r1 = topo["routers"]["r2"]["links"]["r1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_r1, False)
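+    # shutdown_bringup_interface() brings the R2-R1 link administratively
+    # down, which tears down the BGP session towards R1 without touching the
+    # BGP configuration itself.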
+
+    step("Verify the  bgp Convergence  ")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=False)
+    assert (
+        BGP_CONVERGENCE is not True
+    ), " :Failed  After shutting interface BGP convergence is expected to be faileed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Verify  that default route from R1 got deleted from BGP and RIB table")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed\n After shuting interface default route should be removed from RIB  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("No - Shut BGP neighbor interface R2 (R2 to R1) link ")
+    intf_r2_r1 = topo["routers"]["r2"]["links"]["r1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_r1, True)
+
+    step("Verify the  bgp Convergence  ")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "BGP convergence failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step("verify that default route  is received on R2 from R3")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Verify the static , loopback and connected routes received from r0 and r4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Shut link from R3 to R2 from R3")
+    intf_r3_r2 = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    shutdown_bringup_interface(tgen, "r3", intf_r3_r2, False)
+
+    step("Verify the  bgp Convergence  ")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=False)
+    assert (
+        BGP_CONVERGENCE is not True
+    ), "  :Failed \nAfter Shuting the interface BGP convegence is expected to be failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Verify  that default route from R3 got deleted from BGP and RIB table")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, result
+    )
+    step("No-Shut link from R3 to R2 from R3")
+
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+
+    DEFAULT_ROUTE_NXT_HOP_1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}
+
+    ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+
+    DEFAULT_ROUTE_NXT_HOP_3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}
+
+    intf_r3_r2 = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    shutdown_bringup_interface(tgen, "r3", intf_r3_r2, True)
+
+    step("Verify the  bgp Convergence  ")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=True)
+    assert BGP_CONVERGENCE is True, "BGP convergence failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step("verify that default route  is received on R2 from R3")
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False,
+    )
+    assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Verify the static , loopback and connected routes received from r0 and r4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R0_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R4_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py b/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py
new file mode 100644 (file)
index 0000000..fa5164f
--- /dev/null
@@ -0,0 +1,1389 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+import os
+import sys
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+    verify_bgp_convergence,
+    verify_graceful_restart,
+    create_router_bgp,
+    verify_router_id,
+    modify_as_number,
+    verify_as_numbers,
+    clear_bgp_and_verify,
+    clear_bgp,
+    verify_bgp_rib,
+    get_prefix_count_route,
+    get_dut_as_number,
+    verify_rib_default_route,
+    verify_fib_default_route,
+    verify_bgp_advertised_routes_from_neighbor,
+    verify_bgp_received_routes_from_neighbor,
+)
+from lib.common_config import (
+    interface_status,
+    verify_prefix_lists,
+    verify_rib,
+    kill_router_daemons,
+    start_router_daemons,
+    shutdown_bringup_interface,
+    step,
+    required_linux_kernel_version,
+    stop_router,
+    start_router,
+    create_route_maps,
+    create_prefix_lists,
+    get_frr_ipv6_linklocal,
+    start_topology,
+    write_test_header,
+    check_address_types,
+    write_test_footer,
+    reset_config_on_routers,
+    create_static_routes,
+    check_router_status,
+    delete_route_maps,
+)
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+
+
+# Global variables
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+NETWORK2_1 = {"ipv4": "198.51.1.2/32", "ipv6": "2001:DB8::1:2/128"}
+NETWORK5_1 = {"ipv4": "198.51.1.3/32", "ipv6": "2001:DB8::1:3/128"}
+NETWORK5_2 = {"ipv4": "198.51.1.4/32", "ipv6": "2001:DB8::1:4/128"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.15")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/bgp_default_orginate_vrf.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    global ADDR_TYPES
+    global BGP_CONVERGENCE
+    global DEFAULT_ROUTES
+    global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+    global R1_NETWORK_LOOPBACK, R1_NETWORK_LOOPBACK_NXTHOP
+    global R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+    global R3_NETWORK_LOOPBACK, R3_NETWORK_LOOPBACK_NXTHOP
+    global R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+    ADDR_TYPES = check_address_types()
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    # These global variables are used throughout the file; they can only be populated after the topology has been built.
+
+    r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+    r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv4"
+    ].split("/")[0]
+    r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+    r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv6"
+    ].split("/")[0]
+
+    r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+    r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+    r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+    r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv4"
+    ].split("/")[0]
+    r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+    r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv6"
+    ].split("/")[0]
+
+    r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+    r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+    r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    R1_NETWORK_LOOPBACK = {
+        "ipv4": r1_loopback_address_ipv4,
+        "ipv6": r1_loopback_address_ipv6,
+    }
+    R1_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r1_loopback_address_ipv4_nxt_hop,
+        "ipv6": r1_loopback_address_ipv6_nxt_hop,
+    }
+
+    R1_NETWORK_CONNECTED = {
+        "ipv4": r1_connected_address_ipv4,
+        "ipv6": r1_connected_address_ipv6,
+    }
+    R1_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r1_loopback_address_ipv4_nxt_hop,
+        "ipv6": r1_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_LOOPBACK = {
+        "ipv4": r3_loopback_address_ipv4,
+        "ipv6": r3_loopback_address_ipv6,
+    }
+    R3_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r3_loopback_address_ipv4_nxt_hop,
+        "ipv6": r3_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_CONNECTED = {
+        "ipv4": r3_connected_address_ipv4,
+        "ipv6": r3_connected_address_ipv6,
+    }
+    R3_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r3_loopback_address_ipv4_nxt_hop,
+        "ipv6": r3_loopback_address_ipv6_nxt_hop,
+    }
+
+    # populating the nexthop for default routes
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
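+    # The IPv6 default route is expected with the peer's link-local address as
+    # next-hop, so the link-local address (rather than the global ipv6_nxt_hop
+    # above) is used in the expected next-hop dictionaries below.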
+    DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#                      Testcases
+#
+#####################################################
+def test_verify_default_originate_route_with_non_default_VRF_p1(request):
+    """
+    "Verify default-originate route with non-default VRF"
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    # These steps are implemented as part of the base topology setup
+    step("Configure IPV4 and IPV6 IBGP between R1 and R2 default VRF")
+    step("Configure IPV4 and IPV6 EBGP between R2 to R3 non-default VRF (RED)")
+    step(
+        "Configure IPv4 and IP6 loopback address on R1 default  and R3 non-default (RED) VRF"
+    )
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    step(
+        "Configure  IPv4 and IPv6 static route on R1 default and R3 non-default (RED) VRF with nexthop as Null ( different static route on each side)"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed to configure the static routes in router R1 default vrf \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed to configure static route in R3 non default vrf RED \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Verify IPv4 and IPv6 static route configured on R1 default vrf and R3 non-default (RED) vrf"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_rib(tgen, addr_type, "r1", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed: Routes configured on vrf is not seen in R1 default VRF FIB \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    }
+                ]
+            }
+        }
+        result = verify_rib(tgen, addr_type, "r3", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed : Routes configured in non-defaul vrf in R3 FIB is \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure redistribute connected and static on R1 (R1-R2) and on R3 ( R2-R3 RED VRF) IPv4 and IPv6 address family "
+    )
+    redistribute_static = {
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                }
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 3000,
+                "vrf": "RED",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                },
+            }
+        },
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the redistribute on R1 and R3 \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Verify IPv4 and IPv6 static route configured on R1 received as BGP routes on R2 default VRF "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        "Verify IPv4 and IPv6  static route configured on R3 received as BGP routes on R2 non-default VRF "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R3_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                ]
+            }
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer"
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
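+    # The "default_originate" knob below results in the equivalent of
+    # "neighbor <r2 peer> default-originate" under each address-family on R1.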
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure  the default originate \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "After configuring default-originate command , verify default  routes are advertised on R2 "
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    }
+                ]
+            }
+        }
+
+        result = verify_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
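+    # Snapshot the prefix counters for the R2-R3 (VRF RED) session before
+    # default-originate is enabled on R3, so the Out-prefix increase can be
+    # verified afterwards.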
+    snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3", vrf="RED")
+
+    step(
+        "Configure default-originate on R3 for R3 to R2 neighbor (RED VRF)   for IPv4 and IPv6 peer"
+    )
+
+    default_originate_config = {
+        "r3": {
+            "bgp": {
+                "local_as": "3000",
+                "vrf": "RED",
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify IPv4 and IPv6 bgp default route and static route received on R2 VRF red nexthop as R3"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+                        "vrf": "RED",
+                    },
+                ]
+            }
+        }
+        result = verify_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_bgp_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify Out-prefix count incremented for IPv4/IPv6 default route on VRF red")
+    snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3", vrf="RED")
+    step("verifying the prefix count incrementing or not ")
+    isIPv4prefix_incremented = False
+    isIPv6prefix_incremented = False
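+    # Compare against the snapshot taken before default-originate was enabled on
+    # R3; the counters are expected to have increased by the newly advertised
+    # IPv4/IPv6 default routes.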
+    if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+        isIPv4prefix_incremented = True
+    if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+        isIPv6prefix_incremented = True
+
+    assert (
+        isIPv4prefix_incremented is True
+    ), "Testcase {} : Failed Error: IPv4 prefix count is not incremented on receiving".format(
+        tc_name
+    )
+    assert (
+        isIPv6prefix_incremented is True
+    ), "Testcase {} : Failed Error: IPv6 prefix count is not incremented on receiving".format(
+        tc_name
+    )
+
+    step("Configure import VRF red on R2 for IPV4 and IPV6 BGP peer")
+    step("Importing the non-default vrf in default VRF ")
+    local_as = get_dut_as_number(tgen, "r2")
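+    # This generates "import vrf RED" under the IPv4/IPv6 unicast address-families
+    # of R2's default-VRF BGP instance, leaking the VRF RED routes into the
+    # default VRF table.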
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "address_family": {
+                        "ipv4": {"unicast": {"import": {"vrf": "RED"}}},
+                        "ipv6": {"unicast": {"import": {"vrf": "RED"}}},
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step(
+        "Verify VRF RED IPv4 and IPv6,  default-originate, \n static and loopback route are imported to R2 default VRF table  ,\n default-originate route coming from VRF red should not active on R2 default VRF table"
+    )
+    step("verifying the static routes connected and loop back routes")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R3_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    STEP = """ After importing non defualt VRF into default vrf .
+    verify that the default originate from R1 --> R2(non -default) is preffered over R3 --> R2
+    because the Default Route prefers iBGP over eBGP over
+    Default Route  from R1 Should be present in BGP RIB and FIB
+    Default Route  from R3 Should be present only in BGP RIB not in  FIB
+    """
+    step(STEP)
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    }
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+                    },
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        "Configure import VRF default on R2 (R2-R3) RED VRF for IPV4 and IPV6 BGP peer"
+    )
+    step("Importing the default vrf in non-default VRF ")
+    local_as = "2000"
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "vrf": "RED",
+                    "address_family": {
+                        "ipv4": {"unicast": {"import": {"vrf": "default"}}},
+                        "ipv6": {"unicast": {"import": {"vrf": "default"}}},
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step(
+        "Default VR, IPv4 and IPv6 , default-originate, \n static and loopback route are imported to R2  VRF RED table  \n, default-originate route coming from VRF red should not active on R2 default VRF table"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R3_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    STEP = """ After importing  defualt VRF into non default vrf .
+    verify that the default originate from R1 --> R2(non -default) is preffered over R3 --> R2
+    because the Default Route prefers iBGP over eBGP over
+    Default Route  from R1 Should be present in BGP RIB and FIB
+    Default Route  from R3 Should be present only in BGP RIB not in  FIB
+    """
+    step(STEP)
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                        "vrf": "RED",
+                    }
+                ]
+            }
+        }
+
+        result = verify_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+        )
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+                        "vrf": "RED",
+                    }
+                ]
+            }
+        }
+
+        result = verify_rib(
+            tgen,
+            addr_type,
+            "r2",
+            static_routes_input,
+            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+            expected=False,
+        )
+        assert result is not True, "Testcase {} : Failed {} \n Error: {}".format(
+            tc_name, STEP, result
+        )
+
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+                        "vrf": "RED",
+                    },
+                ]
+            }
+        }
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Remove import VRF configure in step 8 and then remove import VRF configured on step 9"
+    )
+    local_as = get_dut_as_number(tgen, "r2")
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "address_family": {
+                        "ipv4": {"unicast": {"import": {"vrf": "RED", "delete": True}}},
+                        "ipv6": {"unicast": {"import": {"vrf": "RED", "delete": True}}},
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify that the routes imported from non default VRF  - RED is removed")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [R3_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                    },
+                    {
+                        "network": [R3_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input, expected=False)
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n routes imported from non default VRF is not expected Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n routes imported from non default VRF is not expected \nError: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Remove import VRF configure in step 8 and then remove import VRF configured on step 9"
+    )
+    local_as = "2000"
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "vrf": "RED",
+                    "address_family": {
+                        "ipv4": {
+                            "unicast": {"import": {"vrf": "default", "delete": True}}
+                        },
+                        "ipv6": {
+                            "unicast": {"import": {"vrf": "default", "delete": True}}
+                        },
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step("Verify that the routes impoted from  default VRF   is removed")
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R1_NETWORK_LOOPBACK[addr_type]],
+                        "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                    {
+                        "network": [R1_NETWORK_CONNECTED[addr_type]],
+                        "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+                        "vrf": "RED",
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input, expected=False)
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n routes impoted from  default VRF is not expected \n  Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n routes impoted from  default VRF is not expected \n  Error: {}".format(
+            tc_name, result
+        )
+
+    write_test_footer(tc_name)
+
+
+def test_verify_default_originate_route_with_non_default_VRF_with_route_map_p1(request):
+    """
+    "Verify default-originate route with non-default VRF with route-map import "
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    step("Configure IPV4 and IPV6 static route on R0 with Null nexthop")
+    STEP = """
+    Configure IPV4 and IPV6 EBGP session between R0 and R1
+    Configure IPV4 and IPV6 static route on R0 with Null nexthop """
+    step(STEP)
+    input_dict = {
+        "r0": {"bgp": {"local_as": 222, "vrf": "default"}},
+        "r1": {"bgp": {"local_as": 333, "vrf": "default"}},
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Configuring static route at R0")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK5_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(" Configure re-distribute static on R0 for R0 to R1  for IPV4 and IPV6 peer ")
+    redistribute_static = {
+        "r0": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "redistribute": [
+                                {"redist_type": "static"},
+                                {"redist_type": "connected"},
+                            ]
+                        }
+                    },
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Configure default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer"
+    )
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3", vrf="RED")
+
+    step("Verify IPv4 and IPv6  static  received on R2 default VRF as BGP routes")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK5_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        " Configure IPv4 and IPv6 prefix-list of of route received from R1 on R2 and for 0.0.0.0/0 0::0/0 route"
+    )
+    input_dict_3 = {
+        "r2": {
+            "prefix_lists": {
+                "ipv4": {
+                    "Pv4": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK5_1["ipv4"],
+                            "action": "permit",
+                        },
+                        {"seqid": "2", "network": "0.0.0.0/0", "action": "permit"},
+                        {
+                            "seqid": "3",
+                            "network": NETWORK2_1["ipv4"],
+                            "action": "permit",
+                        },
+                    ]
+                },
+                "ipv6": {
+                    "Pv6": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK5_1["ipv6"],
+                            "action": "permit",
+                        },
+                        {"seqid": "2", "network": "0::0/0", "action": "permit"},
+                        {
+                            "seqid": "3",
+                            "network": NETWORK2_1["ipv6"],
+                            "action": "permit",
+                        },
+                    ]
+                },
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("verify IPv4 and IPv6 Prefix list got configured on R3")
+    input_dict = {"r2": {"prefix_lists": ["Pv4", "Pv6"]}}
+    result = verify_prefix_lists(tgen, input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Configure IPv4/IPv6 route-map on R2 with deny sequence using above prefix-list"
+    )
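+    # A deny sequence matching the prefix-lists means these prefixes (including
+    # the default route) are dropped when the route-map is applied to the VRF
+    # import below, so they must not appear in the default VRF.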
+    input_dict_3 = {
+        "r2": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "deny",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "deny",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    STEP = """
+    import Route-map anf non-default VRF into defailt vrf
+    import vrf route-map RM1
+    import vrf red
+    """
+    step(STEP)
+
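+    # First enable "import vrf RED", then attach the route-map with
+    # "import vrf route-map RMv4/RMv6" so that only prefixes permitted by the
+    # route-map are leaked from VRF RED into the default VRF.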
+    local_as = get_dut_as_number(tgen, "r2")
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "address_family": {
+                        "ipv4": {"unicast": {"import": {"vrf": "RED"}}},
+                        "ipv6": {"unicast": {"import": {"vrf": "RED"}}},
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step(STEP)
+    input_import_vrf = {
+        "r2": {
+            "bgp": [
+                {
+                    "local_as": local_as,
+                    "address_family": {
+                        "ipv4": {"unicast": {"import": {"vrf": "route-map RMv4"}}},
+                        "ipv6": {"unicast": {"import": {"vrf": "route-map RMv6"}}},
+                    },
+                }
+            ]
+        }
+    }
+    result = create_router_bgp(tgen, topo, input_import_vrf)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    step(
+        "Verify IPv4 and IPv6 routes present on VRF red ( static , default-originate) should not get advertised to default VRF "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK2_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    },
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input, expected=False)
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n VRF red ( static , default-originate) should not get advertised to default VRF  \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(
+            tgen, addr_type, "r2", static_routes_input, expected=False
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n VRF red ( static , default-originate) should not get advertised to default VRF  \nError: {}".format(
+            tc_name, result
+        )
+
+    step("Change route-map sequence deny to permit")
+    input_dict_3 = {
+        "r2": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "IPv4 and IPv6 routes present on VRF red ( static , default-originate) should get advertised to default VRF"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [DEFAULT_ROUTES[addr_type]],
+                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+                    }
+                ]
+            }
+        }
+
+        result = verify_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify Out-prefix count incremented for IPv4/IPv6 default route on VRF red")
+    snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3", vrf="RED")
+    step("verifying the prefix count incrementing or not ")
+    isIPv4prefix_incremented = False
+    isIPv6prefix_incremented = False
+    if snapshot1["ipv4_count"] <= snapshot2["ipv4_count"]:
+        isIPv4prefix_incremented = True
+    if snapshot1["ipv6_count"] <= snapshot2["ipv6_count"]:
+        isIPv6prefix_incremented = True
+
+    assert (
+        isIPv4prefix_incremented is True
+    ), "Testcase {} : Failed Error: IPv4 prefix count is not incremented on receiving".format(
+        tc_name
+    )
+    assert (
+        isIPv6prefix_incremented is True
+    ), "Testcase {} : Failed Error: IPv6 prefix count is not incremented on receiving".format(
+        tc_name
+    )
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py b/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
new file mode 100644 (file)
index 0000000..9e3a3b5
--- /dev/null
@@ -0,0 +1,2104 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+The following scenarios are covered:
+1. When there is a change in the route-map policy associated with default-originate, the changes are not reflected.
+2. When the route-map associated with default-originate is deleted, the default route does not get withdrawn.
+3. An UPDATE message is not sent when only the route-map is removed from the default-originate config.
+4. The SNT counter gets incremented on every change of the policy associated with default-originate.
+5. A route-map with multiple match clauses causes inconsistencies with default-originate.
+6. BGP default-originate behaviour with BGP attributes.
+"""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+    verify_bgp_convergence,
+    create_router_bgp,
+    get_prefix_count_route,
+    modify_as_number,
+    verify_bgp_rib,
+    get_dut_as_number,
+    verify_rib_default_route,
+    verify_fib_default_route,
+)
+from lib.common_config import (
+    verify_fib_routes,
+    step,
+    required_linux_kernel_version,
+    create_route_maps,
+    interface_status,
+    create_prefix_lists,
+    get_frr_ipv6_linklocal,
+    start_topology,
+    write_test_header,
+    verify_prefix_lists,
+    check_address_types,
+    write_test_footer,
+    reset_config_on_routers,
+    create_static_routes,
+    check_router_status,
+    delete_route_maps,
+)
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.15")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    global ADDR_TYPES
+    global BGP_CONVERGENCE
+    global DEFAULT_ROUTES
+    global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+    ADDR_TYPES = check_address_types()
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#                      Testcases
+#
+#####################################################
+
+
+def test_default_originate_delete_conditional_routemap(request):
+    """
+    "scenerio covered":
+    1. When there is change in route-map policy associated with default-originate, changes does not reflect.
+    2. When route-map associated with default-originate is deleted, default route doesn't get withdrawn
+    3. Update message is not being sent when only route-map is removed from the default-originate config.
+    4. SNT counter gets incremented on change of every policy associated with default-originate
+    5. Route-map with multiple match clauses causes inconsistencies with default-originate.
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+    global topo
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
+    step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R0")
+    input_dict = {
+        "r0": {
+            "bgp": {
+                "local_as": 999,
+            }
+        },
+        "r1": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r2": {
+            "bgp": {
+                "local_as": 1000,
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 2000,
+            }
+        },
+        "r4": {
+            "bgp": {
+                "local_as": 3000,
+            }
+        },
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+    try:
+        assert result is True
+    except AssertionError:
+        logger.info("Expected behaviour: {}".format(result))
+        logger.info("BGP config is not created because of invalid ASNs")
+
+    step("After changing the BGP remote as , Verify the BGP Convergence")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert (
+        BGP_CONVERGENCE is True
+    ), "Complete convergence is expected after changing ASN ....! ERROR :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step("Configure 1 IPv4 and 1 IPv6 Static route on R0 with next-hop as Null0")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed to configure the static route  \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("verify IPv4 and IPv6 static route are configured and up on R0")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : routes {}  not found in R0 FIB  \n Error: {}".format(
+            tc_name, static_routes_input, result
+        )
+
+    step(
+        "Configure  redistribute static on IPv4 and IPv6 address family on R0 for R0 to R1 neighbor "
+    )
+    redistribute_static = {
+        "r0": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure redistribute  configuration....! \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("verify IPv4 and IPv6 static route are received on R1")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r0": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    },
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed... Routes {}  expected in r1 FIB after configuring the redistribute config on R0 \n Error: {}".format(
+            tc_name, static_routes_input, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+        assert (
+            result is True
+        ), "Testcase {} : Failed... Routes {}  expected in r1 RIB after configuring the redistribute config on R0\n Error: {}".format(
+            tc_name, static_routes_input, result
+        )
+
+    step(
+        "Configure IPv4 prefix-list 'Pv4' and and IPv6 prefix-list 'Pv6' on R1 to match BGP route Sv41, IPv6 route Sv61 permit "
+    )
+    input_dict_3 = {
+        "r1": {
+            "prefix_lists": {
+                "ipv4": {
+                    "Pv4": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv4"],
+                            "action": "permit",
+                        },
+                    ]
+                },
+                "ipv6": {
+                    "Pv6": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv6"],
+                            "action": "permit",
+                        },
+                    ]
+                },
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed  to configure the prefix list \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Configure IPV4 and IPv6 route-map (RMv4 and RMv6) matching prefix-list (Pv4 and Pv6) respectively on R1"
+    )
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                        "set": {
+                            "path": {
+                                "as_num": "5555",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                        "set": {
+                            "path": {
+                                "as_num": "5555",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route map  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Configure default-originate with route-map (RMv4 and RMv6) on R1, on BGP IPv4 and IPv6 address family "
+    )
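+    # The AS numbers were modified earlier in this test, so fetch R1's current
+    # local AS before building the default-originate configuration.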
+    local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "After configuring default-originate command , verify default  routes are advertised on R2 "
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        metric=0,
+        expected_aspath="5555",
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Changing the as-path  policy of the existing route-map")
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                        "set": {
+                            "path": {
+                                "as_num": "6666",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                        "set": {
+                            "path": {
+                                "as_num": "6666",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route map  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "Verify prefix sent count on R1 towards R2 \n Send count shoud not be incremented on change of existing (AS-path) policy "
+    )
+    snapshot = get_prefix_count_route(
+        tgen, topo, dut="r1", peer="r2", link="r1", sent=True, received=False
+    )
+
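+    # Two sent prefixes are expected towards R2: the redistributed static route
+    # and the conditionally originated default route; the count must not grow
+    # when only the as-path policy of the existing route-map changes.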
+    ipv4_prefix_count = False
+    ipv6_prefix_count = False
+    if snapshot["ipv4_count"] == 2:
+        ipv4_prefix_count = True
+    if snapshot["ipv6_count"] == 2:
+        ipv6_prefix_count = True
+
+    assert (
+        ipv4_prefix_count is True
+    ), "Testcase {} : Failed Error: Expected sent Prefix is 2 but obtained {} ".format(
+        tc_name, ipv4_prefix_count
+    )
+    assert (
+        ipv6_prefix_count is True
+    ), "Testcase {} : Failed Error: Expected sent Prefix is 2 but obtained {} ".format(
+        tc_name, ipv6_prefix_count
+    )
+
+    step(
+        "After changing the as-path policy verify  the  new policy is advertised to router  R2"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        metric=0,
+        expected_aspath="6666",
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Default route with expected attributes is not found in BGP RIB  \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Default route with expected attributes is not found in BGP FIB  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Remove the as-path policy from the route-map")
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                        "set": {
+                            "path": {
+                                "as_num": "6666",
+                                "as_action": "prepend",
+                                "delete": True,
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                        "set": {
+                            "path": {
+                                "as_num": "6666",
+                                "as_action": "prepend",
+                                "delete": True,
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route map  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "After removing the  route policy (AS-Path) verify that as-path is removed in r2 "
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+    )
+    assert result is True, "Testcase {} : Failed ... !  \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed .... !\n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Delete the route-map ")
+
+    delete_routemap = {"r1": {"route_maps": ["RMv4", "RMv6"]}}
+    result = delete_route_maps(tgen, delete_routemap)
+    assert (
+        result is True
+    ), "Testcase {} : Failed  to delete the route-map\n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "After deleting route-map , verify  the default route  in FIB and RIB are removed "
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        metric=0,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : After removing the route-map the default-route is not removed from R2 RIB\n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} :  After removing the route-map the default-route is not removed from R2 FIB \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Create route-map with with sequnce number 10 ")
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "10",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                        "set": {
+                            "path": {
+                                "as_num": "9999",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "10",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                        "set": {
+                            "path": {
+                                "as_num": "9999",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route map  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "After Configuring the route-map the dut is expected to receive the  route policy (as-path) as 99999"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        metric=0,
+        expected_aspath="9999",
+    )
+    assert result is True, "Testcase {} : Failed...!   \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert result is True, "Testcase {} : Failed ...!\n Error: {}".format(
+        tc_name, result
+    )
+
+    step("Create another route-map with seq number less than the previous i. <10 ")
+    input_dict_3 = {
+        "r1": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "5",
+                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
+                        "set": {
+                            "path": {
+                                "as_num": "7777",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "5",
+                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
+                        "set": {
+                            "path": {
+                                "as_num": "7777",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert (
+        result is True
+    ), "Testcase {} : Failed to configure the route map  \n Error: {}".format(
+        tc_name, result
+    )
+
+    step(
+        "On creating new route-map  the route-map with lower seq id should be considered "
+    )
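+    # Route-map sequences are evaluated in ascending order, so the newly added
+    # seq 5 entry (prepend 7777) is matched before the existing seq 10 entry.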
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        metric=0,
+        expected_aspath="7777",
+    )
+    assert (
+        result is True
+    ), "Testcase {} : Route-map with lowest prefix is not considered  \n Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+        expected=True,
+    )
+    assert (
+        result is True
+    ), "Testcase {} :  Route-map with lowest prefix is not considered   \n Error: {}".format(
+        tc_name, result
+    )
+
+    write_test_footer(tc_name)
+
+
+def test_verify_default_originate_after_BGP_attributes_p1(request):
+    """
+    "Verify different BGP attributes with default-originate route "
+    """
+    tgen = get_topogen()
+    global BGP_CONVERGENCE
+    global topo
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+
+    if BGP_CONVERGENCE != True:
+        pytest.skip("skipped because of BGP Convergence failure")
+
+    step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+    step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
+    r0_local_as = topo['routers']['r0']['bgp']['local_as']
+    r1_local_as = topo['routers']['r1']['bgp']['local_as']
+    r2_local_as = topo['routers']['r2']['bgp']['local_as']
+    r3_local_as = topo['routers']['r3']['bgp']['local_as']
+    r4_local_as = topo['routers']['r4']['bgp']['local_as']
+    input_dict = {
+        "r0": {
+            "bgp": {
+                "local_as": r0_local_as,
+            }
+        },
+        "r1": {
+            "bgp": {
+                "local_as": r1_local_as,
+            }
+        },
+        "r2": {
+            "bgp": {
+                "local_as": r2_local_as,
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+        "r4": {
+            "bgp": {
+                "local_as": 4000,
+            }
+        },
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+
+    try:
+        assert result is True
+    except AssertionError:
+        logger.info("Expected behaviour: {}".format(result))
+        logger.info("BGP config is not created because of invalid ASNs")
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+
+    step(
+        "Configure one IPv4 and one IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, IPv6 route Sv61 "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure redistribute static knob on R4 , for R4 to R3 neighbor for IPv4 and IPv6 address family "
+    )
+    redistribute_static = {
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify After configuring redistribute static , verify route received in BGP table of R3"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    NOTE = """Configure 2  IPv4 prefix-list Pv41 Pv42 and and 2 IPv6 prefix-list Pv61 Pv62 on R3 to match BGP IPv4 route Sv41, 200.1.1.1/24 , IPv6 route Sv61 and 200::1/64"""
+    step(NOTE)
+    input_dict_3 = {
+        "r3": {
+            "prefix_lists": {
+                "ipv4": {
+                    "Pv41": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv4"],
+                            "action": "permit",
+                        }
+                    ],
+                    "Pv42": [
+                        {"seqid": "1", "network": "200.1.1.1/24", "action": "permit"}
+                    ],
+                },
+                "ipv6": {
+                    "Pv61": [
+                        {
+                            "seqid": "1",
+                            "network": NETWORK1_1["ipv6"],
+                            "action": "permit",
+                        }
+                    ],
+                    "Pv62": [
+                        {"seqid": " 1", "network": "200::1/64", "action": "permit"}
+                    ],
+                },
+            }
+        }
+    }
+    result = create_prefix_lists(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("verify IPv4 and IPv6 Prefix list got configured on R3")
+    input_dict = {"r3": {"prefix_lists": ["Pv41", "Pv61", "Pv42", "Pv62"]}}
+    result = verify_prefix_lists(tgen, input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Configure 2 sequence of route-map for IPv4 seq1 permit Pv41 and seq2 permit Pv42 and for IPv6 seq1 permit Pv61 , seq2 permit Pv62 on R3"
+    )
+    input_dict_3 = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv4": {"prefix_lists": "Pv41"}},
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "match": {"ipv4": {"prefix_lists": "Pv42"}},
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "match": {"ipv6": {"prefix_lists": "Pv61"}},
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "match": {"ipv6": {"prefix_lists": "Pv62"}},
+                    },
+                ],
+            }
+        }
+    }
+    result = create_route_maps(tgen, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Apply on route-map seq1 set as-path prepend to 200 and route-map seq2 set  as-path prepend to 300 for IPv4 and IPv6 route-map "
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "200",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "300",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "200",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "300",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        " Configure default-originate with IPv4 and IPv6 route-map on R3 for R3-R2 IPv4 and IPv6 BGP neighbor"
+    )
+
+    local_as = get_dut_as_number(tgen, dut="r3")
+    default_originate_config = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify IPv4 and IPv6 default route received on R2 with both the AS path on R2"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
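+    # The as-path seen on R2 should be "4000 200": R3 prepends AS 200 via
+    # route-map seq 1 and then prepends its own AS 4000 on the eBGP session.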
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=0,
+        expected_aspath="4000 200",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Modify AS prepend path adding one more value 500 in route-map sequence 1 and 600 for route-map sequence 2 for IPv4 and IPv6 route-map"
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "500",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "600",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "500",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "600",
+                                "as_action": "prepend",
+                            }
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+
+    step("As path 500 added to IPv4 and IPv6 default -originate route received on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=0,
+        expected_aspath="4000 500",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Apply on route-map seq1 set metric value to  70 and route-map seq2 set  metric 80 IPv4 and IPv6 route-map"
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "metric": 70,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "metric": 80,
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "metric": 70,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "metric": 80,
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+    )
+
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=70,
+        expected_aspath="4000 500",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Modify route-map seq1 configure metric 50 and route-map seq2 configure metric 100 IPv4 and IPv6 route-map "
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "metric": 50,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "metric": 100,
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "metric": 50,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "metric": 100,
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+    )
+
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=50,
+        expected_aspath="4000 500",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Delete AS-prepend  from IP4 and IPv6 route-map configured on R3 ")
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "500",
+                                "as_action": "prepend",
+                                "delete": True,
+                            },
+                            "delete": True,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "600",
+                                "as_action": "prepend",
+                                "delete": True,
+                            },
+                            "delete": True,
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "path": {
+                                "as_num": "500",
+                                "as_action": "prepend",
+                                "delete": True,
+                            },
+                            "delete": True,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "path": {
+                                "as_num": "600",
+                                "as_action": "prepend",
+                                "delete": True,
+                            },
+                            "delete": True,
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify AS-prepend is deleted from default originate route and metric value only present on R2 for IPv4 and IPv6 default routes "
+    )
+
+
+
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=50,
+        expected_aspath="4000",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+
+    step("Delete metric value  from IP4 and IPv6 route-map configured on R3 ")
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {"metric": 50, "delete": True},
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {"metric": 100, "delete": True},
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {"metric": 50, "delete": True},
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {"metric": 100, "delete": True},
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify Metric value deleted from IPv4 and IPv6 default route on R2 ,verify  default routes "
+    )
+
+
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=0,
+        expected_aspath="4000",
+    )
+
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    step("Change  IPv4 and IPv6 , EBGP to IBGP neighbor between R3 and R2")
+    step("Change IPv4 and IPv6 IBGP to EBGP neighbor between R3 and R4")
+    r0_local_as = topo['routers']['r0']['bgp']['local_as']
+    r1_local_as = topo['routers']['r1']['bgp']['local_as']
+    r2_local_as = topo['routers']['r2']['bgp']['local_as']
+    r3_local_as = topo['routers']['r3']['bgp']['local_as']
+    r4_local_as = topo['routers']['r4']['bgp']['local_as']
+    input_dict = {
+        "r0": {
+            "bgp": {
+                "local_as": r0_local_as,
+            }
+        },
+        "r1": {
+            "bgp": {
+                "local_as": r1_local_as,
+            }
+        },
+        "r2": {
+            "bgp": {
+                "local_as": 1111,
+            }
+        },
+        "r3": {
+            "bgp": {
+                "local_as": 1111,
+            }
+        },
+        "r4": {
+            "bgp": {
+                "local_as": 5555,
+            }
+        },
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+    try:
+        assert result is True
+    except AssertionError:
+        logger.info("Expected behaviour: {}".format(result))
+        logger.info("BGP config is not created because of invalid ASNs")
+    step("After changing the BGP AS Path Verify the BGP Convergence")
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    step(
+        "Configure one IPv4 and one IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, IPv6 route Sv61 "
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Configure redistribute static knob on R4 , for R4 to R3 neighbor for IPv4 and IPv6 address family "
+    )
+    redistribute_static = {
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, redistribute_static)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify After configuring redistribute static , verify route received in BGP table of R3"
+    )
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step(
+        " Configure default-originate with IPv4 and IPv6 route-map on R3 for R3-R2 IPv4 and IPv6 BGP neighbor"
+    )
+    local_as = get_dut_as_number(tgen, dut="r3")
+    default_originate_config = {
+        "r3": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+                    },
+                    "ipv6": {
+                        "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+                    },
+                },
+            }
+        }
+    }
+    result = create_router_bgp(tgen, topo, default_originate_config)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify IPv4 and IPv6 default route received on R2 with both the AS path on R2"
+    )
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Configure local -preference to 50 on IPv4 and IPv6 route map seq1 and 60 on seq2"
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "locPrf": 50,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "locPrf": 60,
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "locPrf": 50,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "locPrf": 60,
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+    )
+
+
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        locPrf=50,
+    )
+
+
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Modify local preference value to 150  on IPv4 and IPv6 route map seq1 and 160 on seq2"
+    )
+    route_map = {
+        "r3": {
+            "route_maps": {
+                "RMv4": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "locPrf": 150,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "locPrf": 160,
+                        },
+                    },
+                ],
+                "RMv6": [
+                    {
+                        "action": "permit",
+                        "seq_id": "1",
+                        "set": {
+                            "locPrf": 150,
+                        },
+                    },
+                    {
+                        "action": "permit",
+                        "seq_id": "2",
+                        "set": {
+                            "locPrf": 160,
+                        },
+                    },
+                ],
+            }
+        }
+    }
+
+    result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify Modified local-preference  value received on R2  for IPv4 and IPv6 default routes "
+    )
+
+
+
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        locPrf=150,
+    )
+
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    # Refresh the topology dict with the current AS numbers to avoid conflicts when configuring BGP
+    updated_topo = topo
+    updated_topo['routers']['r0']['bgp']['local_as']=get_dut_as_number(tgen,"r0")
+    updated_topo['routers']['r1']['bgp']['local_as']=get_dut_as_number(tgen,"r1")
+    updated_topo['routers']['r2']['bgp']['local_as']=get_dut_as_number(tgen,"r2")
+    updated_topo['routers']['r3']['bgp']['local_as']=get_dut_as_number(tgen,"r3")
+    updated_topo['routers']['r4']['bgp']['local_as']=get_dut_as_number(tgen,"r4")
+
+    step("Shut IPv4/IPv6 BGP neighbor from R4 ( R4-R3) using 'neighbor x.x.x.x shut' command ")
+    local_as = get_dut_as_number(tgen, dut="r4")
+    shut_neighbor = {
+        "r4": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r4": {"shutdown":True}
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r4": {"shutdown":True}
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo['routers']['r3']['links']['r4']['interface']
+    input_dict = {
+        "r3": {
+            "interface_list": [interface],
+            "status": "down",
+        }
+    }
+
+    result = interface_status(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Shut down the interface failed ! \n Error: {}".format(tc_name, result)
+
+    step("After shutting the interface verify the BGP convergence")
+    result = verify_bgp_convergence(tgen, topo, expected=False)
+    assert result is not True, "Testcase {} : Failed \n After shutting down the neighbor, BGP convergence is expected to fail \n Error: {}".format(tc_name, result)
+
+    step("verify default route deleted from R2 ")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not  True, "Testcase {} : Failed \n Error: After Shut down interface the default route is NOT expected but found in RIB -> {}".format( tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not  True, "Testcase {} : Failed \n Error:  After Shut down interface the default route is NOT expected but found in FIB -> {}".format( tc_name, result)
+
+
+    step("no Shut IPv4/IPv6 BGP neighbor from R4 ( R4-R3) using 'neighbor x.x.x.x shut' command ")
+    local_as = get_dut_as_number(tgen, dut="r4")
+    shut_neighbor = {
+        "r4": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r4": {"shutdown":False}
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r4": {"shutdown":False}
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    interface = topo['routers']['r3']['links']['r4']['interface']
+    input_dict = {
+        "r3": {
+            "interface_list": [interface],
+            "status": "up",
+        }
+    }
+
+    result = interface_status(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+    step("After no shut of the interface, verify BGP convergence")
+    result = verify_bgp_convergence(tgen, topo, expected=True)
+    assert result is True, "Testcase {} : Failed \n After no shut, BGP convergence should succeed and return True \n Error: {}".format(tc_name, result)
+
+    step("After no shut of the neighbor, verify the default route is relearned on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After no shut of the interface the default route is expected but NOT found in RIB -> {}".format(tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After no shut of the interface the default route is expected but NOT found in FIB -> {}".format(tc_name, result)
+
+
+
+    step("Remove the IPv4/IPv6 static route configured on R4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                        "delete": True
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step("Verify IPv4 and IPv6 static routes removed on R4 in FIB")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input, expected=False)
+        assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_bgp_rib(tgen, addr_type, "r4", static_routes_input, expected=False)
+        assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("After removing the static route, verify the default route is removed on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not True, "Testcase {} : Failed \n Error: After removing the static route the default route is NOT expected but is present in RIB -> {}".format(tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not True, "Testcase {} : Failed \n Error: After removing the static route the default route is NOT expected but is present in FIB -> {}".format(tc_name, result)
+
+
+    step("Configuring the static route back in r4")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = create_static_routes(tgen, static_routes_input)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+    step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+    for addr_type in ADDR_TYPES:
+        static_routes_input = {
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [NETWORK1_1[addr_type]],
+                        "next_hop": NEXT_HOP_IP[addr_type],
+                    }
+                ]
+            }
+        }
+        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input, expected=True)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+        result = verify_bgp_rib(tgen, addr_type, "r4", static_routes_input, expected=True)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("After adding the static route back, verify the default route is learned on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After adding the static route back the default route is expected but NOT found in RIB -> {}".format(tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After adding the static route back the default route is expected but NOT found in FIB -> {}".format(tc_name, result)
+
+    step("Deactivate IPv4 and IPv6 neighbor configured from R4 ( R4-R3)")
+
+    configure_bgp_on_r4 = {
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {"dest_link": {"r4": {"deactivate": "ipv4"}}}
+                            }
+                        },
+
+                    },"ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {"dest_link": {"r4": {"deactivate": "ipv6"}}}
+                            }
+                        },
+
+                    }
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, configure_bgp_on_r4)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("After deactivating the BGP neighbor, verify the default route is removed on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not True, "Testcase {} : Failed \n Error: After deactivating the BGP neighbor the default route is NOT expected but is present in RIB -> {}".format(tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=False)
+    assert result is not True, "Testcase {} : Failed \n Error: After deactivating the BGP neighbor the default route is NOT expected but is present in FIB -> {}".format(tc_name, result)
+
+    step("Activate IPv4 and IPv6 neighbor configured from R4 ( R4-R3)")
+
+    configure_bgp_on_r4 = {
+        "r4": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {"dest_link": {"r4": {"activate": "ipv4"}}}
+                            }
+                        },
+
+                    },"ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {"dest_link": {"r4": {"activate": "ipv6"}}}
+                            }
+                        },
+
+                    }
+                }
+            }
+        }
+    }
+    result = create_router_bgp(tgen, updated_topo, configure_bgp_on_r4)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify BGP convergence.")
+    bgp_convergence = verify_bgp_convergence(tgen, updated_topo)
+    assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, bgp_convergence
+    )
+    step("After activating the BGP neighbor, verify the default route is learned on R2")
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After activating the BGP neighbor the default route is expected but NOT found in RIB -> {}".format(tc_name, result)
+
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        expected=True)
+    assert result is True, "Testcase {} : Failed \n Error: After activating the BGP neighbor the default route is expected but NOT found in FIB -> {}".format(tc_name, result)
+    write_test_footer(tc_name)
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 9e581a7be7ec0f8dbfc6d8f5da9ce6d919f3ac98..0a283c06d57c15d87fe3c0a2807105fbe1f4a21b 100644 (file)
@@ -5,7 +5,5 @@ interface lo
 interface r1-eth0
  ip address 192.168.255.1/24
 !
-ip route 192.168.13.0./24 Null0
-!
 ip forwarding
 !
index 9e581a7be7ec0f8dbfc6d8f5da9ce6d919f3ac98..0a283c06d57c15d87fe3c0a2807105fbe1f4a21b 100644 (file)
@@ -5,7 +5,5 @@ interface lo
 interface r1-eth0
  ip address 192.168.255.1/24
 !
-ip route 192.168.13.0./24 Null0
-!
 ip forwarding
 !
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/__init__.py b/tests/topotests/bgp_ipv4_class_e_peer/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/r1/bgpd.conf b/tests/topotests/bgp_ipv4_class_e_peer/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..bf0a68e
--- /dev/null
@@ -0,0 +1,12 @@
+!
+allow-reserved-ranges
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 240.0.0.2 remote-as external
+ neighbor 240.0.0.2 timers 1 3
+ neighbor 240.0.0.2 timers connect 1
+ address-family ipv4
+  redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/r1/zebra.conf b/tests/topotests/bgp_ipv4_class_e_peer/r1/zebra.conf
new file mode 100644 (file)
index 0000000..d4ac46f
--- /dev/null
@@ -0,0 +1,11 @@
+!
+allow-reserved-ranges
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 240.0.0.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/r2/bgpd.conf b/tests/topotests/bgp_ipv4_class_e_peer/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..7d08963
--- /dev/null
@@ -0,0 +1,9 @@
+!
+allow-reserved-ranges
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 240.0.0.1 remote-as external
+ neighbor 240.0.0.1 timers 1 3
+ neighbor 240.0.0.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/r2/zebra.conf b/tests/topotests/bgp_ipv4_class_e_peer/r2/zebra.conf
new file mode 100644 (file)
index 0000000..f0a350f
--- /dev/null
@@ -0,0 +1,8 @@
+!
+allow-reserved-ranges
+!
+interface r2-eth0
+ ip address 240.0.0.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_ipv4_class_e_peer/test_bgp_ipv4_class_e_peer.py b/tests/topotests/bgp_ipv4_class_e_peer/test_bgp_ipv4_class_e_peer.py
new file mode 100644 (file)
index 0000000..c2f0d2e
--- /dev/null
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+#
+# bgp_ipv4_class_e_peer.py
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Check if the peering works by using IPv4 Class E IP ranges, and if
+we don't treat next-hop as martian in such a case.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    for routern in range(1, 3):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_ipv4_class_e_peer():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    router = tgen.gears["r2"]
+
+    def _bgp_converge():
+        output = json.loads(router.vtysh_cmd("show ip bgp neighbor 240.0.0.1 json"))
+        expected = {
+            "240.0.0.1": {
+                "bgpState": "Established",
+                "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    def _bgp_next_hop_ipv4_class_e():
+        output = json.loads(
+            router.vtysh_cmd("show bgp ipv4 unicast 172.16.255.1/32 json")
+        )
+        expected = {
+            "paths": [
+                {
+                    "valid": True,
+                    "nexthops": [
+                        {
+                            "ip": "240.0.0.1",
+                            "accessible": True,
+                        }
+                    ],
+                }
+            ]
+        }
+        return topotest.json_cmp(output, expected)
+
+    step("Initial BGP converge")
+    test_func = functools.partial(_bgp_converge)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP convergence on R2"
+
+    step("Check if IPv4 BGP peering works with Class E IP ranges")
+    test_func = functools.partial(_bgp_next_hop_ipv4_class_e)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see 172.16.255.1/32 via 240.0.0.1 on R2"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 68436177d8d8552db7eb105cebe8974c64f48508..9f01287c91d910f6ba1ba9c279c6218c3a5fb9f6 100644 (file)
@@ -56,6 +56,9 @@ from lib.bgp import (
 )
 from lib.topojson import build_config_from_json
 
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Global variables
 topo = None
 
index 1d424caa30eb3a04654efd153ca6294bb7044efc..48f308e316e94a5ff9159e4c209fb2cc3b500eef 100644 (file)
@@ -55,6 +55,8 @@ from lib.bgp import (
 )
 from lib.topojson import build_config_from_json
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Global variables
 topo = None
 
index fc2d2364c69aadcbb60529cb7045914735667942..4105c3fe63eb6de27e0be757bc04906322301ccc 100644 (file)
@@ -52,6 +52,8 @@ from lib.bgp import create_router_bgp, verify_bgp_convergence, verify_bgp_rib
 
 from lib.topojson import build_config_from_json
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Global variables
 topo = None
 
index 862cae42e9e1b98cc32ae822f6330b040b6960ca..a9e6d21b8d928b3412afaa8307e3d10766f7c240 100644 (file)
@@ -58,9 +58,10 @@ from lib.bgp import (
 )
 from lib.topojson import build_config_from_json
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
 # Global variables
 topo = None
-
 # Global variables
 NETWORK = {
     "ipv4": [
index 1a91257f06a0c1ed402ecd0b4abddbd7c2542ecc..9a0fc441755e8ca555db5c91ad2f8a814019dc5d 100644 (file)
@@ -54,6 +54,7 @@ from lib.topojson import build_config_from_json
 # Global variables
 topo = None
 
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
 
 # Global variables
 NETWORK_CMD_IP = "1.0.1.17/32"
index 36bfdfe0682359a64a1aea2ba91af0bad5ac4a80..c0a5c7ad9cc8cefc6ba3ad88c26c8fc6afcd052f 100644 (file)
@@ -137,22 +137,6 @@ def ltemplatePreRouterStartHook():
     if tgen.hasmpls != True:
         logger.info("MPLS not available, skipping setup")
         return False
-    # configure r2 mpls interfaces
-    intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"]
-    for intf in intfs:
-        cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf))
-    # configure MPLS
-    rtrs = ["r1", "r3", "r4"]
-    cmds = ["echo 1 > /proc/sys/net/mpls/conf/lo/input"]
-    for rtr in rtrs:
-        router = tgen.gears[rtr]
-        for cmd in cmds:
-            cc.doCmd(tgen, rtr, cmd)
-        intfs = ["lo", rtr + "-eth0", rtr + "-eth4"]
-        for intf in intfs:
-            cc.doCmd(
-                tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)
-            )
     logger.info("setup mpls input")
     return True
 
index 18f61e0c54c51e5d8db1bc77e5c5424475a1eec8..767e17e62c09ef6654f3c7cc044ce3b33d1cd6f2 100644 (file)
@@ -3,15 +3,18 @@ log file zebra.log
 hostname r1
 !
 interface lo
+ mpls
  ip address 1.1.1.1/32
 !
 interface r1-eth0
  description to sw0
+ mpls
  ip address 10.0.1.1/24
  no link-detect
 !
 interface r1-eth4
  description to ce1
+ mpls
  ip address 192.168.1.1/24
  no link-detect
 !
index dd1dbac32b8aba7ca95ec7e68c6d3d89f393cef8..829ac969e343e4dd8c676a9cfb17f115c46a5be2 100644 (file)
@@ -3,20 +3,24 @@ log file zebra.log
 hostname r2
 !
 interface lo
+ mpls
  ip address 2.2.2.2/32
 !
 interface r2-eth0
  description to sw0
+ mpls
  ip address 10.0.1.2/24
  no link-detect
 !
 interface r2-eth1
  description to sw1
+ mpls
  ip address 10.0.2.2/24
  no link-detect
 !
 interface r2-eth2
  description to sw2
+ mpls
  ip address 10.0.3.2/24
  no link-detect
 !
index 9dbc29024378a212d8570839e26fb0b61643e4f8..916dabf37d62121edce825a55ba0318a14bff10c 100644 (file)
@@ -3,10 +3,12 @@ log file zebra.log
 hostname r3
 !
 interface lo
+ mpls
  ip address 3.3.3.3/32
 !
 interface r3-eth0
  description to sw1
+ mpls
  ip address 10.0.2.3/24
  no link-detect
 !
@@ -17,6 +19,7 @@ interface r3-eth1
 !
 interface r3-eth4
  description to ce2
+ mpls
  ip address 192.168.1.1/24
  no link-detect
 !
index 415f03df51250e02db08e3d4cd870b0f32298d6e..e08ac86aec66dc28194dfaff58a396d8342f378d 100644 (file)
@@ -3,15 +3,18 @@ log file zebra.log
 hostname r4
 !
 interface lo
+ mpls
  ip address 4.4.4.4/32
 !
 interface r4-eth0
  description to sw1
+ mpls
  ip address 10.0.2.4/24
  no link-detect
 !
 interface r4-eth4
  description to ce3
+ mpls
  ip address 192.168.1.1/24
  no link-detect
 !
diff --git a/tests/topotests/bgp_orf/__init__.py b/tests/topotests/bgp_orf/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_orf/r1/bgpd.conf b/tests/topotests/bgp_orf/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..800bf1b
--- /dev/null
@@ -0,0 +1,9 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ address-family ipv4 unicast
+  redistribute connected
+  neighbor 192.168.1.2 capability orf prefix-list both
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_orf/r1/zebra.conf b/tests/topotests/bgp_orf/r1/zebra.conf
new file mode 100644 (file)
index 0000000..85ab531
--- /dev/null
@@ -0,0 +1,8 @@
+!
+int lo
+ ip address 10.10.10.1/32
+ ip address 10.10.10.2/32
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
diff --git a/tests/topotests/bgp_orf/r2/bgpd.conf b/tests/topotests/bgp_orf/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..8c488aa
--- /dev/null
@@ -0,0 +1,9 @@
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ address-family ipv4 unicast
+  neighbor 192.168.1.1 capability orf prefix-list both
+  neighbor 192.168.1.1 prefix-list r1 in
+ exit-address-family
+!
+ip prefix-list r1 seq 5 permit 10.10.10.1/32
diff --git a/tests/topotests/bgp_orf/r2/zebra.conf b/tests/topotests/bgp_orf/r2/zebra.conf
new file mode 100644 (file)
index 0000000..cffe827
--- /dev/null
@@ -0,0 +1,4 @@
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_orf/test_bgp_orf.py b/tests/topotests/bgp_orf/test_bgp_orf.py
new file mode 100644 (file)
index 0000000..cb74b85
--- /dev/null
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if BGP ORF filtering is working correctly when modifying
+prefix-list.
+
+Initially advertise 10.10.10.1/32 from R1 to R2. Add new prefix
+10.10.10.2/32 to r1 prefix list on R2. Test if we updated ORF
+prefix-list correctly.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = pytest.mark.bgpd
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def setup_module(mod):
+    topodef = {"s1": ("r1", "r2")}
+    tgen = Topogen(topodef, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_orf():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    r1 = tgen.gears["r1"]
+    r2 = tgen.gears["r2"]
+
+    def _bgp_converge_r1():
+        output = json.loads(
+            r1.vtysh_cmd(
+                "show bgp ipv4 unicast neighbor 192.168.1.2 advertised-routes json"
+            )
+        )
+        expected = {"advertisedRoutes": {"10.10.10.1/32": {}, "10.10.10.2/32": None}}
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_converge_r1)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Can't apply ORF from R1 to R2"
+
+    def _bgp_converge_r2():
+        output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast summary json"))
+        expected = {
+            "peers": {
+                "192.168.1.1": {
+                    "pfxRcd": 1,
+                    "pfxSnt": 1,
+                    "state": "Established",
+                    "peerState": "OK",
+                }
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_converge_r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "ORF filtering is not working from R1 to R2"
+
+    r2.vtysh_cmd(
+        """
+        configure terminal
+        ip prefix-list r1 seq 10 permit 10.10.10.2/32
+    """
+    )
+
+    def _bgp_orf_changed_r1():
+        output = json.loads(
+            r1.vtysh_cmd(
+                "show bgp ipv4 unicast neighbor 192.168.1.2 advertised-routes json"
+            )
+        )
+        expected = {"advertisedRoutes": {"10.10.10.1/32": {}, "10.10.10.2/32": {}}}
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_orf_changed_r1)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Can't apply new ORF from R1 to R2"
+
+    def _bgp_orf_changed_r2():
+        output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast json"))
+        expected = {
+            "routes": {
+                "10.10.10.1/32": [{"valid": True}],
+                "10.10.10.2/32": [{"valid": True}],
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_orf_changed_r2)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "New ORF filtering is not working from R1 to R2"
+
+    r2.vtysh_cmd(
+        """
+        configure terminal
+        no ip prefix-list r1 seq 10 permit 10.10.10.2/32
+    """
+    )
+
+    test_func = functools.partial(_bgp_converge_r1)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Can't apply initial ORF from R1 to R2"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
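As a side note, the ORF state that R2 pushes to R1 can also be inspected interactively; this is a minimal sketch, reusing the `r1` gear from the test above and assuming the usual FRR "received prefix-filter" show command is available in this build:

    # Hedged example: dump the ORF prefix-filter R1 received from 192.168.1.2.
    received = r1.vtysh_cmd("show ip bgp neighbors 192.168.1.2 received prefix-filter")
    print(received)  # expected to list 10.10.10.1/32 (and 10.10.10.2/32 after the update)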
index 4ec83093dcd4ea4646df46c8e4b5070a7887b715..273f933d6e0fe59d17d33eccc0183febdd15f3c4 100644 (file)
@@ -2,18 +2,24 @@ router bgp 64501
   bgp router-id 192.168.2.1
   network 192.0.2.0/24
 ! Correct role pair
-  neighbor 192.168.2.2 remote-as 64502
+  neighbor 192.168.2.2 remote-as external
   neighbor 192.168.2.2 local-role provider
   neighbor 192.168.2.2 timers 3 10
 ! Incorrect role pair
-  neighbor 192.168.3.2 remote-as 64503
+  neighbor 192.168.3.2 remote-as external
   neighbor 192.168.3.2 local-role provider
   neighbor 192.168.3.2 timers 3 10
 ! Missed neighbor role
-  neighbor 192.168.4.2 remote-as 64504
+  neighbor 192.168.4.2 remote-as external
   neighbor 192.168.4.2 local-role provider
   neighbor 192.168.4.2 timers 3 10
 ! Missed neighbor role with strict-mode
-  neighbor 192.168.5.2 remote-as 64505
+  neighbor 192.168.5.2 remote-as external
   neighbor 192.168.5.2 local-role provider strict-mode
   neighbor 192.168.5.2 timers 3 10
+! Testing peer-groups
+  neighbor PG peer-group
+  neighbor PG remote-as external
+  neighbor PG local-role provider
+  neighbor PG timers 3 10
+  neighbor 192.168.6.2 peer-group PG
index 3e90a261c19570e10cb28158d774605932d5dace..301a1e889029ddf252f343feb1ed2e757a3dd5a6 100644 (file)
@@ -11,5 +11,8 @@ interface r1-eth2
 interface r1-eth3
  ip address 192.168.5.1/24
 !
+interface r1-eth4
+ ip address 192.168.6.1/24
+!
 ip forwarding
 !
index e741aa16ce2b0464d307662064844bdcb919c644..aa8ba72cadf6f8fe5c918e93b710ab8fa5e4a6b1 100644 (file)
@@ -1,5 +1,5 @@
 router bgp 64502
   bgp router-id 192.168.2.2
-  neighbor 192.168.2.1 remote-as 64501
+  neighbor 192.168.2.1 remote-as external
   neighbor 192.168.2.1 local-role customer
   neighbor 192.168.2.1 timers 3 10
index d1404260e66a2f6d2e1545e3e27a8d0b82bec67e..5ed93d63e53dd654466abb892163be0b8b5a9cc6 100644 (file)
@@ -1,4 +1,4 @@
 router bgp 64503
-  neighbor 192.168.3.1 remote-as 64501
+  neighbor 192.168.3.1 remote-as external
   neighbor 192.168.3.1 local-role peer
   neighbor 192.168.3.1 timers 3 10
index e6a0a9222aeabaf08e0dc19dcfc4759d0b1a5b38..118132d4903f9de5a118625b4f16341accc42e19 100644 (file)
@@ -1,3 +1,3 @@
 router bgp 64504
-  neighbor 192.168.4.1 remote-as 64501
+  neighbor 192.168.4.1 remote-as external
   neighbor 192.168.4.1 timers 3 10
index eeeed7ed3046415ba5e6908dcafd8e1395789449..2f62fbf11a551dfec59910967122196b7b73a677 100644 (file)
@@ -1,3 +1,3 @@
 router bgp 64505
-  neighbor 192.168.5.1 remote-as 64501
+  neighbor 192.168.5.1 remote-as external
   neighbor 192.168.5.1 timers 3 10
diff --git a/tests/topotests/bgp_roles_capability/r6/bgpd.conf b/tests/topotests/bgp_roles_capability/r6/bgpd.conf
new file mode 100644 (file)
index 0000000..44b12fe
--- /dev/null
@@ -0,0 +1,4 @@
+router bgp 64506
+  neighbor 192.168.6.1 remote-as external
+  neighbor 192.168.6.1 local-role customer
+  neighbor 192.168.6.1 timers 3 10
diff --git a/tests/topotests/bgp_roles_capability/r6/zebra.conf b/tests/topotests/bgp_roles_capability/r6/zebra.conf
new file mode 100644 (file)
index 0000000..c8428c4
--- /dev/null
@@ -0,0 +1,6 @@
+!
+interface r6-eth0
+ ip address 192.168.6.2/24
+!
+ip forwarding
+!
index f72b6d2ceed1bd888e33b7cd03aa91b1997dcd63..d72fd19b91323b8b67fe3b30779582823d7403a4 100644 (file)
@@ -43,7 +43,7 @@ from lib.topolog import logger
 pytestmark = [pytest.mark.bgpd]
 
 
-topodef = {f"s{i}": ("r1", f"r{i}") for i in range(2, 6)}
+topodef = {f"s{i}": ("r1", f"r{i}") for i in range(2, 7)}
 
 
 @pytest.fixture(scope="module")
@@ -162,6 +162,24 @@ def test_role_strict_mode(tgen):
     assert success, "Session between r1 and r5 was not correctly closed"
 
 
+def test_correct_pair_peer_group(tgen):
+    # provider-customer pair (using peer-groups)
+    router = tgen.gears["r1"]
+    neighbor_ip = "192.168.6.2"
+    check_r6_established = functools.partial(
+        check_session_established, router, neighbor_ip
+    )
+    success, _ = topotest.run_and_expect(check_r6_established, True, count=20, wait=3)
+    assert success, "Session with r6 is not Established"
+
+    neighbor_status = find_neighbor_status(router, neighbor_ip)
+    assert neighbor_status["localRole"] == "provider"
+    assert neighbor_status["remoteRole"] == "customer"
+    assert (
+        neighbor_status["neighborCapabilities"].get("role") == "advertisedAndReceived"
+    )
+
+
 if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(args))
index d612ad2c9470a3e7dd05efed737f0cd82cb9a303..bc53dfb469bc361bafb841c151e6fe518c8e7c6e 100755 (executable)
@@ -38,6 +38,7 @@ sys.path.append(os.path.join(CWD, "../"))
 # Import topogen and topotest helpers
 from lib.topogen import Topogen, TopoRouter, get_topogen
 from lib.snmptest import SnmpTester
+from lib import topotest
 
 # Required to instantiate the topology builder class.
 
@@ -239,10 +240,18 @@ def test_pe1_converge_evpn():
     tgen = get_topogen()
 
     r1 = tgen.gears["r1"]
-    r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
 
+    def _convergence():
+        r1 = tgen.gears["r1"]
+        r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+        return r1_snmp.test_oid("bgpVersion", "10")
+
+    _, result = topotest.run_and_expect(_convergence, True, count=20, wait=1)
     assertmsg = "BGP SNMP does not seem to be running"
-    assert r1_snmp.test_oid("bgpVersion", "10"), assertmsg
+    assert result, assertmsg
+
+    r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
     count = 0
     passed = False
     while count < 125:
diff --git a/tests/topotests/bgp_vpnv4_noretain/__init__.py b/tests/topotests/bgp_vpnv4_noretain/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
new file mode 100644 (file)
index 0000000..3d8773b
--- /dev/null
@@ -0,0 +1,24 @@
+router bgp 65500
+ bgp router-id 1.1.1.1
+ neighbor 10.125.0.2 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 10.125.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 10.125.0.2 activate
+  no bgp retain route-target all
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 1.1.1.1
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 101
+  rd vpn export 444:1
+  rt vpn import 51:100 52:100
+  rt vpn export 51:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json
new file mode 100644 (file)
index 0000000..903c460
--- /dev/null
@@ -0,0 +1,69 @@
+{
+   "vrfId":0,
+   "vrfName":"default",
+   "tableVersion":1,
+   "routerId":"1.1.1.1",
+   "defaultLocPrf":100,
+   "localAS":65500,
+   "routes":{
+      "routeDistinguishers":{
+         "444:1":{
+            "10.201.0.0/24":[
+               {
+                  "valid":true,
+                  "bestpath":true,
+                  "selectionReason":"First path received",
+                  "pathFrom":"external",
+                  "prefix":"10.201.0.0",
+                  "prefixLen":24,
+                  "network":"10.201.0.0\/24",
+                  "version":1,
+                  "metric":0,
+                  "weight":32768,
+                  "peerId":"(unspec)",
+                  "path":"",
+                  "origin":"incomplete",
+                  "announceNexthopSelf":true,
+                  "nhVrfName":"vrf1",
+                  "nexthops":[
+                     {
+                        "ip":"0.0.0.0",
+                        "afi":"ipv4",
+                        "used":true
+                     }
+                  ]
+               }
+            ]
+         },
+         "444:2":{
+            "10.200.0.0/24":[
+               {
+                  "valid":true,
+                  "bestpath":true,
+                  "selectionReason":"First path received",
+                  "pathFrom":"internal",
+                  "prefix":"10.200.0.0",
+                  "prefixLen":24,
+                  "network":"10.200.0.0\/24",
+                  "version":1,
+                  "metric":0,
+                  "locPrf":100,
+                  "weight":0,
+                  "peerId":"10.125.0.2",
+                  "path":"",
+                  "origin":"incomplete",
+                  "nexthops":[
+                     {
+                        "ip":"10.125.0.2",
+                        "afi":"ipv4",
+                        "used":true
+                     }
+                  ]
+               }
+            ]
+         },
+         "444:3":{
+         }
+      }
+   }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json
new file mode 100644 (file)
index 0000000..3cc0b4a
--- /dev/null
@@ -0,0 +1,94 @@
+{
+   "vrfId":0,
+   "vrfName":"default",
+   "tableVersion":1,
+   "routerId":"1.1.1.1",
+   "defaultLocPrf":100,
+   "localAS":65500,
+   "routes":{
+      "routeDistinguishers":{
+         "444:1":{
+            "10.201.0.0/24":[
+               {
+                  "valid":true,
+                  "bestpath":true,
+                  "selectionReason":"First path received",
+                  "pathFrom":"external",
+                  "prefix":"10.201.0.0",
+                  "prefixLen":24,
+                  "network":"10.201.0.0\/24",
+                  "version":1,
+                  "metric":0,
+                  "weight":32768,
+                  "peerId":"(unspec)",
+                  "path":"",
+                  "origin":"incomplete",
+                  "announceNexthopSelf":true,
+                  "nhVrfName":"vrf1",
+                  "nexthops":[
+                     {
+                        "ip":"0.0.0.0",
+                        "afi":"ipv4",
+                        "used":true
+                     }
+                  ]
+               }
+            ]
+         },
+         "444:2":{
+            "10.200.0.0/24":[
+               {
+                  "valid":true,
+                  "bestpath":true,
+                  "selectionReason":"First path received",
+                  "pathFrom":"internal",
+                  "prefix":"10.200.0.0",
+                  "prefixLen":24,
+                  "network":"10.200.0.0\/24",
+                  "version":1,
+                  "metric":0,
+                  "locPrf":100,
+                  "weight":0,
+                  "peerId":"10.125.0.2",
+                  "path":"",
+                  "origin":"incomplete",
+                  "nexthops":[
+                     {
+                        "ip":"10.125.0.2",
+                        "afi":"ipv4",
+                        "used":true
+                     }
+                  ]
+               }
+            ]
+         },
+         "444:3":{
+            "10.210.0.0/24":[
+               {
+                  "valid":true,
+                  "bestpath":true,
+                  "selectionReason":"First path received",
+                  "pathFrom":"internal",
+                  "prefix":"10.210.0.0",
+                  "prefixLen":24,
+                  "network":"10.210.0.0\/24",
+                  "version":1,
+                  "metric":0,
+                  "locPrf":100,
+                  "weight":0,
+                  "peerId":"10.125.0.2",
+                  "path":"",
+                  "origin":"incomplete",
+                  "nexthops":[
+                     {
+                        "ip":"10.125.0.2",
+                        "afi":"ipv4",
+                        "used":true
+                     }
+                  ]
+               }
+            ]
+         }
+      }
+   }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
new file mode 100644 (file)
index 0000000..6f5cb6e
--- /dev/null
@@ -0,0 +1,14 @@
+interface r1-eth0
+ ip router isis 1
+ isis circuit-type level-1
+!
+interface lo
+ ip router isis 1
+ isis passive
+!
+router isis 1
+ is-type level-1
+ net 49.0002.0000.1994.00
+ segment-routing on
+ segment-routing prefix 1.1.1.1/32 index 11
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
new file mode 100644 (file)
index 0000000..5b8b1e8
--- /dev/null
@@ -0,0 +1,13 @@
+log stdout
+interface lo
+ ip address 1.1.1.1/32
+!
+interface r1-gre0
+ ip address 192.168.0.1/24
+!
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
new file mode 100644 (file)
index 0000000..235fb31
--- /dev/null
@@ -0,0 +1,35 @@
+router bgp 65500
+ bgp router-id 2.2.2.2
+ neighbor 10.125.0.1 remote-as 65500
+ address-family ipv4 unicast
+  no neighbor 10.125.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+  neighbor 10.125.0.1 activate
+  no bgp retain route-target all
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:2
+  rt vpn import 53:100 52:100 51:100
+  rt vpn export 52:100
+  export vpn
+  import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf2
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 102
+  rd vpn export 444:3
+  rt vpn both 53:100 52:100 51:100
+  rt vpn both 53:100
+  export vpn
+  import vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
new file mode 100644 (file)
index 0000000..cbec8c3
--- /dev/null
@@ -0,0 +1,14 @@
+interface r2-eth0
+ ip router isis 1
+ isis circuit-type level-1
+!
+interface lo
+ ip router isis 1
+ isis passive
+!
+router isis 1
+ is-type level-1
+ net 49.0002.0000.1995.00
+ segment-routing on
+ segment-routing prefix 2.2.2.2/32 index 22
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
new file mode 100644 (file)
index 0000000..7ec644a
--- /dev/null
@@ -0,0 +1,16 @@
+log stdout
+interface lo
+ ip address 2.2.2.2/32
+!
+interface r2-gre0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth2 vrf vrf2
+ ip address 10.210.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
new file mode 100644 (file)
index 0000000..b4a841d
--- /dev/null
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_noretain.py
+# Part of NetDEF Topology Tests
+#
+# Copyright 2022 6WIND S.A.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_noretain.py: Do not keep VPNv4 entries when no
+ VRF imports the incoming VPNv4 entries
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+def build_topo(tgen):
+    "Build function"
+
+    tgen.add_router("r1")
+    tgen.add_router("r2")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+
+    switch = tgen.add_switch("s3")
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s4")
+    switch.add_link(tgen.gears["r2"])
+
+        
+def _populate_iface():
+    tgen = get_topogen()
+    cmds_list = [
+        'modprobe mpls_router',
+        'echo 100000 > /proc/sys/net/mpls/platform_labels',
+        'ip link add vrf1 type vrf table 10',
+        'ip link set dev vrf1 up',
+        'ip link set dev {0}-eth1 master vrf1',
+        'echo 1 > /proc/sys/net/mpls/conf/vrf1/input',
+    ]
+    cmds_list_extra = [
+        'ip link add vrf2 type vrf table 20',
+        'ip link set dev vrf2 up',
+        'ip link set dev {0}-eth2 master vrf2',
+        'echo 1 > /proc/sys/net/mpls/conf/vrf2/input',
+    ]
+    
+    for cmd in cmds_list:
+        input = cmd.format('r1', '1', '2')
+        logger.info('input: ' + cmd)
+        output = tgen.net['r1'].cmd(cmd.format('r1', '1', '2'))
+        logger.info('output: ' + output)
+
+    for cmd in cmds_list:
+        input = cmd.format('r2', '2', '1')
+        logger.info('input: ' + cmd)
+        output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1'))
+        logger.info('output: ' + output)
+
+    for cmd in cmds_list_extra:
+        input = cmd.format('r2', '2', '1')
+        logger.info('input: ' + cmd)
+        output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1'))
+        logger.info('output: ' + output)
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    _populate_iface() 
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+        )
+
+    # Initialize all routers.
+    tgen.start_router()
+
+
+def teardown_module(_mod):
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+
+    tgen.stop_topology()
+
+
+def test_protocols_convergence():
+    """
+    Assert that all protocols have converged and that the
+    expected VPNv4 routes are present on r1.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Check IPv4 VPN routing tables on r1
+    logger.info("Checking IPv4 routes for convergence on r1")
+    router = tgen.gears['r1']
+    json_file = "{}/{}/ipv4_vpn_routes.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        logger.info("skipping file {}".format(json_file))
+        assert 0, 'ipv4_vpn_routes.json file not found'
+        return
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp ipv4 vpn json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+    # Check BGP IPv4 routing tables after re-enabling the retain route-target flag
+    logger.info("Checking BGP IPv4 routes for convergence on r1")
+    router = tgen.gears['r1']
+    router.vtysh_cmd("configure\nrouter bgp 65500\naddress-family ipv4 vpn\nbgp retain route-target all\n")
+
+    # Check IPv4 VPN routing tables on r1
+    logger.info("Checking IPv4 routes for convergence on r1")
+    router = tgen.gears['r1']
+    json_file = "{}/{}/ipv4_vpn_routes_unfiltered.json".format(CWD, router.name)
+    if not os.path.isfile(json_file):
+        logger.info("skipping file {}".format(json_file))
+        assert 0, 'ipv4_vpn_routes_unfiltered.json file not found'
+        return
+
+    expected = json.loads(open(json_file).read())
+    test_func = partial(
+        topotest.router_json_cmp,
+        router,
+        "show bgp ipv4 vpn json",
+        expected,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assertmsg = '"{}" JSON output mismatches'.format(router.name)
+    assert result is None, assertmsg
+
+def test_memory_leak():
+    "Run the memory leak test and report results."
+    tgen = get_topogen()
+    if not tgen.is_memleak_enabled():
+        pytest.skip("Memory leak test/report is disabled")
+
+    tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 3e1e6eb79b1449e4a1b15fbfdd5960166380c39d..120a3e82e4096d714ba70ace75fb3684b66458b4 100755 (executable)
@@ -1,6 +1,7 @@
 """
 Topotest conftest.py file.
 """
+# pylint: disable=consider-using-f-string
 
 import glob
 import os
@@ -135,7 +136,7 @@ def check_for_memleaks():
     assert topotest_extra_config["valgrind_memleaks"]
 
     leaks = []
-    tgen = get_topogen()
+    tgen = get_topogen()  # pylint: disable=redefined-outer-name
     latest = []
     existing = []
     if tgen is not None:
@@ -143,19 +144,28 @@ def check_for_memleaks():
         if hasattr(tgen, "valgrind_existing_files"):
             existing = tgen.valgrind_existing_files
         latest = glob.glob(os.path.join(logdir, "*.valgrind.*"))
+        latest = [x for x in latest if "core" not in x]
 
     daemons = set()
     for vfile in latest:
         if vfile in existing:
             continue
-        existing.append(vfile)
+        # do not consider memleaks from parent fork (i.e., owned by root)
+        if os.stat(vfile).st_uid == 0:
+            existing.append(vfile)  # do not check again
+            logger.debug("Skipping valgrind file %s owned by root", vfile)
+            continue
+        logger.debug("Checking valgrind file %s not owned by root", vfile)
         with open(vfile, encoding="ascii") as vf:
             vfcontent = vf.read()
             match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent)
+            if match:
+                existing.append(vfile)  # have summary don't check again
             if match and match.group(1) != "0":
                 emsg = "{} in {}".format(match.group(1), vfile)
                 leaks.append(emsg)
-                daemons.add(re.match(r".*\.valgrind\.(.*)\.\d+", vfile).group(1))
+                daemon = re.match(r".*\.valgrind\.(.*)\.\d+", vfile).group(1)
+                daemons.add("{}({})".format(daemon, match.group(1)))
 
     if tgen is not None:
         tgen.valgrind_existing_files = existing
@@ -165,6 +175,15 @@ def check_for_memleaks():
         pytest.fail("valgrind memleaks found for daemons: " + " ".join(daemons))
 
 
+@pytest.fixture(autouse=True, scope="module")
+def module_check_memtest(request):
+    del request  # disable unused warning
+    yield
+    if topotest_extra_config["valgrind_memleaks"]:
+        if get_topogen() is not None:
+            check_for_memleaks()
+
+
 def pytest_runtest_logstart(nodeid, location):
     # location is (filename, lineno, testname)
     topolog.logstart(nodeid, location, topotest_extra_config["rundir"])
@@ -178,6 +197,7 @@ def pytest_runtest_logfinish(nodeid, location):
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_call(item: pytest.Item) -> None:
     "Hook the function that is called to execute the test."
+    del item  # disable unused warning
 
     # For topology only run the CLI then exit
     if topotest_extra_config["topology_only"]:
@@ -416,7 +436,7 @@ def pytest_runtest_makereport(item, call):
                 )
 
             # (topogen) Set topology error to avoid advancing in the test.
-            tgen = get_topogen()
+            tgen = get_topogen()  # pylint: disable=redefined-outer-name
             if tgen is not None:
                 # This will cause topogen to report error on `routers_have_failure`.
                 tgen.set_error("{}/{}".format(modname, item.name))
@@ -499,7 +519,7 @@ def pytest_runtest_makereport(item, call):
         if user == "cli":
             cli(Mininet.g_mnet_inst)
         elif user == "pdb":
-            pdb.set_trace()
+            pdb.set_trace()  # pylint: disable=forgotten-debug-statement
         elif user:
             print('Unrecognized input: "%s"' % user)
         else:
index 216756f5121e8ff2974f2c8432cc47924c1999e9..341ec25a19d980d0e61b8709173ff27274702163 100644 (file)
@@ -1630,9 +1630,14 @@ def modify_as_number(tgen, topo, input_dict):
             # Remove bgp configuration
 
             router_dict.update({router: {"bgp": {"delete": True}}})
-
-            new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"]["local_as"]
-
+            try:
+                new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"][
+                    "local_as"
+                ]
+            except TypeError:
+                new_topo[router]["bgp"][0]["local_as"] = input_dict[router]["bgp"][
+                    "local_as"
+                ]
         logger.info("Removing bgp configuration")
         create_router_bgp(tgen, topo, router_dict)
 
index 0e9e0ba40386db465deb8c7ecc005b77df4e76a2..fa33b02ed154d8bcfa184599e7afbc1c20d8f9d1 100644 (file)
@@ -449,6 +449,8 @@ def check_router_status(tgen):
                     daemons.append("zebra")
                 if "pimd" in result:
                     daemons.append("pimd")
+                if "pim6d" in result:
+                    daemons.append("pim6d")
                 if "ospfd" in result:
                     daemons.append("ospfd")
                 if "ospf6d" in result:
@@ -1035,6 +1037,12 @@ def start_topology(tgen, daemon=None):
                 TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
             )
 
+        if daemon and "pim6d" in daemon:
+            # Loading empty pim6d.conf file to the router, to start the pim6d daemon
+            router.load_config(
+                TopoRouter.RD_PIM6, "{}/{}/pim6d.conf".format(tgen.logdir, rname)
+            )
+
     # Starting routers
     logger.info("Starting all routers once topology is created")
     tgen.start_router()
@@ -1131,6 +1139,8 @@ def topo_daemons(tgen, topo=None):
         for val in topo["routers"][rtr]["links"].values():
             if "pim" in val and "pimd" not in daemon_list:
                 daemon_list.append("pimd")
+            if "pim6" in val and "pim6d" not in daemon_list:
+                daemon_list.append("pim6d")
             if "ospf" in val and "ospfd" not in daemon_list:
                 daemon_list.append("ospfd")
             if "ospf6" in val and "ospf6d" not in daemon_list:
@@ -3234,6 +3244,86 @@ def configure_interface_mac(tgen, input_dict):
     return True
 
 
+def socat_send_igmp_join_traffic(
+    tgen,
+    server,
+    protocol_option,
+    igmp_groups,
+    send_from_intf,
+    send_from_intf_ip=None,
+    port=12345,
+    reuseaddr=True,
+    join=False,
+    traffic=False,
+):
+    """
+    API to send IGMP/MLD join using the socat tool
+
+    Parameters:
+    -----------
+    * `tgen`  : Topogen object
+    * `server`: iperf server, from where IGMP join would be sent
+    * `protocol_option`: Protocol options, ex: UDP6-RECV
+    * `igmp_groups`: IGMP group for which join has to be sent
+    * `send_from_intf`: Interface from which join would be sent
+    * `send_from_intf_ip`: Interface IP, default is None
+    * `port`: Port to be used, default is 12345
+    * `reuseaddr`: True|False, bydefault True
+    * `join`: If join needs to be sent
+    * `traffic`: If traffic needs to be sent
+
+    returns:
+    --------
+    errormsg or True
+    """
+
+    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+    rnode = tgen.routers()[server]
+    socat_cmd = "socat -u "
+
+    # UDP4/TCP4/UDP6/UDP6-RECV
+    if protocol_option:
+        socat_cmd += "{}".format(protocol_option)
+
+    if port:
+        socat_cmd += ":{},".format(port)
+
+    if reuseaddr:
+        socat_cmd += "{},".format("reuseaddr")
+
+    # Group address range to cover
+    if igmp_groups:
+        if not isinstance(igmp_groups, list):
+            igmp_groups = [igmp_groups]
+
+    for igmp_group in igmp_groups:
+        if join:
+            join_traffic_option = "ipv6-join-group"
+        elif traffic:
+            join_traffic_option = "ipv6-join-group-source"
+
+        if send_from_intf and not send_from_intf_ip:
+            socat_cmd += "{}='[{}]:{}'".format(
+                join_traffic_option, igmp_group, send_from_intf
+            )
+        else:
+            socat_cmd += "{}='[{}]:{}:[{}]'".format(
+                join_traffic_option, igmp_group, send_from_intf, send_from_intf_ip
+            )
+
+        socat_cmd += " STDOUT"
+
+        socat_cmd += " &>{}/socat.logs &".format(tgen.logdir)
+
+        # Run socat command to send IGMP join
+        logger.info("[DUT: {}]: Running command: [{}]".format(server, socat_cmd))
+        output = rnode.run("set +m; {} sleep 0.5".format(socat_cmd))
+
+    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return True
+
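A hedged usage sketch of the new helper from a hypothetical test; the group address, interface, and server names are invented, and only the keyword arguments come from the signature above:

    # Ask host "i1" to join MLD group ff08::1 on its i1-eth0 interface.
    result = socat_send_igmp_join_traffic(
        tgen,
        server="i1",
        protocol_option="UDP6-RECV",
        igmp_groups="ff08::1",
        send_from_intf="i1-eth0",
        join=True,
    )
    assert result is True, "socat MLD join failed: {}".format(result)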
+
 #############################################
 # Verification APIs
 #############################################
@@ -3437,8 +3527,23 @@ def verify_rib(
                                 found_hops = [
                                     rib_r["ip"]
                                     for rib_r in rib_routes_json[st_rt][0]["nexthops"]
+                                    if "ip" in rib_r
                                 ]
 
+                                # If the "ip" key is missing from the nexthops
+                                # JSON, found_hops will be empty; handle that
+                                # case here
+                                if not len(found_hops):
+                                    errormsg = (
+                                        "Nexthop {} is Missing for "
+                                        "route {} in RIB of router {}\n".format(
+                                            next_hop,
+                                            st_rt,
+                                            dut,
+                                        )
+                                    )
+                                    return errormsg
+
                                 # Check only the count of nexthops
                                 if count_only:
                                     if len(next_hop) == len(found_hops):
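A hedged illustration of the guard added above: when no nexthop entry in the JSON carries an "ip" key, the comprehension yields an empty list and verify_rib now returns an error string instead of silently passing. The sample nexthop data here is made up for illustration:

    nexthops = [{"interfaceName": "r1-eth0"}]                 # no "ip" key present
    found_hops = [nh["ip"] for nh in nexthops if "ip" in nh]
    assert found_hops == []                                   # triggers the new error path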
index 02f66e9c26cba27be8a8596f0d5c2d6b341e5828..dfa10ccb2fb14ced23f7be055c0dab7630b92047 100644 (file)
@@ -599,6 +599,60 @@ class LinuxNamespace(Commander):
         self.cmd_raises("mkdir -p " + inner)
         self.cmd_raises("mount --rbind {} {} ".format(outer, inner))
 
+    def add_vlan(self, vlanname, linkiface, vlanid):
+        self.logger.debug("Adding VLAN interface: %s (%s)", vlanname, vlanid)
+        ip_path = self.get_exec_path("ip")
+        assert ip_path, "XXX missing ip command!"
+        self.cmd_raises(
+            [
+                ip_path,
+                "link",
+                "add",
+                "link",
+                linkiface,
+                "name",
+                vlanname,
+                "type",
+                "vlan",
+                "id",
+                vlanid,
+            ]
+        )
+        self.cmd_raises([ip_path, "link", "set", "dev", vlanname, "up"])
+
+    def add_loop(self, loopname):
+        self.logger.debug("Adding Linux iface: %s", loopname)
+        ip_path = self.get_exec_path("ip")
+        assert ip_path, "XXX missing ip command!"
+        self.cmd_raises([ip_path, "link", "add", loopname, "type", "dummy"])
+        self.cmd_raises([ip_path, "link", "set", "dev", loopname, "up"])
+
+    def add_l3vrf(self, vrfname, tableid):
+        self.logger.debug("Adding Linux VRF: %s", vrfname)
+        ip_path = self.get_exec_path("ip")
+        assert ip_path, "XXX missing ip command!"
+        self.cmd_raises(
+            [ip_path, "link", "add", vrfname, "type", "vrf", "table", tableid]
+        )
+        self.cmd_raises([ip_path, "link", "set", "dev", vrfname, "up"])
+
+    def del_iface(self, iface):
+        self.logger.debug("Removing Linux Iface: %s", iface)
+        ip_path = self.get_exec_path("ip")
+        assert ip_path, "XXX missing ip command!"
+        self.cmd_raises([ip_path, "link", "del", iface])
+
+    def attach_iface_to_l3vrf(self, ifacename, vrfname):
+        self.logger.debug("Attaching Iface %s to Linux VRF %s", ifacename, vrfname)
+        ip_path = self.get_exec_path("ip")
+        assert ip_path, "XXX missing ip command!"
+        if vrfname:
+            self.cmd_raises(
+                [ip_path, "link", "set", "dev", ifacename, "master", vrfname]
+            )
+        else:
+            self.cmd_raises([ip_path, "link", "set", "dev", ifacename, "nomaster"])
+
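A sketch of how a test might drive the new interface helpers, assuming the topology exposes a LinuxNamespace-backed node; the node name, VLAN id and routing-table id are illustrative values only:

    node = tgen.net["r1"]                            # assumed LinuxNamespace-backed node
    node.add_vlan("r1-eth0.100", "r1-eth0", "100")   # VLAN 100 on top of r1-eth0
    node.add_l3vrf("blue", "1001")                   # VRF "blue" bound to table 1001
    node.attach_iface_to_l3vrf("r1-eth0.100", "blue")
    node.attach_iface_to_l3vrf("r1-eth0.100", None)  # detach again (nomaster)
    node.del_iface("r1-eth0.100")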
     def add_netns(self, ns):
         self.logger.debug("Adding network namespace %s", ns)
 
index cd070e08b9a6def3084bbe51ed3230aff59617d5..03ab02460f50cdabfb069b7667bb6037180507f6 100644 (file)
@@ -36,6 +36,7 @@ from lib.common_config import (
     InvalidCLIError,
     retry,
     run_frr_cmd,
+    validate_ip_address,
 )
 from lib.micronet import get_exec_path
 from lib.topolog import logger
@@ -47,7 +48,7 @@ CWD = os.path.dirname(os.path.realpath(__file__))
 
 def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True):
     """
-    API to configure pim on router
+    API to configure pim/pimv6 on router
 
     Parameters
     ----------
@@ -70,6 +71,16 @@ def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True
                     "prefix-list": "pf_list_1"
                     "delete": True
                 }]
+            },
+            "pim6": {
+                "disable" : ["l1-i1-eth1"],
+                "rp": [{
+                    "rp_addr" : "2001:db8:f::5:17",
+                    "keep-alive-timer": "100",
+                    "group_addr_range": ["FF00::/8"],
+                    "prefix-list": "pf_list_1",
+                    "delete": True
+                }]
             }
         }
     }
@@ -97,12 +108,8 @@ def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True
 
     # Now add RP config to all routers
     for router in input_dict.keys():
-        if "pim" not in input_dict[router]:
-            continue
-        if "rp" not in input_dict[router]["pim"]:
-            continue
-        _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
-
+        if "pim" in input_dict[router] or "pim6" in input_dict[router]:
+            _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
     try:
         result = create_common_configurations(
             tgen, config_data_dict, "pim", build, load_config
@@ -133,81 +140,123 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
     """
 
     logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+    rp_data = []
+
+    # PIMv4
+    pim_data = None
+    if "pim" in input_dict[router]:
+        pim_data = input_dict[router]["pim"]
+        if "rp" in input_dict[router]["pim"]:
+            rp_data += pim_data["rp"]
 
-    pim_data = input_dict[router]["pim"]
-    rp_data = pim_data["rp"]
+    # PIMv6
+    pim6_data = None
+    if "pim6" in input_dict[router]:
+        pim6_data = input_dict[router]["pim6"]
+        if "rp" in input_dict[router]["pim6"]:
+            rp_data += pim6_data["rp"]
 
     # Configure this RP on every router.
     for dut in tgen.routers():
         # At least one interface must be enabled for PIM on the router
         pim_if_enabled = False
+        pim6_if_enabled = False
         for destLink, data in topo[dut]["links"].items():
             if "pim" in data:
                 pim_if_enabled = True
-        if not pim_if_enabled:
+            if "pim6" in data:
+                pim6_if_enabled = True
+        if not pim_if_enabled and pim_data:
+            continue
+        if not pim6_if_enabled and pim6_data:
             continue
 
         config_data = []
 
-        for rp_dict in deepcopy(rp_data):
-            # ip address of RP
-            if "rp_addr" not in rp_dict and build:
-                logger.error(
-                    "Router %s: 'ip address of RP' not " "present in input_dict/JSON",
-                    router,
-                )
-
-                return False
-            rp_addr = rp_dict.setdefault("rp_addr", None)
-
-            # Keep alive Timer
-            keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None)
-
-            # Group Address range to cover
-            if "group_addr_range" not in rp_dict and build:
-                logger.error(
-                    "Router %s:'Group Address range to cover'"
-                    " not present in input_dict/JSON",
-                    router,
-                )
-
-                return False
-            group_addr_range = rp_dict.setdefault("group_addr_range", None)
+        if rp_data:
+            for rp_dict in deepcopy(rp_data):
+                # ip address of RP
+                if "rp_addr" not in rp_dict and build:
+                    logger.error(
+                        "Router %s: 'ip address of RP' not "
+                        "present in input_dict/JSON",
+                        router,
+                    )
 
-            # Group prefix-list filter
-            prefix_list = rp_dict.setdefault("prefix_list", None)
+                    return False
+                rp_addr = rp_dict.setdefault("rp_addr", None)
+                if rp_addr:
+                    addr_type = validate_ip_address(rp_addr)
+                # Keep alive Timer
+                keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None)
+
+                # Group Address range to cover
+                if "group_addr_range" not in rp_dict and build:
+                    logger.error(
+                        "Router %s:'Group Address range to cover'"
+                        " not present in input_dict/JSON",
+                        router,
+                    )
 
-            # Delete rp config
-            del_action = rp_dict.setdefault("delete", False)
+                    return False
+                group_addr_range = rp_dict.setdefault("group_addr_range", None)
 
-            if keep_alive_timer:
-                cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer)
-                if del_action:
-                    cmd = "no {}".format(cmd)
-                config_data.append(cmd)
+                # Group prefix-list filter
+                prefix_list = rp_dict.setdefault("prefix_list", None)
 
-            if rp_addr:
-                if group_addr_range:
-                    if type(group_addr_range) is not list:
-                        group_addr_range = [group_addr_range]
+                # Delete rp config
+                del_action = rp_dict.setdefault("delete", False)
 
-                    for grp_addr in group_addr_range:
-                        cmd = "ip pim rp {} {}".format(rp_addr, grp_addr)
+                if keep_alive_timer:
+                    if addr_type == "ipv4":
+                        cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer)
+                        if del_action:
+                            cmd = "no {}".format(cmd)
+                        config_data.append(cmd)
+                    if addr_type == "ipv6":
+                        cmd = "ipv6 pim rp keep-alive-timer {}".format(keep_alive_timer)
                         if del_action:
                             cmd = "no {}".format(cmd)
                         config_data.append(cmd)
 
-                if prefix_list:
-                    cmd = "ip pim rp {} prefix-list {}".format(rp_addr, prefix_list)
-                    if del_action:
-                        cmd = "no {}".format(cmd)
-                    config_data.append(cmd)
+                if rp_addr:
+                    if group_addr_range:
+                        if type(group_addr_range) is not list:
+                            group_addr_range = [group_addr_range]
 
-            if config_data:
-                if dut not in config_data_dict:
-                    config_data_dict[dut] = config_data
-                else:
-                    config_data_dict[dut].extend(config_data)
+                        for grp_addr in group_addr_range:
+                            if addr_type == "ipv4":
+                                cmd = "ip pim rp {} {}".format(rp_addr, grp_addr)
+                                if del_action:
+                                    cmd = "no {}".format(cmd)
+                                config_data.append(cmd)
+                            if addr_type == "ipv6":
+                                cmd = "ipv6 pim rp {} {}".format(rp_addr, grp_addr)
+                                if del_action:
+                                    cmd = "no {}".format(cmd)
+                                config_data.append(cmd)
+
+                    if prefix_list:
+                        if addr_type == "ipv4":
+                            cmd = "ip pim rp {} prefix-list {}".format(
+                                rp_addr, prefix_list
+                            )
+                            if del_action:
+                                cmd = "no {}".format(cmd)
+                            config_data.append(cmd)
+                        if addr_type == "ipv6":
+                            cmd = "ipv6 pim rp {} prefix-list {}".format(
+                                rp_addr, prefix_list
+                            )
+                            if del_action:
+                                cmd = "no {}".format(cmd)
+                            config_data.append(cmd)
+
+                if config_data:
+                    if dut not in config_data_dict:
+                        config_data_dict[dut] = config_data
+                    else:
+                        config_data_dict[dut].extend(config_data)
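A hedged illustration of what this loop emits for an IPv6 RP entry; the input keys are shown with the underscore spelling the code actually reads, and the values are the docstring's example values:

    rp_dict = {
        "rp_addr": "2001:db8:f::5:17",
        "keep_alive_timer": "100",
        "group_addr_range": ["FF00::/8"],
        "prefix_list": "pf_list_1",
    }
    # With an IPv6 rp_addr, the commands appended to config_data are roughly:
    #   ipv6 pim rp keep-alive-timer 100
    #   ipv6 pim rp 2001:db8:f::5:17 FF00::/8
    #   ipv6 pim rp 2001:db8:f::5:17 prefix-list pf_list_1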
 
 
 def create_igmp_config(tgen, topo, input_dict=None, build=False):
@@ -319,6 +368,121 @@ def create_igmp_config(tgen, topo, input_dict=None, build=False):
     return result
 
 
+def create_mld_config(tgen, topo, input_dict=None, build=False):
+    """
+    API to configure MLD for PIMv6 on a router
+
+    Parameters
+    ----------
+    * `tgen` : Topogen object
+    * `topo` : json file data
+    * `input_dict` : Input dict data, required when configuring from
+                     testcase
+    * `build` : Only for initial setup phase this is set as True.
+
+    Usage
+    -----
+    input_dict = {
+        "r1": {
+            "mld": {
+                "interfaces": {
+                    "r1-r0-eth0" :{
+                        "mld":{
+                            "version":  "2",
+                            "delete": True,
+                            "query": {
+                                "query-interval" : 100,
+                                "query-max-response-time": 200
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    Returns
+    -------
+    True or False
+    """
+    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+    result = False
+    if not input_dict:
+        input_dict = deepcopy(topo)
+    else:
+        topo = topo["routers"]
+        input_dict = deepcopy(input_dict)
+    for router in input_dict.keys():
+        if "mld" not in input_dict[router]:
+            logger.debug("Router %s: 'mld' is not present in " "input_dict", router)
+            continue
+
+        mld_data = input_dict[router]["mld"]
+
+        if "interfaces" in mld_data:
+            config_data = []
+            intf_data = mld_data["interfaces"]
+
+            for intf_name in intf_data.keys():
+                cmd = "interface {}".format(intf_name)
+                config_data.append(cmd)
+                protocol = "mld"
+                del_action = intf_data[intf_name]["mld"].setdefault("delete", False)
+                cmd = "ipv6 mld"
+                if del_action:
+                    cmd = "no {}".format(cmd)
+                config_data.append(cmd)
+
+                del_attr = intf_data[intf_name]["mld"].setdefault("delete_attr", False)
+                join = intf_data[intf_name]["mld"].setdefault("join", None)
+                source = intf_data[intf_name]["mld"].setdefault("source", None)
+                version = intf_data[intf_name]["mld"].setdefault("version", False)
+                query = intf_data[intf_name]["mld"].setdefault("query", {})
+
+                if version:
+                    cmd = "ipv6 {} version {}".format(protocol, version)
+                    if del_action:
+                        cmd = "no {}".format(cmd)
+                    config_data.append(cmd)
+
+                if source and join:
+                    for group in join:
+                        cmd = "ipv6 {} join {} {}".format(protocol, group, source)
+
+                        if del_attr:
+                            cmd = "no {}".format(cmd)
+                        config_data.append(cmd)
+
+                elif join:
+                    for group in join:
+                        cmd = "ipv6 {} join {}".format(protocol, group)
+
+                        if del_attr:
+                            cmd = "no {}".format(cmd)
+                        config_data.append(cmd)
+
+                if query:
+                    for _query, value in query.items():
+                        if _query != "delete":
+                            cmd = "ipv6 {} {} {}".format(protocol, _query, value)
+
+                            if "delete" in intf_data[intf_name][protocol]["query"]:
+                                cmd = "no {}".format(cmd)
+
+                        config_data.append(cmd)
+        try:
+            result = create_common_configuration(
+                tgen, router, config_data, "interface_config", build=build
+            )
+        except InvalidCLIError:
+            errormsg = traceback.format_exc()
+            logger.error(errormsg)
+            return errormsg
+
+    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return result
+
+
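A minimal usage sketch for this API, reusing the docstring's example; the router and interface names are placeholders:

    mld_input = {
        "r1": {
            "mld": {
                "interfaces": {
                    "r1-r0-eth0": {"mld": {"version": "2"}}
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, mld_input)
    assert result is True, "MLD configuration failed: {}".format(result)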
 def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
     """
     Helper API to enable or disable pim on interfaces
@@ -338,7 +502,7 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
 
     config_data = []
 
-    # Enable pim on interfaces
+    # Enable pim/pim6 on interfaces
     for destRouterLink, data in sorted(topo[router]["links"].items()):
         if "pim" in data and data["pim"] == "enable":
             # Loopback interfaces
@@ -351,6 +515,17 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
             config_data.append(cmd)
             config_data.append("ip pim")
 
+        if "pim6" in data and data["pim6"] == "enable":
+            # Loopback interfaces
+            if "type" in data and data["type"] == "loopback":
+                interface_name = destRouterLink
+            else:
+                interface_name = data["interface"]
+
+            cmd = "interface {}".format(interface_name)
+            config_data.append(cmd)
+            config_data.append("ipv6 pim")
+
     # pim global config
     if "pim" in input_dict[router]:
         pim_data = input_dict[router]["pim"]
@@ -366,6 +541,21 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
                     cmd = "no {}".format(cmd)
                 config_data.append(cmd)
 
+    # pim6 global config
+    if "pim6" in input_dict[router]:
+        pim6_data = input_dict[router]["pim6"]
+        del_action = pim6_data.setdefault("delete", False)
+        for t in [
+            "join-prune-interval",
+            "keep-alive-timer",
+            "register-suppress-time",
+        ]:
+            if t in pim6_data:
+                cmd = "ipv6 pim {} {}".format(t, pim6_data[t])
+                if del_action:
+                    cmd = "no {}".format(cmd)
+                config_data.append(cmd)
+
     return config_data
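A hedged illustration of the pim6 global section handled above; with input like the following (values are placeholders), the helper appends the matching "ipv6 pim ..." lines, or their "no ..." forms when "delete" is set:

    pim6_data = {"join-prune-interval": 60, "keep-alive-timer": 210}
    # -> "ipv6 pim join-prune-interval 60"
    # -> "ipv6 pim keep-alive-timer 210"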
 
 
@@ -732,9 +922,6 @@ def verify_upstream_iif(
         "[DUT: %s]: Verifying upstream Inbound Interface" " for IGMP groups received:",
         dut,
     )
-    show_ip_pim_upstream_json = run_frr_cmd(
-        rnode, "show ip pim upstream json", isjson=True
-    )
 
     if type(group_addresses) is not list:
         group_addresses = [group_addresses]
@@ -742,6 +929,17 @@ def verify_upstream_iif(
     if type(iif) is not list:
         iif = [iif]
 
+    for grp in group_addresses:
+        addr_type = validate_ip_address(grp)
+
+    if addr_type == "ipv4":
+        ip_cmd = "ip"
+    elif addr_type == "ipv6":
+        ip_cmd = "ipv6"
+
+    cmd = "show {} pim upstream json".format(ip_cmd)
+    show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
     for grp_addr in group_addresses:
         # Verify group address
         if grp_addr not in show_ip_pim_upstream_json:
@@ -883,13 +1081,19 @@ def verify_join_state_and_timer(
         "[DUT: %s]: Verifying Join state and Join Timer" " for IGMP groups received:",
         dut,
     )
-    show_ip_pim_upstream_json = run_frr_cmd(
-        rnode, "show ip pim upstream json", isjson=True
-    )
 
     if type(group_addresses) is not list:
         group_addresses = [group_addresses]
 
+    for grp in group_addresses:
+        addr_type = validate_ip_address(grp)
+
+    if addr_type == "ipv4":
+        cmd = "show ip pim upstream json"
+    elif addr_type == "ipv6":
+        cmd = "show ipv6 pim upstream json"
+    show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
     for grp_addr in group_addresses:
         # Verify group address
         if grp_addr not in show_ip_pim_upstream_json:
@@ -1010,12 +1214,31 @@ def verify_mroutes(
 
     rnode = tgen.routers()[dut]
 
+    if not isinstance(group_addresses, list):
+        group_addresses = [group_addresses]
+
+    if not isinstance(iif, list) and iif != "none":
+        iif = [iif]
+
+    if not isinstance(oil, list) and oil != "none":
+        oil = [oil]
+
+    for grp in group_addresses:
+        addr_type = validate_ip_address(grp)
+
+    if addr_type == "ipv4":
+        ip_cmd = "ip"
+    elif addr_type == "ipv6":
+        ip_cmd = "ipv6"
+
     if return_uptime:
         logger.info("Sleeping for %s sec..", mwait)
         sleep(mwait)
 
     logger.info("[DUT: %s]: Verifying ip mroutes", dut)
-    show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
+    show_ip_mroute_json = run_frr_cmd(
+        rnode, "show {} mroute json".format(ip_cmd), isjson=True
+    )
 
     if return_uptime:
         uptime_dict = {}
@@ -1024,15 +1247,6 @@ def verify_mroutes(
         error_msg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut)
         return error_msg
 
-    if not isinstance(group_addresses, list):
-        group_addresses = [group_addresses]
-
-    if not isinstance(iif, list) and iif != "none":
-        iif = [iif]
-
-    if not isinstance(oil, list) and oil != "none":
-        oil = [oil]
-
     for grp_addr in group_addresses:
         if grp_addr not in show_ip_mroute_json:
             errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
@@ -1214,15 +1428,20 @@ def verify_pim_rp_info(
 
     rnode = tgen.routers()[dut]
 
-    logger.info("[DUT: %s]: Verifying ip rp info", dut)
-    show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True)
-
     if type(group_addresses) is not list:
         group_addresses = [group_addresses]
 
     if type(oif) is not list:
         oif = [oif]
 
+    for grp in group_addresses:
+        addr_type = validate_ip_address(grp)
+
+    if addr_type == "ipv4":
+        ip_cmd = "ip"
+    elif addr_type == "ipv6":
+        ip_cmd = "ipv6"
+
     for grp_addr in group_addresses:
         if rp is None:
             rp_details = find_rp_details(tgen, topo)
@@ -1232,9 +1451,14 @@ def verify_pim_rp_info(
             else:
                 iamRP = False
         else:
-            show_ip_route_json = run_frr_cmd(
-                rnode, "show ip route connected json", isjson=True
-            )
+            if addr_type == "ipv4":
+                show_ip_route_json = run_frr_cmd(
+                    rnode, "show ip route connected json", isjson=True
+                )
+            elif addr_type == "ipv6":
+                show_ip_route_json = run_frr_cmd(
+                    rnode, "show ipv6 route connected json", isjson=True
+                )
             for _rp in show_ip_route_json.keys():
                 if rp == _rp.split("/")[0]:
                     iamRP = True
@@ -1242,16 +1466,27 @@ def verify_pim_rp_info(
                 else:
                     iamRP = False
 
+        logger.info("[DUT: %s]: Verifying ip rp info", dut)
+        cmd = "show {} pim rp-info json".format(ip_cmd)
+        show_ip_rp_info_json = run_frr_cmd(rnode, cmd, isjson=True)
+
         if rp not in show_ip_rp_info_json:
-            errormsg = "[DUT %s]: Verifying rp-info" "for rp_address %s [FAILED]!! " % (
-                dut,
-                rp,
+            errormsg = (
+                "[DUT %s]: Verifying rp-info "
+                "for rp_address %s [FAILED]!! " % (dut, rp)
             )
             return errormsg
         else:
             group_addr_json = show_ip_rp_info_json[rp]
 
         for rp_json in group_addr_json:
+            if "rpAddress" not in rp_json:
+                errormsg = "[DUT %s]: %s key not " "present in rp-info " % (
+                    dut,
+                    "rpAddress",
+                )
+                return errormsg
+
             if oif is not None:
                 found = False
                 if rp_json["outboundInterface"] not in oif:
@@ -1380,14 +1615,26 @@ def verify_pim_state(
     rnode = tgen.routers()[dut]
 
     logger.info("[DUT: %s]: Verifying pim state", dut)
-    show_pim_state_json = run_frr_cmd(rnode, "show ip pim state json", isjson=True)
-
-    if installed_fl is None:
-        installed_fl = 1
 
     if type(group_addresses) is not list:
         group_addresses = [group_addresses]
 
+    for grp in group_addresses:
+        addr_type = validate_ip_address(grp)
+
+    if addr_type == "ipv4":
+        ip_cmd = "ip"
+    elif addr_type == "ipv6":
+        ip_cmd = "ipv6"
+
+    logger.info("[DUT: %s]: Verifying pim state", dut)
+    show_pim_state_json = run_frr_cmd(
+        rnode, "show {} pim state json".format(ip_cmd), isjson=True
+    )
+
+    if installed_fl is None:
+        installed_fl = 1
+
     for grp_addr in group_addresses:
         if src_address is None:
             src_address = "*"
@@ -3635,7 +3882,7 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
     return True
 
 
-def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
+def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
     """
     Verify ip pim interface traffic by running
     "show ip pim interface traffic" cli
@@ -3645,6 +3892,8 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
     * `tgen`: topogen object
     * `input_dict(dict)`: defines DUT, what and from which interfaces
                           traffic needs to be verified
+    * [optional]`addr_type`: specify address-family, default is ipv4
+
     Usage
     -----
     input_dict = {
@@ -3675,9 +3924,13 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
         rnode = tgen.routers()[dut]
 
         logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
-        show_pim_intf_traffic_json = run_frr_cmd(
-            rnode, "show ip pim interface traffic json", isjson=True
-        )
+
+        if addr_type == "ipv4":
+            cmd = "show ip pim interface traffic json"
+        elif addr_type == "ipv6":
+            cmd = "show ipv6 pim interface traffic json"
+
+        show_pim_intf_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
 
         output_dict[dut] = {}
         for intf, data in input_dict[dut].items():
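A short usage sketch of the new addr_type knob, assuming an input_dict in the format shown in the docstring above:

    # IPv4 counters (default) and IPv6 counters collected from the same input_dict.
    v4_stats = verify_pim_interface_traffic(tgen, input_dict)
    v6_stats = verify_pim_interface_traffic(tgen, input_dict, addr_type="ipv6")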
index c04506f47e5bd614cee790d336b3764471b6c50f..c51a187f285900d2ac3c5e2d5103f1e9609b6122 100644 (file)
@@ -725,6 +725,7 @@ class TopoRouter(TopoGear):
     RD_PBRD = 16
     RD_PATH = 17
     RD_SNMP = 18
+    RD_PIM6 = 19
     RD = {
         RD_FRR: "frr",
         RD_ZEBRA: "zebra",
@@ -735,6 +736,7 @@ class TopoRouter(TopoGear):
         RD_ISIS: "isisd",
         RD_BGP: "bgpd",
         RD_PIM: "pimd",
+        RD_PIM6: "pim6d",
         RD_LDP: "ldpd",
         RD_EIGRP: "eigrpd",
         RD_NHRP: "nhrpd",
@@ -820,7 +822,8 @@ class TopoRouter(TopoGear):
         Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP,
         TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
         TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
-        TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP.
+        TopoRouter.RD_PIM, TopoRouter.RD_PIM6, TopoRouter.RD_PBR,
+        TopoRouter.RD_SNMP.
 
         Possible `source` values are `None` for an empty config file, a path name which is
         used directly, or a file name with no path components which is first looked for
@@ -1276,6 +1279,7 @@ def diagnose_env_linux(rundir):
             "ripngd",
             "isisd",
             "pimd",
+            "pim6d",
             "ldpd",
             "pbrd",
         ]:
index 3ca3353ed33f67e3fcfa7bc997580fcd5b5c74a8..b49b09e63625ae6fd1be0550aec76cfdd2e71caa 100644 (file)
@@ -41,7 +41,11 @@ from lib.common_config import (
     number_to_column,
 )
 from lib.ospf import create_router_ospf
-from lib.pim import create_igmp_config, create_pim_config
+from lib.pim import (
+    create_igmp_config,
+    create_pim_config,
+    create_mld_config,
+)
 from lib.topolog import logger
 
 
@@ -332,6 +336,7 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
             ("route_maps", create_route_maps),
             ("pim", create_pim_config),
             ("igmp", create_igmp_config),
+            ("mld", create_mld_config),
             ("bgp", create_router_bgp),
             ("ospf", create_router_ospf),
         ]
@@ -352,7 +357,9 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
         logger.info("build_config_from_json: failed to configure topology")
         pytest.exit(1)
 
-    logger.info("Built config now clearing ospf neighbors as that router-id might not be what is used")
+    logger.info(
+        "Built config now clearing ospf neighbors as that router-id might not be what is used"
+    )
     for ospf in ["ospf", "ospf6"]:
         for router in data:
             if ospf not in data[router]:
index 27b566a8f5529fe9e1658e0176a40b7f678e276a..5a3f586f824245a225a4f58a2446fed8f6536cee 100644 (file)
@@ -1330,6 +1330,7 @@ class Router(Node):
             "isisd": 0,
             "bgpd": 0,
             "pimd": 0,
+            "pim6d": 0,
             "ldpd": 0,
             "eigrpd": 0,
             "nhrpd": 0,
index 4610c5f15ae235bfeebdfa0327a52c7ec588d766..bcf8e5b5f3e0b03756213288d977294c3032c696 100644 (file)
@@ -304,7 +304,6 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
 
         # Add static routes
         input_dict = {
-            fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]},
             rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]},
             lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]},
         }
@@ -313,9 +312,11 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        for dut, _nexthop in zip([fhr, rp, lhr], [next_hop, next_hop_rp, next_hop_lhr]):
+        for dut, _nexthop in zip([rp, lhr], [next_hop_rp, next_hop_lhr]):
             input_routes = {dut: input_dict[dut]}
-            result = verify_rib(tgen, "ipv4", dut, input_routes, _nexthop)
+            result = verify_rib(
+                tgen, "ipv4", dut, input_routes, _nexthop, protocol="static"
+            )
             assert result is True, "Testcase {} : Failed \n Error {}".format(
                 tc_name, result
             )
@@ -343,7 +344,9 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        result = verify_rib(tgen, "ipv4", fhr, input_dict, next_hop_fhr)
+        result = verify_rib(
+            tgen, "ipv4", fhr, input_dict, next_hop_fhr, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -355,7 +358,9 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        result = verify_rib(tgen, "ipv4", lhr, input_dict, next_hop_lhr)
+        result = verify_rib(
+            tgen, "ipv4", lhr, input_dict, next_hop_lhr, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -473,7 +478,9 @@ def test_BSR_higher_prefer_ip_p0(request):
     # Verifying static routes are installed
     for dut, _nexthop in zip(["i1", "l1"], [next_hop_rp, next_hop_lhr]):
         input_routes = {dut: input_dict[dut]}
-        result = verify_rib(tgen, "ipv4", dut, input_routes, _nexthop)
+        result = verify_rib(
+            tgen, "ipv4", dut, input_routes, _nexthop, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -482,7 +489,9 @@ def test_BSR_higher_prefer_ip_p0(request):
         input_routes = {
             "f1": {"static_routes": [{"network": bsr_add, "next_hop": next_hop}]}
         }
-        result = verify_rib(tgen, "ipv4", "f1", input_routes, next_hop)
+        result = verify_rib(
+            tgen, "ipv4", "f1", input_routes, next_hop, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -673,7 +682,9 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
     # Verifying static routes are installed
     for dut, _nexthop in zip(["i1", "l1"], [next_hop_rp, next_hop_lhr]):
         input_routes = {dut: input_dict[dut]}
-        result = verify_rib(tgen, "ipv4", dut, input_routes, _nexthop)
+        result = verify_rib(
+            tgen, "ipv4", dut, input_routes, _nexthop, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -681,7 +692,9 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
     input_routes = {
         "f1": {"static_routes": [{"network": CRP, "next_hop": next_hop_fhr}]}
     }
-    result = verify_rib(tgen, "ipv4", "f1", input_routes, expected=False)
+    result = verify_rib(
+        tgen, "ipv4", "f1", input_routes, protocol="static", expected=False
+    )
     assert (
         result is not True
     ), "Testcase {} : Failed \n " "Route is still present \n Error {}".format(
@@ -705,7 +718,7 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
     assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
     # Verifying static routes are installed
-    result = verify_rib(tgen, "ipv4", "f1", input_dict)
+    result = verify_rib(tgen, "ipv4", "f1", input_dict, protocol="static")
     assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
 
     intf_f1_i1 = topo["routers"]["f1"]["links"]["i1"]["interface"]
@@ -764,7 +777,7 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
     input_dict = {
         "f1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": NEXT_HOP1}]}
     }
-    result = verify_rib(tgen, "ipv4", "f1", input_dict, NEXT_HOP1)
+    result = verify_rib(tgen, "ipv4", "f1", input_dict, NEXT_HOP1, protocol="static")
     assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
 
     input_dict = {
@@ -774,7 +787,9 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
             ]
         }
     }
-    result = verify_rib(tgen, "ipv4", "f1", input_dict, expected=False)
+    result = verify_rib(
+        tgen, "ipv4", "f1", input_dict, protocol="static", expected=False
+    )
     assert result is not True, (
         "Testcase {} : Failed \n "
         "Routes:[{}, {}] are still present \n Error {}".format(
@@ -1703,7 +1718,9 @@ def test_iif_join_state_p0(request):
     assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
     # Verifying static routes are installed
-    result = verify_rib(tgen, "ipv4", "l1", input_dict, expected=False)
+    result = verify_rib(
+        tgen, "ipv4", "l1", input_dict, protocol="static", expected=False
+    )
     assert (
         result is not True
     ), "Testcase {} : Failed \n " "Routes:{} are still present \n Error {}".format(
@@ -1751,7 +1768,7 @@ def test_iif_join_state_p0(request):
     assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
     # Verifying static routes are installed
-    result = verify_rib(tgen, "ipv4", "l1", input_dict, next_hop_lhr)
+    result = verify_rib(tgen, "ipv4", "l1", input_dict, next_hop_lhr, protocol="static")
     assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
 
     # Verify that (*,G) installed in mroute again
index 191615cbbe044a48a6e9ee785227a4a2ec28f2ec..8a4ef1d9c70592d7be0c754ab7025a3a9bbcc79e 100644 (file)
@@ -245,7 +245,6 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
 
         # Add static routes
         input_dict = {
-            fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]},
             rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]},
             lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]},
         }
@@ -254,9 +253,11 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        for dut, _nexthop in zip([fhr, rp, lhr], [next_hop, next_hop_rp, next_hop_lhr]):
+        for dut, _nexthop in zip([rp, lhr], [next_hop_rp, next_hop_lhr]):
             input_routes = {dut: input_dict[dut]}
-            result = verify_rib(tgen, "ipv4", dut, input_routes, _nexthop)
+            result = verify_rib(
+                tgen, "ipv4", dut, input_routes, _nexthop, protocol="static"
+            )
             assert result is True, "Testcase {} : Failed \n Error {}".format(
                 tc_name, result
             )
@@ -300,7 +301,9 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        result = verify_rib(tgen, "ipv4", fhr, input_dict, next_hop_fhr)
+        result = verify_rib(
+            tgen, "ipv4", fhr, input_dict, next_hop_fhr, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
@@ -312,7 +315,9 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,
         assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
 
         # Verifying static routes are installed
-        result = verify_rib(tgen, "ipv4", lhr, input_dict, next_hop_lhr)
+        result = verify_rib(
+            tgen, "ipv4", lhr, input_dict, next_hop_lhr, protocol="static"
+        )
         assert result is True, "Testcase {} : Failed \n Error {}".format(
             tc_name, result
         )
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json b/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json
new file mode 100644 (file)
index 0000000..9edfae4
--- /dev/null
@@ -0,0 +1,197 @@
+{
+    "address_types": ["ipv6"],
+    "ipv6base": "fd00::",
+    "ipv6mask": 64,
+    "link_ip_start": {
+        "ipv6": "fd00::",
+        "v6mask": 64
+    },
+    "lo_prefix": {
+        "ipv6": "2001:db8:f::",
+        "v6mask": 128
+    },
+    "routers": {
+        "r0": {
+            "links": {
+                "r1": {"ipv6": "auto"}
+            }
+        },
+        "r1": {
+            "links": {
+                "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r0": {"ipv6": "auto", "pim6": "enable"},
+                "r2": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r3": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r4": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }}
+            },
+            "ospf6": {
+                "router_id": "100.1.1.0",
+                "neighbors": {
+                    "r2": {},
+                    "r3": {},
+                    "r4": {}
+                },
+                "redistribute": [
+                    {
+                        "redist_type": "static"
+                    },
+                    {
+                        "redist_type": "connected"
+                    }
+                ]
+            },
+            "mld": {
+                "interfaces": {
+                    "r1-r0-eth0" :{
+                        "mld":{
+                        }
+                    }
+                }
+            }
+        },
+        "r2": {
+            "links": {
+                "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r1": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r3": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }}
+            },
+            "ospf6": {
+                "router_id": "100.1.1.1",
+                "neighbors": {
+                    "r1": {},
+                    "r3": {}
+                },
+                "redistribute": [
+                    {
+                        "redist_type": "static"
+                    },
+                    {
+                        "redist_type": "connected"
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "links": {
+                "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r1": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r2": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r4": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r5": {"ipv6": "auto", "pim6": "enable"}
+            },
+            "ospf6": {
+                "router_id": "100.1.1.2",
+                "neighbors": {
+                    "r1": {},
+                    "r2": {},
+                    "r4": {}
+                },
+                "redistribute": [
+                    {
+                        "redist_type": "static"
+                    },
+                    {
+                        "redist_type": "connected"
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "links": {
+                "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r1": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }},
+                "r3": {"ipv6": "auto", "pim6": "enable",
+                "ospf6": {
+                    "area": "0.0.0.0",
+                    "hello_interval": 1,
+                    "dead_interval": 4
+                }}
+            },
+            "ospf6": {
+                "router_id": "100.1.1.3",
+                "neighbors": {
+                    "r1": {},
+                    "r3": {}
+                },
+                "redistribute": [
+                    {
+                        "redist_type": "static"
+                    },
+                    {
+                        "redist_type": "connected"
+                    }
+                ]
+            }
+        },
+        "r5": {
+            "links": {
+                "r3": {"ipv6": "auto"}
+            }
+        }
+    }
+}
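A hedged sketch of how a pimv6 test module would typically consume this topology file, assuming the JSON-based setup_module pattern used by the existing pimv4 static-RP tests (Topogen's JSON constructor and tgen.json_topo as in those tests):

    # Inside def setup_module(mod): of the pimv6 test; CWD is the test directory.
    json_file = "{}/multicast_pimv6_static_rp.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    topo = tgen.json_topo
    start_topology(tgen)                  # from lib.common_config
    build_config_from_json(tgen, topo)    # now also handles the "pim6" and "mld" sections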
index eafad06e963a2aa083507cac753f2168ab9b687a..2a9fe32b08fa4a87d6b04762bf8f9003957dc985 100755 (executable)
@@ -1391,2531 +1391,6 @@ def test_send_join_on_higher_preffered_rp_p1(request):
     write_test_footer(tc_name)
 
 
-def test_RP_configured_as_LHR_1_p1(request):
-    """
-    TC_21_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure
-             in LHR router
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1 : LHR/RP
-    r3 : FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send the IGMP join from r0")
-    step("Send multicast traffic from r5")
-
-    step("r1 , r2, r3, r4: Delete existing RP configuration" "configure r1(LHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Configure r1(LHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    shutdown_bringup_interface(tgen, "r1", "lo", False)
-    sleep(5)
-    shutdown_bringup_interface(tgen, "r1", "lo", True)
-    sleep(5)
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.1.17"
-    iif = "lo"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S, G) upstream join state is joined and join"
-        " timer is running \n Error: {}".format(tc_name, result)
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_RP_configured_as_LHR_2_p1(request):
-    """
-    TC_21_2_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure
-             in LHR router
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1 : LHR/RP
-    r3 : FHR
-
-    """
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send multicast traffic from r5")
-    step("Send the IGMP join from r0")
-
-    step("r1, r2, r3, r4: Delete existing RP configuration," "configure r1(LHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1, r2, r3, r4: Configure r1(LHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.1.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.1.17"
-    iif = "lo"
-    result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_RP_configured_as_FHR_1_p1(request):
-    """
-    TC_22_1_P1: Verify OIF and RPF for (*,G) and (S,G) when a static RP is
-             configured on the FHR router
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1 : LHR
-    r3 : FHR/RP
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send the IGMP join from r0")
-    step("Send multicast traffic from r5")
-
-    step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-    }
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1, r2, r3, r4: Configure r3(FHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.3.17"
-    iif = "r1-r3-eth2"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_RP_configured_as_FHR_2_p2(request):
-    """
-    TC_22_2_P2: Verify OIF and RPF for (*,G) and (S,G) when a static RP is
-             configured on the FHR router
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1 : LHR
-    r3 : FHR/RP
-    """
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send multicast traffic from r5")
-    step("Send the IGMP join from r0")
-
-    step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1, r2, r3, r4: Configure r3(FHR) as RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r3": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.3.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        },
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.3.17"
-    iif = "r1-r3-eth2"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_SPT_RPT_path_different_p1(request):
-    """
-    TC_23_P1: Verify (*,G) and (S,G) are populated correctly when the RPT and
-              SPT paths are different
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1: LHR
-    r2: RP
-    r3: FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
-    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send multicast traffic from r3")
-
-    step("r2: Verify RP info")
-    dut = "r2"
-    rp_address = "1.0.2.17"
-    iif = "lo"
-    result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream IIF interface")
-    dut = "r2"
-    iif = "r2-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_clear_pim_configuration_p1(request):
-    """
-    TC_25_P1: Verify (*,G) and (S,G) are populated correctly after clearing
-              the PIM, IGMP, and mroute entries
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-                |             |
-                |_____________|
-                        r4
-    r1 : LHR
-    r2 : RP
-    r3 : FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send the IGMP join from r0")
-    step("Send multicast traffic from r5")
-
-    step("r2: Verify RP info")
-    dut = "r2"
-    rp_address = "1.0.2.17"
-    oif = "lo"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    iif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, iif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
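-    # The clear_* helpers below clear IGMP group, PIM neighbor, and mroute state
-    # on r1 and verify that the corresponding uptimes/timers have restarted.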
-    step("r1: Verify IGMP groups timer restarted")
-    result = clear_igmp_interfaces(tgen, dut)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify PIM neighbor timer restarted")
-    result = clear_pim_interfaces(tgen, dut)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify PIM mroute timer restarted")
-    result = clear_mroute_verify(tgen, dut)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    # Uncomment next line for debugging
-    # tgen.mininet_cli()
-
-    write_test_footer(tc_name)
-
-
-def test_restart_pimd_process_p2(request):
-    """
-    TC_26_P2: Restart the PIMd process and verify PIM upstream and mroute
-              entries
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-                |             |
-                |_____________|
-                        r4
-    r1 : LHR
-    r2 : RP
-    r3 : FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
-    step("Configure RP on r3 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
-    step("Send multicast traffic from R3")
-    step("Restart the PIMd process")
-
-    step("r2: Verify RP info")
-    dut = "r2"
-    rp_address = "1.0.2.17"
-    oif = "lo"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    oil = "r1-r0-eth0"
-    logger.info("waiting for 10 sec to make sure old mroute time is higher")
-    sleep(10)
-    # Why do we then wait 60 seconds below before checking the routes?
-    uptime_before = verify_mroutes(
-        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=60
-    )
-    assert isinstance(uptime_before, dict), "Testcase{} : Failed Error: {}".format(
-        tc_name, result
-    )
-
-    step("r1: Kill pimd process")
-    kill_router_daemons(tgen, "r1", ["pimd"])
-
-    step("r1 : Start pimd process")
-    start_router_daemons(tgen, "r1", ["pimd"])
-
-    logger.info("Waiting for 5sec to get PIMd restarted and mroute" " re-learned..")
-    sleep(5)
-
-    # Why do we then wait 10 seconds below before checking the routes?
-    uptime_after = verify_mroutes(
-        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=10
-    )
-    assert isinstance(uptime_after, dict), "Testcase{} : Failed Error: {}".format(
-        tc_name, result
-    )
-
-    result = verify_mroute_repopulated(uptime_before, uptime_after)
-    assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
-
-    write_test_footer(tc_name)
-
-
-def test_multiple_groups_same_RP_address_p2(request):
-    """
-    TC_27_P2: Configure multiple groups (10 groups) with the same RP address
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-
-    r1 : LHR
-    r2 : RP
-    r3 : FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
-    step("Configure RP on r2 (loopback interface) for the group range" "225.1.1.0/24")
-    step("Enable the PIM on all the interfaces of r1-r2-r3")
-    step("Send multicast traffic from r5 to all the groups")
-    step("r1 : Remove the groups to RP mapping one by one")
-    step("r1: Shut the upstream interfaces")
-    step("r1: No shut the upstream interfaces")
-    step("r1: Configure the RP again")
-    step("r1: Shut the receiver interfaces")
-    step("r1: No Shut the receiver interfaces")
-    step("r2: Verify RP info")
-
-    step("r2: verify rp-info")
-    dut = "r2"
-    rp_address = "1.0.2.17"
-    oif = "lo"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
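-    # GROUP_ADDRESS_LIST_1 and GROUP_ADDRESS_LIST_2 are combined into the 10
-    # groups that share the single RP (1.0.2.17) in this testcase.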
-    group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
-    step("r0: Send IGMP join for 10 groups")
-    result = app_helper.run_join("r0", group_address_list, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", group_address_list, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream IIF interface")
-    dut = "r2"
-    iif = "r2-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Delete RP configuration")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        }
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
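-    # With the RP entry removed, flap the RP-facing link, re-add the RP, then flap
-    # the receiver-facing link; the checks below confirm that the (*, G) and
-    # (S, G) state re-converges to the original IIF/OIF.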
-    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No Shut the interface r1-r2-eth1 from R1 to R2")
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Configure RP")
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                    }
-                ]
-            }
-        }
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Shut the interface r1-r0-eth0 from R1 to R2")
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No Shut the interface r1-r0-eth0 from R1 to R2")
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream IIF interface")
-    dut = "r2"
-    iif = "r2-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    write_test_footer(tc_name)
-
-
-def test_multiple_groups_different_RP_address_p2(request):
-    """
-    TC_28_P2: Verify IIF and OIL are updated in the mroute when the upstream
-              interface is configured as RP
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |     iperf
-        r0-----r1-------------r3-----r5
-                |             |
-                |_____________|
-                        r4
-    r1 : LHR
-    r2 & r4 : RP
-    r3 : FHR
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Delete existing RP configuration")
-    input_dict = {
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        }
-    }
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
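-    # Split the group ranges across two RPs: r2's loopback (1.0.2.17) serves
-    # GROUP_RANGE_LIST_1 and r4's loopback (1.0.4.17) serves GROUP_RANGE_LIST_2.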
-    input_dict = {
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_LIST_1,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.4.17",
-                        "group_addr_range": GROUP_RANGE_LIST_2,
-                    }
-                ]
-            }
-        },
-    }
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify RP info")
-    dut = "r2"
-    rp_address = "1.0.2.17"
-    oif = "lo"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_LIST_1, oif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify RP info")
-    dut = "r4"
-    rp_address = "1.0.4.17"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_LIST_2, oif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", group_address_list, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, group_address_list)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r5: Send multicast traffic for group 225.1.1.1")
-    result = app_helper.run_traffic("r5", group_address_list, "r3")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream IIF interface")
-    iif = "r2-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r4-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) upstream IIF interface")
-    dut = "r4"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) ip mroutes")
-    oif = "r4-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (S, G) upstream IIF interface")
-    iif = "r4-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r4: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
-    )
-    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
-        tc_name, result
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("Delete RP configuration")
-    input_dict = {
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_LIST_1,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.4.17",
-                        "group_addr_range": GROUP_RANGE_LIST_2,
-                        "delete": True,
-                    }
-                ]
-            }
-        },
-    }
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1, r2, r3, r4: Re-configure RP")
-    input_dict = {
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_LIST_1,
-                    }
-                ]
-            }
-        },
-        "r4": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.4.17",
-                        "group_addr_range": GROUP_RANGE_LIST_2,
-                    }
-                ]
-            }
-        },
-    }
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No shut the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Shut the interface r1-r2-eth1 from R1 to R4")
-    dut = "r1"
-    intf = "r1-r4-eth3"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No shut the interface r1-r2-eth1 from R1 to r4")
-    dut = "r1"
-    intf = "r1-r4-eth3"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Shut the interface r1-r0-eth0 from R1 to R0")
-    dut = "r1"
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No Shut the interface r1-r0-eth0 from R1 to R0")
-    dut = "r1"
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream IIF interface")
-    dut = "r2"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream IIF interface")
-    iif = "r2-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream IIF interface")
-    dut = "r1"
-    iif = "r1-r4-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream IIF interface")
-    iif = "r1-r3-eth2"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (S, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) upstream IIF interface")
-    dut = "r4"
-    iif = "lo"
-    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (*, G) ip mroutes")
-    oif = "r4-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (S, G) upstream IIF interface")
-    iif = "r4-r3-eth1"
-    result = verify_upstream_iif(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r4: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r4: Verify (S, G) ip mroutes")
-    oif = "none"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream IIF interface")
-    dut = "r3"
-    iif = "r3-r5-eth3"
-    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (S, G) upstream join state and join timer")
-    result = verify_join_state_and_timer(
-        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
-    )
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (S, G) ip mroutes")
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    write_test_footer(tc_name)
-
-
-def test_shutdown_primary_path_p1(request):
-    """
-    TC_30_P1: Verify IIF and OIL change to other path after shutting the
-              primary path
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |
-        r0-----r1-------------r3
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    # Steps to execute
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("r1: Shut the link from r1 to r2")
-    step("r3: Shut the link from r1 to r3")
-    step("r1: No shut the link from r1 to r2")
-    step("r3: No shut the link from r1 to r3")
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.2.17"
-    iif = "r1-r2-eth1"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step(
-        "Verify after shutting the R1 to R2 link that the join reaches the RP"
-        " via the other path"
-    )
-
-    logger.info("Waiting for 110 sec only if test run with crucible")
-
-    step("r1: Verify (*, G) ip mroutes")
-    dut = "r1"
-    iif = "r1-r3-eth2"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r3-eth1"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (*, G) ip mroutes")
-    dut = "r3"
-    iif = "r3-r2-eth1"
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Shut the link from R1 to R3 from R3 node")
-    dut = "r3"
-    intf = "r3-r1-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step(
-        "Verify that after shutting the R1 to R3 link the (*,G) entries are"
-        " cleared from all the nodes R1, R2, R3"
-    )
-
-    step("r1: Verify (*, G) ip mroutes")
-    dut = "r1"
-    iif = "r1-r3-eth2"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r1: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (*, G) ip mroutes")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r3-eth1"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: Verify (*, G) ip mroutes")
-    dut = "r3"
-    iif = "r3-r2-eth1"
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r3: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r3: No shutdown the link from R1 to R3 from R3 node")
-    dut = "r3"
-    intf = "r3-r1-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Verify (*, G) ip mroutes")
-    dut = "r1"
-    iif = "r1-r3-eth2"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r3-eth1"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r3: Verify (*, G) ip mroutes")
-    dut = "r3"
-    iif = "r3-r2-eth1"
-    oif = "r3-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: No shutdown the link from R1 to R2 from R1 node")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Verify (*, G) ip mroutes")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    write_test_footer(tc_name)
-
-
-def test_delete_RP_shut_noshut_upstream_interface_p1(request):
-    """
-    TC_31_P1: Verify RP info and (*,G) mroute after deleting the RP and shut /
-           no shut the RPF interface.
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |
-        r0-----r1-------------r3
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
-    step("r1: Delete the RP config")
-    step("r1: Shut and no shut the upstream interface (R1-R2) connected link")
-    step("r1: Shut and no shut the OIL interface")
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.2.17"
-    iif = "r1-r2-eth1"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    dut = "r1"
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes created")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes created")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Delete RP configuration")
-
-    # Delete RP configuration
-    input_dict = {
-        "r1": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        }
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No shutdown the interface r1-r2-eth1 from R1 to R2")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Shutdown the OIL interface r1-r0-eth0 from R1 to R0 ")
-    dut = "r1"
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: No shutdown the OIL interface r1-r0-eth0 from R1 to R0")
-    dut = "r1"
-    intf = "r1-r0-eth0"
-    shutdown_bringup_interface(tgen, dut, intf, True)
-
-    step("r1: Verify (*, G) ip mroutes cleared")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r1: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (*, G) ip mroutes cleared")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    write_test_footer(tc_name)
-
-
-def test_delete_RP_shut_noshut_RP_interface_p1(request):
-    """
-    TC_32_P1: Verify RP info and (*,G) mroute after deleting the RP and shut/
-           no shut the RPF interface
-
-    Topology used:
-                ________r2_____
-                |             |
-      iperf     |             |
-        r0-----r1-------------r3
-    """
-
-    tgen = get_topogen()
-    tc_name = request.node.name
-    write_test_header(tc_name)
-
-    # Don"t run this test if we have any failure.
-    if tgen.routers_have_failure():
-        pytest.skip(tgen.errors)
-
-    step("Creating configuration from JSON")
-    reset_config_on_routers(tgen)
-    app_helper.stop_all_hosts()
-    clear_mroute(tgen)
-    clear_pim_interface_traffic(tgen, TOPO)
-
-    step("Enable IGMP on r1 interface")
-    step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4")
-    step("r2: Delete the RP configuration")
-    step("r2: Shut the RP interface (lo)")
-    step("r1: Shut the interface(r1-r2-eth1, r1-r3-eth2) towards rp")
-
-    step("r1: Verify RP info")
-    dut = "r1"
-    rp_address = "1.0.2.17"
-    iif = "r1-r2-eth1"
-    result = verify_pim_rp_info(
-        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
-    )
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r0: Send IGMP join")
-    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify IGMP groups")
-    oif = "r1-r0-eth0"
-    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r1: Verify (*, G) ip mroutes created")
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Verify (*, G) ip mroutes created")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Delete RP configuration")
-
-    # Delete RP configuration
-    input_dict = {
-        "r2": {
-            "pim": {
-                "rp": [
-                    {
-                        "rp_addr": "1.0.2.17",
-                        "group_addr_range": GROUP_RANGE_ALL,
-                        "delete": True,
-                    }
-                ]
-            }
-        }
-    }
-
-    result = create_pim_config(tgen, TOPO, input_dict)
-    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-    step("r2: Shut the RP interface lo")
-    dut = "r2"
-    intf = "lo"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: Shut the interface r1-r2-eth1 towards RP")
-    dut = "r1"
-    intf = "r1-r2-eth1"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: Shut the interface r1-r3-eth2 towards RP")
-    dut = "r1"
-    intf = "r1-r3-eth2"
-    shutdown_bringup_interface(tgen, dut, intf, False)
-
-    step("r1: Verify (*, G) ip mroutes cleared")
-    dut = "r1"
-    iif = "r1-r2-eth1"
-    oif = "r1-r0-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r1: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    step("r2: Verify (*, G) ip mroutes cleared")
-    dut = "r2"
-    iif = "lo"
-    oif = "r2-r1-eth0"
-    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
-    assert result is not True, (
-        "Testcase {} : Failed \n "
-        "r2: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
-            tc_name, result
-        )
-    )
-
-    write_test_footer(tc_name)
-
-
 if __name__ == "__main__":
     ARGS = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(ARGS))
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py
new file mode 100755 (executable)
index 0000000..35303c3
--- /dev/null
@@ -0,0 +1,1424 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+TC_1 : Verify upstream interfaces(IIF) and join state are updated properly
+       after adding and deleting the static RP
+TC_2 : Verify IIF and OIL in "show ip pim state" updated properly after
+       adding and deleting the static RP
+TC_3: (*, G) Mroute entries are cleared when static RP gets deleted
+TC_4: Verify (*,G) prune is sent towards the RP after deleting the static RP
+TC_5: Verify OIF entry for RP is cleared when RP becomes unreachable
+TC_6: Verify IIF and OIL in "show ip pim state" updated properly when RP
+      becomes unreachable
+TC_7 : Verify upstream interfaces(IIF) and join state are updated properly
+       after adding and deleting the static RP
+TC_8: Verify (*,G) prune is sent towards the RP when RP becomes unreachable
+TC_9 : Verify RP configured after IGMP join received, PIM join towards RP is
+       sent immediately
+TC_10 : Verify RP becomes reachable after IGMP join received, PIM join
+        towards RP is sent immediately
+TC_11 : Verify PIM join is sent towards the higher preferred RP
+TC_12 : Verify PIM prune is sent towards the lower preferred RP
+TC_13 : Verify RPF interface is updated in mroute (kernel) when higher
+        preferred overlapping RP configured
+TC_14 : Verify IIF and OIL in "show ip pim state" updated properly when higher
+        preferred overlapping RP configured
+TC_15 : Verify upstream interfaces(IIF) and join state are updated when higher
+        preferred overlapping RP is configured
+TC_16 : Verify join is sent to lower preferred RP when higher preferred RP
+        gets deleted
+TC_17 : Verify prune is sent to higher preferred RP when higher preferred RP
+        gets deleted
+TC_18 : Verify RPF interface updated in mroute when higher preferred RP gets
+        deleted
+TC_19 : Verify IIF and OIL in "show ip pim state" updated when higher
+        preferred overlapping RP is deleted
+TC_20 : Verify PIM upstream IIF updated when higher preferred overlapping RP
+        deleted
+TC_21_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in the LHR router
+TC_21_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in the LHR router
+TC_22_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in the FHR router
+TC_22_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in the FHR router
+TC_23 : Verify (*,G) and (S,G) populated correctly when RPT and SPT path are
+        different
+TC_24 : Verify (*,G) and (S,G) populated correctly when SPT and RPT share the
+        same path
+TC_25 : Verify (*,G) and (S,G) populated correctly after clearing the PIM,
+        IGMP and mroute joins
+TC_26 : Restart the PIMd process and verify PIM joins and mroute entries
+TC_27 : Configure multiple groups (10 grps) with same RP address
+TC_28 : Configure multiple groups (10 grps) with different RP address
+TC_29 : Verify IIF and OIL are updated in mroute when upstream interface is
+        configured as RP
+TC_30 : Verify IIF and OIL change to other path after shutting the primary
+        path
+TC_31 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+        shut the RPF interface.
+TC_32 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+        shut the RPF interface
+"""
+
+import os
+import sys
+import time
+from time import sleep
+import datetime
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    reset_config_on_routers,
+    step,
+    shutdown_bringup_interface,
+    kill_router_daemons,
+    start_router_daemons,
+    create_static_routes,
+    topo_daemons,
+)
+from lib.pim import (
+    create_pim_config,
+    verify_igmp_groups,
+    verify_upstream_iif,
+    verify_join_state_and_timer,
+    verify_mroutes,
+    verify_pim_neighbors,
+    get_pim_interface_traffic,
+    verify_pim_rp_info,
+    verify_pim_state,
+    clear_pim_interface_traffic,
+    clear_igmp_interfaces,
+    clear_pim_interfaces,
+    clear_mroute,
+    clear_mroute_verify,
+    McastTesterHelper,
+)
+
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
+
+
+# Global variables
+GROUP_RANGE_ALL = "224.0.0.0/4"
+GROUP_RANGE = "225.1.1.1/32"
+GROUP_RANGE_LIST_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+GROUP_RANGE_LIST_2 = [
+    "225.1.1.6/32",
+    "225.1.1.7/32",
+    "225.1.1.8/32",
+    "225.1.1.9/32",
+    "225.1.1.10/32",
+]
+GROUP_ADDRESS = "225.1.1.1"
+GROUP_ADDRESS_LIST_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_ADDRESS_LIST_2 = [
+    "225.1.1.6",
+    "225.1.1.7",
+    "225.1.1.8",
+    "225.1.1.9",
+    "225.1.1.10",
+]
+STAR = "*"
+SOURCE_ADDRESS = "10.0.6.2"
+SOURCE = "Static"
+
+
+def build_topo(tgen):
+    """Build function"""
+
+    # Building topology from json file
+    build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: %s", testsuite_run_time)
+    logger.info("=" * 40)
+
+    topology = """
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+    """
+    logger.info("Master Topology: \n %s", topology)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/multicast_pim_static_rp.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global TOPO
+    TOPO = tgen.json_topo
+
+    # ... and here it calls Mininet initialization functions.
+
+    # Get the list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, TOPO)
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, TOPO)
+
+    # Verify PIM neighbors
+    result = verify_pim_neighbors(tgen, TOPO)
+    assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+    # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+    global app_helper
+    app_helper = McastTesterHelper(tgen)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#   Testcases
+#
+#####################################################
+
+
+def verify_mroute_repopulated(uptime_before, uptime_after):
+    """
+    API to compare uptime for mroutes
+
+    Parameters
+    ----------
+    * `uptime_before` : Uptime dictionary for any particular instance
+    * `uptime_after` : Uptime dictionary for any particular instance
+    """
+
+    for group in uptime_before.keys():
+        for source in uptime_before[group].keys():
+            if set(uptime_before[group]) != set(uptime_after[group]):
+                errormsg = (
+                    "mroute (%s, %s) has not come"
+                    " up after mroute clear [FAILED!!]" % (source, group)
+                )
+                return errormsg
+
+            d_1 = datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S")
+            d_2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S")
+            if d_2 >= d_1:
+                errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % (
+                    source,
+                    group,
+                )
+                return errormsg
+
+            logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group)
+
+    return True
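
A hedged usage sketch for the comparison helper above: snapshot the mroute
uptimes, clear the mroutes, give PIM time to reinstall them, snapshot again,
and compare. The return_uptime flag on verify_mroutes and the 10-second wait
are assumptions for illustration, not something established by this diff.

# Sketch only; return_uptime and the wait interval are assumed.
uptime_before = verify_mroutes(
    tgen, "r1", SOURCE_ADDRESS, GROUP_ADDRESS, "r1-r3-eth2", "r1-r0-eth0",
    return_uptime=True,
)
clear_mroute(tgen)
sleep(10)  # allow PIM to rebuild the (*,G)/(S,G) entries
uptime_after = verify_mroutes(
    tgen, "r1", SOURCE_ADDRESS, GROUP_ADDRESS, "r1-r3-eth2", "r1-r0-eth0",
    return_uptime=True,
)
result = verify_mroute_repopulated(uptime_before, uptime_after)
assert result is True, result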
+
+
+def verify_state_incremented(state_before, state_after):
+    """
+    API to compare interface traffic state incrementing
+
+    Parameters
+    ----------
+    * `state_before` : State dictionary for any particular instance
+    * `state_after` : State dictionary for any particular instance
+    """
+
+    for router, state_data in state_before.items():
+        for state, _ in state_data.items():
+            if state_before[router][state] >= state_after[router][state]:
+                errormsg = (
+                    "[DUT: %s]: state %s value has not"
+                    " incremented, Initial value: %s, "
+                    "Current value: %s [FAILED!!]"
+                    % (
+                        router,
+                        state,
+                        state_before[router][state],
+                        state_after[router][state],
+                    )
+                )
+                return errormsg
+
+            logger.info(
+                "[DUT: %s]: State %s value is "
+                "incremented, Initial value: %s, Current value: %s"
+                " [PASSED!!]",
+                router,
+                state,
+                state_before[router][state],
+                state_after[router][state],
+            )
+
+    return True
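
Similarly, a hedged usage sketch for verify_state_incremented: snapshot
selected PIM interface traffic counters with get_pim_interface_traffic
(imported above), trigger the control-plane event under test, snapshot again,
and check that the counters grew. The shape of the input dictionary and the
"pruneTx" counter name are assumptions for illustration.

# Sketch only; the dictionary shape and the "pruneTx" counter are assumed.
state_dict = {"r1": {"r1-r2-eth1": ["pruneTx"]}}

state_before = get_pim_interface_traffic(tgen, state_dict)
assert isinstance(state_before, dict), "Traffic snapshot failed: {}".format(
    state_before
)

# ... perform the event under test here, e.g. delete the RP config ...

state_after = get_pim_interface_traffic(tgen, state_dict)
assert isinstance(state_after, dict), "Traffic snapshot failed: {}".format(
    state_after
)

result = verify_state_incremented(state_before, state_after)
assert result is True, result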
+
+
+def test_RP_configured_as_LHR_1_p1(request):
+    """
+    TC_21_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP is
+             configured in the LHR router
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1 : LHR/RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send the IGMP join from r0")
+    step("Send multicast traffic from r5")
+
+    step("r1 , r2, r3, r4: Delete existing RP configuration" "configure r1(LHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Configure r1(LHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    shutdown_bringup_interface(tgen, "r1", "lo", False)
+    sleep(5)
+    shutdown_bringup_interface(tgen, "r1", "lo", True)
+    sleep(5)
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.1.17"
+    iif = "lo"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S, G) upstream join state is joined and join"
+        " timer is running \n Error: {}".format(tc_name, result)
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
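
The RP add/delete dictionaries in the test above repeat one identical block
per router. A hedged sketch of how such a dictionary could be built
programmatically for create_pim_config; the rp_config helper is illustrative
only and is not defined by this suite:

def rp_config(routers, rp_addr, group_range, delete=False):
    # Build the nested dict shape that create_pim_config expects, with one
    # static-RP entry shared by every listed router.
    entry = {"rp_addr": rp_addr, "group_addr_range": group_range}
    if delete:
        entry["delete"] = True
    return {rtr: {"pim": {"rp": [dict(entry)]}} for rtr in routers}

# e.g. remove the old RP, then point every router at r1's loopback:
delete_dict = rp_config(["r1", "r2", "r3", "r4"], "1.0.2.17", GROUP_RANGE_ALL, True)
add_dict = rp_config(["r1", "r2", "r3", "r4"], "1.0.1.17", GROUP_RANGE_ALL)
# Both would then be applied with create_pim_config(tgen, TOPO, ...).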
+
+
+def test_RP_configured_as_LHR_2_p1(request):
+    """
+    TC_21_2_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP is
+             configured in the LHR router
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1 : LHR/RP
+    r3 : FHR
+
+    """
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send multicast traffic from r5")
+    step("Send the IGMP join from r0")
+
+    step("r1, r2, r3, r4: Delete existing RP configuration," "configure r1(LHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1, r2, r3, r4: Configure r1(LHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.1.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.1.17"
+    iif = "lo"
+    result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_RP_configured_as_FHR_1_p1(request):
+    """
+    TC_22_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP is
+             configured in the FHR router
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1 : LHR
+    r3 : FHR/RP
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send the IGMP join from r0")
+    step("Send multicast traffic from r5")
+
+    step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+    }
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1, r2, r3, r4: Configure r3(FHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.3.17"
+    iif = "r1-r3-eth2"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_RP_configured_as_FHR_2_p2(request):
+    """
+    TC_22_2_P2: Verify OIF and RPF for (*,G) and (S,G) when static RP is
+             configured in FHR router
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1 : LHR
+    r3 : FHR/RP
+    """
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send multicast traffic from r5")
+    step("Send the IGMP join from r0")
+
+    step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1, r2, r3, r4: Configure r3(FHR) as RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r3": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.3.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.3.17"
+    iif = "r1-r3-eth2"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_SPT_RPT_path_different_p1(request):
+    """
+    TC_23_P1: Verify (*,G) and (S,G) populated correctly when RPT and SPT
+              paths are different
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1: LHR
+    r2: RP
+    r3: FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
+    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send multicast traffic from r3")
+
+    step("r2: Verify RP info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    iif = "lo"
+    result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    dut = "r2"
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_clear_pim_configuration_p1(request):
+    """
+    TC_25_P1: Verify (*,G) and (S,G) populated correctly after clearing the
+              PIM, IGMP and mroute joins
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+    r1 : LHR
+    r2 : RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send the IGMP join from r0")
+    step("Send multicast traffic from r5")
+
+    step("r2: Verify RP info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    oif = "lo"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    iif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, iif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups timer restarted")
+    result = clear_igmp_interfaces(tgen, dut)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify PIM neighbor timer restarted")
+    result = clear_pim_interfaces(tgen, dut)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify PIM mroute timer restarted")
+    result = clear_mroute_verify(tgen, dut)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    ARGS = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(ARGS))
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py
new file mode 100755 (executable)
index 0000000..991d7d5
--- /dev/null
@@ -0,0 +1,1799 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+TC_1 : Verify upstream interfaces(IIF) and join state are updated properly
+       after adding and deleting the static RP
+TC_2 : Verify IIF and OIL in "show ip pim state" updated properly after
+       adding and deleting the static RP
+TC_3: (*, G) Mroute entries are cleared when static RP gets deleted
+TC_4: Verify (*,G) prune is sent towards the RP after deleting the static RP
+TC_5: Verify OIF entry for RP is cleared when RP becomes unreachable
+TC_6: Verify IIF and OIL in "show ip pim state" updated properly when RP
+      becomes unreachable
+TC_7 : Verify upstream interfaces(IIF) and join state are updated properly
+       after adding and deleting the static RP
+TC_8: Verify (*,G) prune is sent towards the RP when RP becomes unreachable
+TC_9 : Verify RP configured after IGMP join received, PIM join towards RP is
+       sent immediately
+TC_10 : Verify RP becomes reachable after IGMP join received, PIM join
+        towards RP is sent immediately
+TC_11 : Verify PIM join is sent towards the higher preferred RP
+TC_12 : Verify PIM prune is sent towards the lower preferred RP
+TC_13 : Verify RPF interface is updated in mroute (kernel) when higher
+        preferred overlapping RP configured
+TC_14 : Verify IIF and OIL in "show ip pim state" updated properly when higher
+        preferred overlapping RP configured
+TC_15 : Verify upstream interfaces(IIF) and join state are updated when higher
+        preferred overlapping RP is configured
+TC_16 : Verify join is sent to lower preferred RP when higher preferred RP
+        gets deleted
+TC_17 : Verify prune is sent to higher preferred RP when higher preferred RP
+        gets deleted
+TC_18 : Verify RPF interface updated in mroute when higher preferred RP gets
+        deleted
+TC_19 : Verify IIF and OIL in "show ip pim state" updated when higher
+        preferred overlapping RP is deleted
+TC_20 : Verify PIM upstream IIF updated when higher preferred overlapping RP
+        deleted
+TC_21_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in LHR router
+TC_21_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in LHR router
+TC_22_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in FHR router
+TC_22_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP is configured
+          in FHR router
+TC_23 : Verify (*,G) and (S,G) populated correctly when RPT and SPT path are
+        different
+TC_24 : Verify (*,G) and (S,G) populated correctly when SPT and RPT share the
+        same path
+TC_25 : Verify (*,G) and (S,G) populated correctly after clearing the PIM,
+        IGMP and mroute joins
+TC_26 : Restart the PIMd process and verify PIM joins and mroute entries
+TC_27 : Configure multiple groups (10 grps) with same RP address
+TC_28 : Configure multiple groups (10 grps) with different RP address
+TC_29 : Verify IIF and OIL are updated in the mroute when the upstream
+        interface is configured as RP
+TC_30 : Verify IIF and OIL change to other path after shut the primary path
+TC_31 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+        shut the RPF interface.
+TC_32 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+        shut the RPF interface
+"""
+
+import os
+import sys
+import time
+from time import sleep
+import datetime
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    reset_config_on_routers,
+    step,
+    shutdown_bringup_interface,
+    kill_router_daemons,
+    start_router_daemons,
+    create_static_routes,
+    topo_daemons,
+)
+from lib.pim import (
+    create_pim_config,
+    verify_igmp_groups,
+    verify_upstream_iif,
+    verify_join_state_and_timer,
+    verify_mroutes,
+    verify_pim_neighbors,
+    get_pim_interface_traffic,
+    verify_pim_rp_info,
+    verify_pim_state,
+    clear_pim_interface_traffic,
+    clear_igmp_interfaces,
+    clear_pim_interfaces,
+    clear_mroute,
+    clear_mroute_verify,
+    McastTesterHelper,
+)
+
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
+
+
+# Global variables
+GROUP_RANGE_ALL = "224.0.0.0/4"
+GROUP_RANGE = "225.1.1.1/32"
+GROUP_RANGE_LIST_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+GROUP_RANGE_LIST_2 = [
+    "225.1.1.6/32",
+    "225.1.1.7/32",
+    "225.1.1.8/32",
+    "225.1.1.9/32",
+    "225.1.1.10/32",
+]
+GROUP_ADDRESS = "225.1.1.1"
+GROUP_ADDRESS_LIST_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_ADDRESS_LIST_2 = [
+    "225.1.1.6",
+    "225.1.1.7",
+    "225.1.1.8",
+    "225.1.1.9",
+    "225.1.1.10",
+]
+STAR = "*"
+SOURCE_ADDRESS = "10.0.6.2"
+SOURCE = "Static"
+
+
+def build_topo(tgen):
+    """Build function"""
+
+    # Building topology from json file
+    build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: %s", testsuite_run_time)
+    logger.info("=" * 40)
+
+    topology = """
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+    """
+    logger.info("Master Topology: \n %s", topology)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/multicast_pim_static_rp.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global TOPO
+    TOPO = tgen.json_topo
+
+    # ... and here it calls Mininet initialization functions.
+
+    # get list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, TOPO)
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, TOPO)
+
+    # Verify PIM neighbors
+    result = verify_pim_neighbors(tgen, TOPO)
+    assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+    # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+    global app_helper
+    app_helper = McastTesterHelper(tgen)
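+    # Illustrative sketch of the pattern suggested in the XXX note above
+    # (not exercised here; it assumes McastTesterHelper supports the
+    # context-manager protocol):
+    #
+    #     with McastTesterHelper(tgen) as helper:
+    #         helper.run_join("r0", GROUP_ADDRESS, "r1")
+    #
+    # The module-level app_helper is kept so the existing tests stay unchanged.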
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#   Testcases
+#
+#####################################################
+
+
+def verify_mroute_repopulated(uptime_before, uptime_after):
+    """
+    API to compare uptime for mroutes
+
+    Parameters
+    ----------
+    * `uptime_before` : Uptime dictionary for any particular instance
+    * `uptime_after` : Uptime dictionary for any particular instance
+    """
+
+    for group in uptime_before.keys():
+        for source in uptime_before[group].keys():
+            if set(uptime_before[group]) != set(uptime_after[group]):
+                errormsg = (
+                    "mroute (%s, %s) has not come"
+                    " up after mroute clear [FAILED!!]" % (source, group)
+                )
+                return errormsg
+
+            d_1 = datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S")
+            d_2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S")
+            if d_2 >= d_1:
+                errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % (
+                    source,
+                    group,
+                )
+                return errormsg
+
+            logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group)
+
+    return True
+
+
+def verify_state_incremented(state_before, state_after):
+    """
+    API to compare interface traffic state incrementing
+
+    Parameters
+    ----------
+    * `state_before` : State dictionary for any particular instance
+    * `state_after` : State dictionary for any particular instance
+    """
+
+    for router, state_data in state_before.items():
+        for state, _ in state_data.items():
+            if state_before[router][state] >= state_after[router][state]:
+                errormsg = (
+                    "[DUT: %s]: state %s value has not"
+                    " incremented, Initial value: %s, "
+                    "Current value: %s [FAILED!!]"
+                    % (
+                        router,
+                        state,
+                        state_before[router][state],
+                        state_after[router][state],
+                    )
+                )
+                return errormsg
+
+            logger.info(
+                "[DUT: %s]: State %s value is "
+                "incremented, Initial value: %s, Current value: %s"
+                " [PASSED!!]",
+                router,
+                state,
+                state_before[router][state],
+                state_after[router][state],
+            )
+
+    return True
+
+
+def test_restart_pimd_process_p2(request):
+    """
+    TC_26_P2: Restart the PIMd process and verify PIM upstream and mroute
+              entries
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+    r1 : LHR
+    r2 : RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
+    step("Configure RP on r3 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send multicast traffic from R3")
+    step("Restart the PIMd process")
+
+    step("r2: Verify RP info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    oif = "lo"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    oil = "r1-r0-eth0"
+    logger.info("waiting for 10 sec to make sure old mroute time is higher")
+    sleep(10)
+    # Why do we then wait 60 seconds below before checking the routes?
+    uptime_before = verify_mroutes(
+        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=60
+    )
+    assert isinstance(uptime_before, dict), "Testcase {} : Failed Error: {}".format(
+        tc_name, uptime_before
+    )
+
+    step("r1: Kill pimd process")
+    kill_router_daemons(tgen, "r1", ["pimd"])
+
+    step("r1 : Start pimd process")
+    start_router_daemons(tgen, "r1", ["pimd"])
+
+    logger.info("Waiting for 5sec to get PIMd restarted and mroute" " re-learned..")
+    sleep(5)
+
+    # Why do we then wait 10 seconds below before checking the routes?
+    uptime_after = verify_mroutes(
+        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=10
+    )
+    assert isinstance(uptime_after, dict), "Testcase {} : Failed Error: {}".format(
+        tc_name, uptime_after
+    )
+
+    result = verify_mroute_repopulated(uptime_before, uptime_after)
+    assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_multiple_groups_same_RP_address_p2(request):
+    """
+    TC_27_P2: Configure multiple groups (10 grps) with same RP address
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+
+    r1 : LHR
+    r2 : RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
+    step("Configure RP on r2 (loopback interface) for the group range" "225.1.1.0/24")
+    step("Enable the PIM on all the interfaces of r1-r2-r3")
+    step("Send multicast traffic from r5 to all the groups")
+    step("r1 : Remove the groups to RP mapping one by one")
+    step("r1: Shut the upstream interfaces")
+    step("r1: No shut the upstream interfaces")
+    step("r1: Configure the RP again")
+    step("r1: Shut the receiver interfaces")
+    step("r1: No Shut the receiver interfaces")
+    step("r2: Verify RP info")
+
+    step("r2: verify rp-info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    oif = "lo"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
+    step("r0: Send IGMP join for 10 groups")
+    result = app_helper.run_join("r0", group_address_list, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", group_address_list, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    dut = "r2"
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Delete RP configuration")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No Shut the interface r1-r2-eth1 from R1 to R2")
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Configure RP")
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Shut the interface r1-r0-eth0 from R1 to R2")
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No Shut the interface r1-r0-eth0 from R1 to R2")
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    dut = "r2"
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_multiple_groups_different_RP_address_p2(request):
+    """
+    TC_28_P2: Configure multiple groups (10 grps) with different RP address
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+    r1 : LHR
+    r2 & r4 : RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Delete existing RP configuration")
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_LIST_1,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.4.17",
+                        "group_addr_range": GROUP_RANGE_LIST_2,
+                    }
+                ]
+            }
+        },
+    }
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify RP info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    oif = "lo"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_LIST_1, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify RP info")
+    dut = "r4"
+    rp_address = "1.0.4.17"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_LIST_2, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", group_address_list, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, group_address_list)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = app_helper.run_traffic("r5", group_address_list, "r3")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r4-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) upstream IIF interface")
+    dut = "r4"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) ip mroutes")
+    oif = "r4-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (S, G) upstream IIF interface")
+    iif = "r4-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r4: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+    )
+    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Delete RP configuration")
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_LIST_1,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.4.17",
+                        "group_addr_range": GROUP_RANGE_LIST_2,
+                        "delete": True,
+                    }
+                ]
+            }
+        },
+    }
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1, r2, r3, r4: Re-configure RP")
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_LIST_1,
+                    }
+                ]
+            }
+        },
+        "r4": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.4.17",
+                        "group_addr_range": GROUP_RANGE_LIST_2,
+                    }
+                ]
+            }
+        },
+    }
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No shut the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Shut the interface r1-r2-eth1 from R1 to R4")
+    dut = "r1"
+    intf = "r1-r4-eth3"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No shut the interface r1-r2-eth1 from R1 to r4")
+    dut = "r1"
+    intf = "r1-r4-eth3"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Shut the interface r1-r0-eth0 from R1 to R0")
+    dut = "r1"
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No Shut the interface r1-r0-eth0 from R1 to R0")
+    dut = "r1"
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r4-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r3-eth2"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) upstream IIF interface")
+    dut = "r4"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (*, G) ip mroutes")
+    oif = "r4-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (S, G) upstream IIF interface")
+    iif = "r4-r3-eth1"
+    result = verify_upstream_iif(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r4: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r4: Verify (S, G) ip mroutes")
+    oif = "none"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_shutdown_primary_path_p1(request):
+    """
+    TC_30_P1: Verify IIF and OIL change to the other path after shutting the
+              primary path
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |
+        r0-----r1-------------r3
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    # Steps to execute
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("r1: Shut the link from r1 to r2")
+    step("r3: Shut the link from r1 to r3")
+    step("r1: No shut the link from r1 to r2")
+    step("r3: No shut the link from r1 to r3")
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.2.17"
+    iif = "r1-r2-eth1"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step(
+        "Verify that after shutting the R1 to R2 link, the join reaches the RP"
+        " via the other path"
+    )
+
+    logger.info("Waiting for 110 sec only if test run with crucible")
+
+    step("r1: Verify (*, G) ip mroutes")
+    dut = "r1"
+    iif = "r1-r3-eth2"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r3-eth1"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (*, G) ip mroutes")
+    dut = "r3"
+    iif = "r3-r2-eth1"
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Shut the link from R1 to R3 from R3 node")
+    dut = "r3"
+    intf = "r3-r1-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step(
+        "Verify that after shutting the R1 to R3 link, the (*,G) entries are"
+        " cleared from all the nodes R1, R2, R3"
+    )
+
+    step("r1: Verify (*, G) ip mroutes")
+    dut = "r1"
+    iif = "r1-r3-eth2"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r1: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (*, G) ip mroutes")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r3-eth1"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: Verify (*, G) ip mroutes")
+    dut = "r3"
+    iif = "r3-r2-eth1"
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r3: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r3: No shutdown the link from R1 to R3 from R3 node")
+    dut = "r3"
+    intf = "r3-r1-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Verify (*, G) ip mroutes")
+    dut = "r1"
+    iif = "r1-r3-eth2"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r3-eth1"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (*, G) ip mroutes")
+    dut = "r3"
+    iif = "r3-r2-eth1"
+    oif = "r3-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: No shutdown the link from R1 to R2 from R1 node")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Verify (*, G) ip mroutes")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_delete_RP_shut_noshut_upstream_interface_p1(request):
+    """
+    TC_31_P1: Verify RP info and (*,G) mroute after deleting the RP and shut /
+           no shut the RPF interface.
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |
+        r0-----r1-------------r3
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("r1: Delete the RP config")
+    step("r1: Shut and no shut the upstream interface (R1-R2) connected link")
+    step("r1: Shut and no shut the OIL interface")
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.2.17"
+    iif = "r1-r2-eth1"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes created")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes created")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Delete RP configuration")
+
+    # Delete RP configuration
+    input_dict = {
+        "r1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No shutdown the interface r1-r2-eth1 from R1 to R2")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Shutdown the OIL interface r1-r0-eth0 from R1 to R0 ")
+    dut = "r1"
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: No shutdown the OIL interface r1-r0-eth0 from R1 to R0")
+    dut = "r1"
+    intf = "r1-r0-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, True)
+
+    step("r1: Verify (*, G) ip mroutes cleared")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r1: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (*, G) ip mroutes cleared")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    write_test_footer(tc_name)
+
+
+def test_delete_RP_shut_noshut_RP_interface_p1(request):
+    """
+    TC_32_P1: Verify RP info and (*,G) mroute after deleting the RP and shut/
+           no shut the RPF interface
+
+    Topology used:
+                ________r2_____
+                |             |
+      iperf     |             |
+        r0-----r1-------------r3
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    clear_pim_interface_traffic(tgen, TOPO)
+
+    step("Enable IGMP on r1 interface")
+    step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4")
+    step("r2: Delete the RP configuration")
+    step("r2: Shut the RP interface (lo)")
+    step("r1: Shut the interface(r1-r2-eth1, r1-r3-eth2) towards rp")
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    rp_address = "1.0.2.17"
+    iif = "r1-r2-eth1"
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes created")
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes created")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Delete RP configuration")
+
+    # Delete RP configuration
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": "1.0.2.17",
+                        "group_addr_range": GROUP_RANGE_ALL,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Shut the RP interface lo")
+    dut = "r2"
+    intf = "lo"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: Shut the interface r1-r2-eth1 towards RP")
+    dut = "r1"
+    intf = "r1-r2-eth1"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: Shut the interface r1-r3-eth2 towards RP")
+    dut = "r1"
+    intf = "r1-r3-eth2"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("r1: Verify (*, G) ip mroutes cleared")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    oif = "r1-r0-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r1: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    step("r2: Verify (*, G) ip mroutes cleared")
+    dut = "r2"
+    iif = "lo"
+    oif = "r2-r1-eth0"
+    result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "r2: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
+            tc_name, result
+        )
+    )
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    ARGS = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(ARGS))
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py
new file mode 100755 (executable)
index 0000000..bd5473a
--- /dev/null
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+TC_1 : Verify upstream interfaces (IIF) and join state are updated properly
+       after adding and deleting the static RP
+TC_2 : Verify IIF and OIL in "show ip pim state" are updated properly after
+       adding and deleting the static RP
+TC_3 : (*, G) mroute entries are cleared when the static RP gets deleted
+TC_4 : Verify (*,G) prune is sent towards the RP after deleting the static RP
+TC_24 : Verify (*,G) and (S,G) are populated correctly when SPT and RPT share
+        the same path
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from time import sleep
+import datetime
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    reset_config_on_routers,
+    step,
+    shutdown_bringup_interface,
+    kill_router_daemons,
+    start_router_daemons,
+    create_static_routes,
+    check_router_status,
+    socat_send_igmp_join_traffic,
+    topo_daemons
+)
+from lib.pim import (
+    create_pim_config,
+    verify_igmp_groups,
+    verify_upstream_iif,
+    verify_join_state_and_timer,
+    verify_mroutes,
+    verify_pim_neighbors,
+    verify_pim_interface_traffic,
+    verify_pim_rp_info,
+    verify_pim_state,
+    clear_pim_interface_traffic,
+    clear_igmp_interfaces,
+    clear_pim_interfaces,
+    clear_mroute,
+    clear_mroute_verify,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Global variables
+GROUP_RANGE_V6 = "ff08::/64"
+IGMP_JOIN_V6 = "ff08::1"
+STAR = "*"
+SOURCE = "Static"
+
+pytestmark = [pytest.mark.pimd]
+
+
+def build_topo(tgen):
+    """Build function"""
+
+    # Building topology from json file
+    build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: %s", testsuite_run_time)
+    logger.info("=" * 40)
+
+    topology = """
+
+                 _______r2_____
+                |             |
+      iperf     |             |     iperf
+        r0-----r1-------------r3-----r5
+                |             |
+                |_____________|
+                        r4
+
+    """
+    logger.info("Master Topology: \n %s", topology)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/multicast_pimv6_static_rp.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global TOPO
+    TOPO = tgen.json_topo
+
+    # ... and here it calls Mininet initialization functions.
+
+    # get the list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, TOPO)
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, TOPO)
+
+    # Verify PIM neighbors
+    result = verify_pim_neighbors(tgen, TOPO)
+    assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#   Testcases
+#
+#####################################################
+
+
+def verify_state_incremented(state_before, state_after):
+    """
+    Helper to verify that every interface traffic counter has incremented
+    between two samples.
+
+    Parameters
+    ----------
+    * `state_before` : State dictionary for any particular instance
+    * `state_after` : State dictionary for any particular instance
+
+    Returns
+    -------
+    True on success, otherwise an error message string describing the
+    counter that did not increment.
+    """
+
+    for router, state_data in state_before.items():
+        for state, value in state_data.items():
+            if state_before[router][state] >= state_after[router][state]:
+                errormsg = (
+                    "[DUT: %s]: state %s value has not"
+                    " incremented, Initial value: %s, "
+                    "Current value: %s [FAILED!!]"
+                    % (
+                        router,
+                        state,
+                        state_before[router][state],
+                        state_after[router][state],
+                    )
+                )
+                return errormsg
+
+            logger.info(
+                "[DUT: %s]: State %s value is "
+                "incremented, Initial value: %s, Current value: %s"
+                " [PASSED!!]",
+                router,
+                state,
+                state_before[router][state],
+                state_after[router][state],
+            )
+
+    return True
+
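+# Usage sketch (illustrative only): sample the counters with
+# verify_pim_interface_traffic() before and after the event under test and
+# feed both samples to this helper, e.g.
+#   state_before = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+#   <trigger the event, e.g. delete the static RP>
+#   state_after = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+#   assert verify_state_incremented(state_before, state_after) is True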
+
+#####################################################
+
+def test_pimv6_add_delete_static_RP_p0(request):
+    """
+    TC_1: Verify upstream interfaces (IIF) and join state are updated
+        properly after adding and deleting the static RP
+    TC_2: Verify IIF and OIL in "show ip pim state" are updated properly
+        after adding and deleting the static RP
+    TC_3: (*, G) mroute entries are cleared when the static RP gets deleted
+    TC_4: Verify (*,G) prune is sent towards the RP after deleting the
+        static RP
+
+    Topology used:
+         r0------r1-----r2
+       iperf    DUT     RP
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+
+    step("Shut link b/w R1 and R3 and R1 and R4 as per tescase topology")
+    intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+    intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+    for intf in [intf_r1_r3, intf_r1_r4]:
+        shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
+
+    step("Enable PIM between r1 and r2")
+    step("Enable MLD on r1 interface and send IGMP " "join (FF08::1) to r1")
+    step("Configure r2 loopback interface as RP")
+    input_dict = {
+        "r2": {
+            "pim6": {
+                "rp": [
+                    {
+                        "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_V6,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify show ip pim interface traffic without any mld join")
+    state_dict = {
+        "r1": {TOPO["routers"]["r1"]["links"]["r2"]["interface"]: ["pruneTx"]}
+    }
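+    # The pruneTx counter on the r1->r2 interface is sampled here and again
+    # at the end of the test; an increment after the static RP is deleted
+    # confirms that a (*,G) prune was sent towards the RP (TC_4).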
+
+    state_before = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+    assert isinstance(
+        state_before, dict
+    ), "Testcase {} : Failed \n state_before is not a dictionary \n Error: {}".format(
+        tc_name, state_before
+    )
+
+    step("send mld join (FF08::1) to R1")
+    intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+    intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+    result = socat_send_igmp_join_traffic(
+        tgen, "r0", "UDP6-RECV", IGMP_JOIN_V6, intf, intf_ip, join=True
+    )
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("r1: Verify RP info")
+    dut = "r1"
+    oif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+    iif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+    rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, oif, STAR, IGMP_JOIN_V6)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify PIM state")
+    result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Delete RP configuration")
+    input_dict = {
+        "r2": {
+            "pim6": {
+                "rp": [
+                    {
+                        "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_V6,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, TOPO, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("r1: Verify RP info")
+    result = verify_pim_rp_info(
+        tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} :Failed \n " "RP: {} info is still present \n Error: {}".format(
+        tc_name, rp_address, result
+    )
+
+    step("r1: Verify upstream IIF interface")
+    result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False)
+    assert result is not True, (
+        "Testcase {} :Failed \n "
+        "Upstream ({}, {}) is still in join state \n Error: {}".format(
+            tc_name, STAR, IGMP_JOIN_V6, result
+        )
+    )
+
+    step("r1: Verify upstream join state and join timer")
+    result = verify_join_state_and_timer(
+        tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} :Failed \n "
+        "Upstream ({}, {}) timer is still running \n Error: {}".format(
+            tc_name, STAR, IGMP_JOIN_V6, result
+        )
+    )
+
+    step("r1: Verify PIM state")
+    result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6, expected=False)
+    assert result is not True, (
+        "Testcase {} :Failed \n "
+        "PIM state for group: {} is still Active \n Error: {}".format(
+            tc_name, IGMP_JOIN_V6, result
+        )
+    )
+
+    step("r1: Verify ip mroutes")
+    result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif, expected=False)
+    assert result is not True, (
+        "Testcase {} :Failed \n "
+        "mroute ({}, {}) is still present \n Error: {}".format(
+            tc_name, STAR, IGMP_JOIN_V6, result
+        )
+    )
+
+    step("r1: Verify show ip pim interface traffic without any IGMP join")
+    state_after = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+    assert isinstance(
+        state_after, dict
+    ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+        tc_name, result
+    )
+
+    result = verify_state_incremented(state_before, state_after)
+    assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
index 8c855620be6fba08cbc6ecde83eb12ce6e74504b..58d37a368c0df86d52a9dcd0edbf8f7375adbea9 100644 (file)
@@ -57,6 +57,8 @@ from lib.ospf import (
     create_router_ospf,
 )
 
+pytestmark = [pytest.mark.ospfd]
+
 # Global variables
 topo = None
 Iters = 5
index e7d0621df835f0f740de211b23b99c91ae403a65..85646a8fab5ac3f98cdeddf221c81b768fa287bc 100644 (file)
@@ -57,6 +57,8 @@ from lib.ospf import (
     create_router_ospf,
 )
 
+pytestmark = [pytest.mark.ospfd]
+
 # Global variables
 topo = None
 Iters = 5
index 4cb3747c5681e0f67b9f8988831eb2cddfeb31b1..ec97c254d1fc0c1989e7b10aa8d9b00788338cff 100644 (file)
@@ -57,6 +57,8 @@ from lib.ospf import (
     create_router_ospf,
 )
 
+pytestmark = [pytest.mark.ospfd]
+
 # Global variables
 topo = None
 Iters = 5
index e2a6ff64a48553bb7dedea9892c859d7ddd34832..37facfc5da3c4e991bca56255bc58aecbb5304ba 100644 (file)
@@ -316,17 +316,26 @@ def test_ospf6_kernel_route():
     for router in rlist:
         logger.info('Checking OSPF IPv6 kernel routes in "%s"', router.name)
 
-        routes = topotest.ip6_route(router)
-        expected = {
-            "2001:db8:1::/64": {},
-            "2001:db8:2::/64": {},
-            "2001:db8:3::/64": {},
-            "2001:db8:100::/64": {},
-            "2001:db8:200::/64": {},
-            "2001:db8:300::/64": {},
-        }
+        def _routes_in_fib6():
+            routes = topotest.ip6_route(router)
+            expected = {
+                "2001:db8:1::/64": {},
+                "2001:db8:2::/64": {},
+                "2001:db8:3::/64": {},
+                "2001:db8:100::/64": {},
+                "2001:db8:200::/64": {},
+                "2001:db8:300::/64": {},
+            }
+            logger.info("Routes:")
+            logger.info(routes)
+            logger.info(topotest.json_cmp(routes, expected))
+            logger.info("ENd:")
+            return topotest.json_cmp(routes, expected)
+
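+        # run_and_expect() re-invokes _routes_in_fib6 up to count=20 times,
+        # waiting 1 second between attempts, until it returns the expected
+        # value (None, i.e. json_cmp reports no mismatch).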
+        _, result = topotest.run_and_expect(_routes_in_fib6, None, count=20, wait=1)
+
         assertmsg = 'OSPF IPv6 route mismatch in router "{}"'.format(router.name)
-        assert topotest.json_cmp(routes, expected) is None, assertmsg
+        assert result is None, assertmsg
 
 
 def test_ospf_json():
@@ -337,6 +346,7 @@ def test_ospf_json():
 
     for rnum in range(1, 5):
         router = tgen.gears["r{}".format(rnum)]
+        logger.info(router.vtysh_cmd("show ip ospf database"))
         logger.info('Comparing router "%s" "show ip ospf json" output', router.name)
         expected = {
             "routerId": "10.0.255.{}".format(rnum),
index 3967f5f42a886042679e1f75b72b25ce68854bea..59ba8236c743640a42c7687a6bc240d327320ab6 100644 (file)
@@ -68,6 +68,7 @@ from lib.ospf import (
     verify_ospf_summary,
 )
 
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
 
 # Global variables
 topo = None
index c9824e79c50970aa618bdea344048e5022e7d5c9..d318ec09062682b215863b579805ee70e306fb16 100644 (file)
@@ -183,7 +183,7 @@ def test_ospfv3_p2p_tc3_p0(request):
     step("Verify that interface is enabled in ospf.")
     step("Verify that config is successful.")
     dut = "r0"
-    input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+    input_dict = {"r0": {"links": {"r3": {"ospf6": {}}}}}
     result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
     assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
 
@@ -339,7 +339,7 @@ def test_ospfv3_p2p_tc3_p0(request):
     assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
     step("Verify that interface is enabled in ospf.")
     dut = "r0"
-    input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+    input_dict = {"r0": {"links": {"r3": {"ospf6": {}}}}}
     result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
     assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
 
@@ -374,7 +374,7 @@ def test_ospfv3_p2p_tc3_p0(request):
 
     step("Verify that interface is enabled in ospf.")
     dut = "r0"
-    input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+    input_dict = {"r0": {"links": {"r3": {"ospf6": {}}}}}
     result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
     assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
 
@@ -1172,7 +1172,6 @@ def test_ospfv3_show_p1(request):
                     "ospf6": {
                         "status": "up",
                         "type": "BROADCAST",
-                        "ospf6Enabled": True,
                         "attachedToArea": True,
                         "instanceId": 0,
                         "interfaceMtu": 1500,
index 860c4980b666b2488978f00d5d7ddd8c8523dd29..8aa08871e351306b685b78078d7757fd5ec479c1 100644 (file)
@@ -21,6 +21,7 @@ ripd=no
 ripngd=no
 isisd=no
 pimd=no
+pim6d=no
 ldpd=no
 nhrpd=no
 eigrpd=no
@@ -46,6 +47,7 @@ ripd_options="   -A 127.0.0.1"
 ripngd_options=" -A ::1"
 isisd_options="  -A 127.0.0.1"
 pimd_options="   -A 127.0.0.1"
+pim6d_options="  -A ::1"
 ldpd_options="   -A 127.0.0.1"
 nhrpd_options="  -A 127.0.0.1"
 eigrpd_options=" -A 127.0.0.1"
index 469e95ed7398339a83af26cf7df545b91972df05..6e14eb7abcc88a205f5648ee905ccd06d463389c 100644 (file)
@@ -5,6 +5,7 @@
 $outchannel frr_log,/var/log/frr/frr.log
 if  $programname == 'babeld' or
     $programname == 'bgpd' or
+    $programname == 'bfdd' or
     $programname == 'eigrpd' or
     $programname == 'frr' or
     $programname == 'isisd' or
@@ -14,6 +15,7 @@ if  $programname == 'babeld' or
     $programname == 'ospf6d' or
     $programname == 'ospfd' or
     $programname == 'pimd' or
+    $programname == 'pim6d' or
     $programname == 'pathd' or
     $programname == 'ripd' or
     $programname == 'ripngd' or
@@ -24,6 +26,7 @@ if  $programname == 'babeld' or
 
 if  $programname == 'babeld' or
     $programname == 'bgpd' or
+    $programname == 'bfdd' or
     $programname == 'eigrpd' or
     $programname == 'frr' or
     $programname == 'isisd' or
@@ -33,6 +36,7 @@ if  $programname == 'babeld' or
     $programname == 'ospf6d' or
     $programname == 'ospfd' or
     $programname == 'pimd' or
+    $programname == 'pim6d' or
     $programname == 'pathd' or
     $programname == 'ripd' or
     $programname == 'ripngd' or
index 743d32d5e22152a879c4b3e9fba2a4f1369589cf..32f9a7884197ef5710afbfd34bbe956a18d84220 100755 (executable)
@@ -1886,6 +1886,7 @@ if __name__ == "__main__":
         "ospfd",
         "pbrd",
         "pimd",
+        "pim6d",
         "ripd",
         "ripngd",
         "sharpd",
index 889c075f81d4ec0944cfee02beeda1a13dc46703..27b2c0ab84d8a83b47fe97969ec17226024ba802 100755 (executable)
@@ -27,7 +27,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
 # Local Daemon selection may be done by using /etc/frr/daemons.
 # See /usr/share/doc/frr/README.Debian.gz for further information.
 # Keep zebra first and do not list watchfrr!
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
 MAX_INSTANCES=5
 RELOAD_SCRIPT="$D_PATH/frr-reload.py"
 
index 3344ff49543ef31e94681a30a5432ad6d5a7be2f..b589ced965aec46e89728b46adaa98ee335cb6af 100755 (executable)
@@ -35,7 +35,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
 # - keep zebra first
 # - watchfrr does NOT belong in this list
 
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
 RELOAD_SCRIPT="$D_PATH/frr-reload.py"
 
 #
@@ -208,7 +208,7 @@ daemon_stop() {
 
        [ -r "$pidfile" ] || fail="pid file not found"
        $all && [ -n "$fail" ] && return 0
-       [ -z "$fail" ] && pid="$(cat \"$pidfile\")"
+       [ -z "$fail" ] && pid="$(cat "$pidfile")"
        [ -z "$fail" -a -z "$pid" ] && fail="pid file is empty"
        [ -n "$fail" ] || kill -0 "$pid" 2>/dev/null || fail="pid $pid not running"
 
@@ -242,7 +242,7 @@ daemon_status() {
        pidfile="$V_PATH/$daemon${inst:+-$inst}.pid"
 
        [ -r "$pidfile" ] || return 3
-       pid="$(cat \"$pidfile\")"
+       pid="$(cat "$pidfile")"
        [ -z "$pid" ] && return 1
        kill -0 "$pid" 2>/dev/null || return 1
        return 0
@@ -272,7 +272,7 @@ all_start() {
 }
 
 all_stop() {
-       local pids reversed
+       local pids reversed need_zebra
 
        daemon_list enabled_daemons disabled_daemons
        [ "$1" = "--reallyall" ] && enabled_daemons="$enabled_daemons $disabled_daemons"
@@ -282,13 +282,23 @@ all_stop() {
                reversed="$dmninst $reversed"
        done
 
+       # Stop zebra last, after trying to stop the other daemons
        for dmninst in $reversed; do
+               if [ "$dmninst" = "zebra" ]; then
+                       need_zebra="yes"
+                       continue
+               fi
+
                daemon_stop "$dmninst" "$1" &
                pids="$pids $!"
        done
        for pid in $pids; do
                wait $pid
        done
+
+       if [ -n "$need_zebra" ]; then
+               daemon_stop "zebra"
+       fi
 }
 
 all_status() {
index e9f397f225044859ec24c66ec6f25e52c8013324..0a24b091a3931ee0a5a9d01ec9bc138dd117a5d2 100644 (file)
@@ -1042,7 +1042,7 @@ check_format_info (function_format_info *info, tree params,
   format_ctx.arglocs = arglocs;
 
   check_function_arguments_recurse (check_format_arg, &format_ctx,
-                                   format_tree, arg_num);
+                                   format_tree, arg_num, OPT_Wformat_);
 
   location_t loc = format_ctx.res->format_string_loc;
 
index ec45de1a53deb4fef624e15e6f72c7a3a6e0d646..9f59447d63347840aa5f61b1ec0e420954f0fe4c 100644 (file)
@@ -982,4 +982,9 @@ static inline void debug_gimple_stmt(const_gimple s)
 #define SET_DECL_MODE(decl, mode)      DECL_MODE(decl) = (mode)
 #endif
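+
+/*
+ * GCC 12 added an option argument to check_function_arguments_recurse();
+ * on older GCC versions this shim macro drops that extra argument so the
+ * callers can pass it unconditionally.
+ */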
 
+#if BUILDING_GCC_VERSION < 12000
+#define check_function_arguments_recurse(arg, ctx, tree, num, opt)             \
+       check_function_arguments_recurse(arg, ctx, tree, num)
+#endif
+
 #endif
index 3081c0d955bc6701ad33635d45d621810b2cb5e1..4a0356411f6594cf86d929cc41859aa5eb9e34bd 100644 (file)
@@ -985,7 +985,7 @@ static int vrrp_recv_advertisement(struct vrrp_router *r, struct ipaddr *src,
  */
 static void vrrp_read(struct thread *thread)
 {
-       struct vrrp_router *r = thread->arg;
+       struct vrrp_router *r = THREAD_ARG(thread);
 
        struct vrrp_pkt *pkt;
        ssize_t pktsize;
@@ -1480,7 +1480,7 @@ static void vrrp_change_state(struct vrrp_router *r, int to)
  */
 static void vrrp_adver_timer_expire(struct thread *thread)
 {
-       struct vrrp_router *r = thread->arg;
+       struct vrrp_router *r = THREAD_ARG(thread);
 
        DEBUGD(&vrrp_dbg_proto,
               VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
@@ -1508,7 +1508,7 @@ static void vrrp_adver_timer_expire(struct thread *thread)
  */
 static void vrrp_master_down_timer_expire(struct thread *thread)
 {
-       struct vrrp_router *r = thread->arg;
+       struct vrrp_router *r = THREAD_ARG(thread);
 
        zlog_info(VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
                  "Master_Down_Timer expired",
index a52bd7b116b36ca79c2e93825d7106dc3ad7a77f..f39ecf0709014e248efde9fb6f034d4a371ee7e7 100644 (file)
@@ -51,6 +51,7 @@
 #include "frrstr.h"
 #include "json.h"
 #include "ferr.h"
+#include "bgpd/bgp_vty.h"
 
 DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CMD, "Vtysh cmd copy");
 
@@ -885,10 +886,23 @@ int vtysh_config_from_file(struct vty *vty, FILE *fp)
        int lineno = 0;
        /* once we have an error, we remember & return that */
        int retcode = CMD_SUCCESS;
+       char *vty_buf_copy = XCALLOC(MTYPE_VTYSH_CMD, VTY_BUFSIZ);
+       char *vty_buf_trimmed = NULL;
 
        while (fgets(vty->buf, VTY_BUFSIZ, fp)) {
                lineno++;
 
+               strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
+               vty_buf_trimmed = trim(vty_buf_copy);
+
+               /*
+                * Ignore the "end" lines, we will generate these where
+                * appropriate, otherwise we never execute
+                * XFRR_end_configuration, and start/end markers do not work.
+                */
+               if (strmatch(vty_buf_trimmed, "end"))
+                       continue;
+
                ret = command_config_read_one_line(vty, &cmd, lineno, 1);
 
                switch (ret) {
@@ -955,6 +969,8 @@ int vtysh_config_from_file(struct vty *vty, FILE *fp)
                }
        }
 
+       XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
+
        return (retcode);
 }
 
@@ -1685,8 +1701,8 @@ DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_vpnv4, address_family_vpnv4_cmd,
        "address-family vpnv4 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV4_NODE;
        return CMD_SUCCESS;
@@ -1695,8 +1711,8 @@ DEFUNSH(VTYSH_BGPD, address_family_vpnv4, address_family_vpnv4_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_vpnv6, address_family_vpnv6_cmd,
        "address-family vpnv6 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV6_NODE;
        return CMD_SUCCESS;
@@ -1706,8 +1722,8 @@ DEFUNSH(VTYSH_BGPD, address_family_vpnv6, address_family_vpnv6_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_ipv4, address_family_ipv4_cmd,
        "address-family ipv4 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family Modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV4_NODE;
        return CMD_SUCCESS;
@@ -1716,8 +1732,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4, address_family_ipv4_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_flowspecv4, address_family_flowspecv4_cmd,
        "address-family ipv4 flowspec",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family Modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_FLOWSPECV4_NODE;
        return CMD_SUCCESS;
@@ -1726,8 +1742,8 @@ DEFUNSH(VTYSH_BGPD, address_family_flowspecv4, address_family_flowspecv4_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_flowspecv6, address_family_flowspecv6_cmd,
        "address-family ipv6 flowspec",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family Modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_FLOWSPECV6_NODE;
        return CMD_SUCCESS;
@@ -1736,8 +1752,8 @@ DEFUNSH(VTYSH_BGPD, address_family_flowspecv6, address_family_flowspecv6_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_ipv4_multicast,
        address_family_ipv4_multicast_cmd, "address-family ipv4 multicast",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV4M_NODE;
        return CMD_SUCCESS;
@@ -1746,8 +1762,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_multicast,
 DEFUNSH(VTYSH_BGPD, address_family_ipv4_vpn, address_family_ipv4_vpn_cmd,
        "address-family ipv4 vpn",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV4_NODE;
        return CMD_SUCCESS;
@@ -1757,8 +1773,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_labeled_unicast,
        address_family_ipv4_labeled_unicast_cmd,
        "address-family ipv4 labeled-unicast",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV4L_NODE;
        return CMD_SUCCESS;
@@ -1767,8 +1783,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_labeled_unicast,
 DEFUNSH(VTYSH_BGPD, address_family_ipv6, address_family_ipv6_cmd,
        "address-family ipv6 [unicast]",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV6_NODE;
        return CMD_SUCCESS;
@@ -1777,8 +1793,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6, address_family_ipv6_cmd,
 DEFUNSH(VTYSH_BGPD, address_family_ipv6_multicast,
        address_family_ipv6_multicast_cmd, "address-family ipv6 multicast",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV6M_NODE;
        return CMD_SUCCESS;
@@ -1787,8 +1803,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6_multicast,
 DEFUNSH(VTYSH_BGPD, address_family_ipv6_vpn, address_family_ipv6_vpn_cmd,
        "address-family ipv6 vpn",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_VPNV6_NODE;
        return CMD_SUCCESS;
@@ -1798,8 +1814,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6_labeled_unicast,
        address_family_ipv6_labeled_unicast_cmd,
        "address-family ipv6 labeled-unicast",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_IPV6L_NODE;
        return CMD_SUCCESS;
@@ -1863,8 +1879,8 @@ DEFUNSH(VTYSH_BGPD,
 DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
        "address-family <l2vpn evpn>",
        "Enter Address Family command mode\n"
-       "Address Family\n"
-       "Address Family modifier\n")
+       BGP_AF_STR
+       BGP_AF_MODIFIER_STR)
 {
        vty->node = BGP_EVPN_NODE;
        return CMD_SUCCESS;
@@ -3140,6 +3156,20 @@ DEFUN(vtysh_debug_uid_backtrace,
        return err;
 }
 
+DEFUNSH(VTYSH_ALL, vtysh_allow_reserved_ranges, vtysh_allow_reserved_ranges_cmd,
+       "allow-reserved-ranges",
+       "Allow using IPv4 (Class E) reserved IP space\n")
+{
+       return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_ALL, no_vtysh_allow_reserved_ranges,
+       no_vtysh_allow_reserved_ranges_cmd, "no allow-reserved-ranges",
+       NO_STR "Allow using IPv4 (Class E) reserved IP space\n")
+{
+       return CMD_SUCCESS;
+}
+
 DEFUNSH(VTYSH_ALL, vtysh_service_password_encrypt,
        vtysh_service_password_encrypt_cmd, "service password-encryption",
        "Set up miscellaneous service\n"
@@ -4902,6 +4932,9 @@ void vtysh_init_vty(void)
        install_element(CONFIG_NODE, &vtysh_service_password_encrypt_cmd);
        install_element(CONFIG_NODE, &no_vtysh_service_password_encrypt_cmd);
 
+       install_element(CONFIG_NODE, &vtysh_allow_reserved_ranges_cmd);
+       install_element(CONFIG_NODE, &no_vtysh_allow_reserved_ranges_cmd);
+
        install_element(CONFIG_NODE, &vtysh_password_cmd);
        install_element(CONFIG_NODE, &no_vtysh_password_cmd);
        install_element(CONFIG_NODE, &vtysh_enable_password_cmd);
index 3bd5489eefa45d08710074b9b36cce986a32197b..a7ec2a93c2d56f3ad9636157044ce81d08d7107f 100644 (file)
@@ -478,14 +478,18 @@ void vtysh_config_parse_line(void *arg, const char *line)
                else if (strncmp(line, "rpki", strlen("rpki")) == 0)
                        config = config_get(RPKI_NODE, line);
                else {
-                       if (strncmp(line, "log", strlen("log")) == 0
-                           || strncmp(line, "hostname", strlen("hostname")) == 0
-                           || strncmp(line, "domainname", strlen("domainname")) == 0
-                           || strncmp(line, "frr", strlen("frr")) == 0
-                           || strncmp(line, "agentx", strlen("agentx")) == 0
-                           || strncmp(line, "no log", strlen("no log")) == 0
-                           || strncmp(line, "no ip prefix-list", strlen("no ip prefix-list")) == 0
-                           || strncmp(line, "no ipv6 prefix-list", strlen("no ipv6 prefix-list")) == 0)
+                       if (strncmp(line, "log", strlen("log")) == 0 ||
+                           strncmp(line, "hostname", strlen("hostname")) ==
+                                   0 ||
+                           strncmp(line, "domainname", strlen("domainname")) ==
+                                   0 ||
+                           strncmp(line, "frr", strlen("frr")) == 0 ||
+                           strncmp(line, "agentx", strlen("agentx")) == 0 ||
+                           strncmp(line, "no log", strlen("no log")) == 0 ||
+                           strncmp(line, "no ip prefix-list",
+                                   strlen("no ip prefix-list")) == 0 ||
+                           strncmp(line, "no ipv6 prefix-list",
+                                   strlen("no ipv6 prefix-list")) == 0)
                                config_add_line_uniq(config_top, line);
                        else
                                config_add_line(config_top, line);
index 04eb47feeb8a5529ed658a3d65c8bb3080f4fd9c..ca119eb9004155fdc4dc45c0d984b7ddcafcc749 100644 (file)
@@ -78,36 +78,56 @@ int execute_flag = 0;
 /* Flag to indicate if in user/unprivileged mode. */
 int user_mode;
 
-/* For sigsetjmp() & siglongjmp(). */
-static sigjmp_buf jmpbuf;
-
-/* Flag for avoid recursive siglongjmp() call. */
-static int jmpflag = 0;
-
 /* Master of threads. */
 struct thread_master *master;
 
 /* Command logging */
 FILE *logfile;
 
+static void vtysh_rl_callback(char *line_read)
+{
+       HIST_ENTRY *last;
+
+       rl_callback_handler_remove();
+
+       if (!line_read) {
+               vtysh_loop_exited = true;
+               return;
+       }
+
+       /* If the line has any text in it, save it on the history. But only if
+        * last command in history isn't the same one.
+        */
+       if (*line_read) {
+               using_history();
+               last = previous_history();
+               if (!last || strcmp(last->line, line_read) != 0) {
+                       add_history(line_read);
+                       append_history(1, history_file);
+               }
+       }
+
+       vtysh_execute(line_read);
+
+       if (!vtysh_loop_exited)
+               rl_callback_handler_install(vtysh_prompt(), vtysh_rl_callback);
+}
+
 /* SIGTSTP handler.  This function handles the user's ^Z input. */
 static void sigtstp(int sig)
 {
+       rl_callback_handler_remove();
+
        /* Execute "end" command. */
        vtysh_execute("end");
 
+       if (!vtysh_loop_exited)
+               rl_callback_handler_install(vtysh_prompt(), vtysh_rl_callback);
+
        /* Initialize readline. */
        rl_initialize();
        printf("\n");
-
-       /* Check jmpflag for duplicate siglongjmp(). */
-       if (!jmpflag)
-               return;
-
-       jmpflag = 0;
-
-       /* Back to main command loop. */
-       siglongjmp(jmpbuf, 1);
+       rl_forced_update_display();
 }
 
 /* SIGINT handler.  This function handles the user's ^C input.  */
@@ -207,34 +227,6 @@ struct option longopts[] = {
 
 bool vtysh_loop_exited;
 
-static void vtysh_rl_callback(char *line_read)
-{
-       HIST_ENTRY *last;
-
-       rl_callback_handler_remove();
-
-       if (!line_read) {
-               vtysh_loop_exited = true;
-               return;
-       }
-
-       /* If the line has any text in it, save it on the history. But only if
-        * last command in history isn't the same one. */
-       if (*line_read) {
-               using_history();
-               last = previous_history();
-               if (!last || strcmp(last->line, line_read) != 0) {
-                       add_history(line_read);
-                       append_history(1, history_file);
-               }
-       }
-
-       vtysh_execute(line_read);
-
-       if (!vtysh_loop_exited)
-               rl_callback_handler_install(vtysh_prompt(), vtysh_rl_callback);
-}
-
 static struct thread *vtysh_rl_read_thread;
 
 static void vtysh_rl_read(struct thread *thread)
@@ -752,10 +744,6 @@ int main(int argc, char **argv, char **env)
 
        vtysh_add_timestamp = ts_flag;
 
-       /* Preparation for longjmp() in sigtstp(). */
-       sigsetjmp(jmpbuf, 1);
-       jmpflag = 1;
-
        /* Main command loop. */
        vtysh_rl_run();
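
With the sigsetjmp()/siglongjmp() escape removed, vtysh now drives GNU readline entirely through its callback ("alternate") interface: the prompt is installed with rl_callback_handler_install(), the event loop feeds characters in via rl_callback_read_char() (see vtysh_rl_read above), and completed lines arrive in vtysh_rl_callback(). A minimal standalone sketch of the same pattern, using select() instead of FRR's thread library:

        /* Minimal sketch of readline's callback interface driven by select();
         * vtysh does the equivalent through its event loop. */
        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/select.h>
        #include <readline/readline.h>

        static bool done;

        static void on_line(char *line)
        {
                if (!line) {            /* EOF */
                        done = true;
                        return;
                }
                printf("got: %s\n", line);
                free(line);             /* the callback owns the line */
        }

        int main(void)
        {
                rl_callback_handler_install("sketch> ", on_line);

                while (!done) {
                        fd_set rfds;

                        FD_ZERO(&rfds);
                        FD_SET(fileno(rl_instream), &rfds);
                        if (select(fileno(rl_instream) + 1, &rfds, NULL, NULL,
                                   NULL) > 0)
                                rl_callback_read_char();
                }

                rl_callback_handler_remove();
                return 0;
        }
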
 
index fc285c748aa0266942529454bf12407fc69f25a5..423f25faa2ccb8652681935b9aab0a16fe67c2cb 100644 (file)
@@ -1429,7 +1429,7 @@ int main(int argc, char **argv)
 
                        if ((sscanf(optarg, "%ld%1s", &gs.operational_timeout,
                                    garbage) != 1) ||
-                           (gs.max_restart_interval < 0)) {
+                           (gs.operational_timeout < 0)) {
                                fprintf(stderr,
                                        "Invalid Operational_timeout argument: %s\n",
                                        optarg);
index f8b866cd2589c008f5c51680fa4befb96d740c84..afefab66746d4543b5fafe326cd7f0cc3aedfaea 100644 (file)
@@ -543,6 +543,8 @@ const char *rtm_rta2str(int type)
                return "MFC_STATS";
        case RTA_NH_ID:
                return "NH_ID";
+       case RTA_EXPIRES:
+               return "EXPIRES";
        default:
                return "UNKNOWN";
        }
@@ -1070,9 +1072,11 @@ next_rta:
 
 static void nlroute_dump(struct rtmsg *rtm, size_t msglen)
 {
+       struct rta_mfc_stats *mfc_stats;
        struct rtattr *rta;
        size_t plen;
        uint32_t u32v;
+       uint64_t u64v;
 
        /* Get the first attribute and go from there. */
        rta = RTM_RTA(rtm);
@@ -1095,6 +1099,11 @@ next_rta:
                zlog_debug("      %u", u32v);
                break;
 
+       case RTA_EXPIRES:
+               u64v = *(uint64_t *)RTA_DATA(rta);
+               zlog_debug("      %" PRIu64, u64v);
+               break;
+
        case RTA_GATEWAY:
        case RTA_DST:
        case RTA_SRC:
@@ -1113,6 +1122,14 @@ next_rta:
                }
                break;
 
+       case RTA_MFC_STATS:
+               mfc_stats = (struct rta_mfc_stats *)RTA_DATA(rta);
+               zlog_debug("      pkts=%ju bytes=%ju wrong_if=%ju",
+                          (uintmax_t)mfc_stats->mfcs_packets,
+                          (uintmax_t)mfc_stats->mfcs_bytes,
+                          (uintmax_t)mfc_stats->mfcs_wrong_if);
+               break;
+
        default:
                /* NOTHING: unhandled. */
                break;
@@ -1519,6 +1536,24 @@ next_rta:
        goto next_rta;
 }
 
+static const char *tcm_nltype2str(int nltype)
+{
+       switch (nltype) {
+       case RTM_NEWQDISC:
+       case RTM_DELQDISC:
+               return "qdisc";
+       case RTM_NEWTCLASS:
+       case RTM_DELTCLASS:
+               return "tclass";
+       case RTM_NEWTFILTER:
+       case RTM_DELTFILTER:
+               return "tfilter";
+       default:
+               /* should never hit */
+               return "unknown";
+       }
+}
+
 static void nlncm_dump(const struct netconfmsg *ncm, size_t msglen)
 {
        const struct rtattr *rta;
@@ -1578,6 +1613,8 @@ void nl_dump(void *msg, size_t msglen)
        struct ifinfomsg *ifi;
        struct tunnel_msg *tnlm;
        struct fib_rule_hdr *frh;
+       struct tcmsg *tcm;
+
        char fbuf[128];
        char ibuf[128];
 
@@ -1713,6 +1750,21 @@ next_header:
                nlncm_dump(ncm, nlmsg->nlmsg_len - NLMSG_LENGTH(sizeof(*ncm)));
                break;
 
+       case RTM_NEWQDISC:
+       case RTM_DELQDISC:
+       case RTM_NEWTCLASS:
+       case RTM_DELTCLASS:
+       case RTM_NEWTFILTER:
+       case RTM_DELTFILTER:
+               tcm = NLMSG_DATA(nlmsg);
+               zlog_debug(
+                       " tcm [type=%s family=%s (%d) ifindex=%d handle=%04x:%04x]",
+                       tcm_nltype2str(nlmsg->nlmsg_type),
+                       af_type2str(tcm->tcm_family), tcm->tcm_family,
+                       tcm->tcm_ifindex, tcm->tcm_handle >> 16,
+                       tcm->tcm_handle & 0xffff);
+               break;
+
        default:
                break;
        }
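
The new tcm dump above prints the traffic-control handle as the usual "major:minor" pair by splitting tcm_handle at bit 16. The same decoding in isolation, as a tiny sketch:

        #include <stdint.h>
        #include <stdio.h>

        /* A tc handle packs major in the upper 16 bits and minor in the
         * lower 16 bits; this mirrors the "%04x:%04x" format used above. */
        static void print_tc_handle(uint32_t handle)
        {
                printf("%04x:%04x\n", handle >> 16, handle & 0xffff);
        }
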
index ec4ea372f1c3950c2140e190fcdc66424225b7df..d07c4c63324b9985acd4725e097cdd20677dbf96 100644 (file)
@@ -815,6 +815,9 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
        case DPLANE_OP_INTF_INSTALL:
        case DPLANE_OP_INTF_UPDATE:
        case DPLANE_OP_INTF_DELETE:
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
        case DPLANE_OP_NONE:
                break;
 
index 4e4ebc9cdaee9608367f545358fafdf03ee7f026..2c83a7ed4ce5f2914a020958476a1d64796685c5 100644 (file)
@@ -199,7 +199,7 @@ static int if_getaddrs(void)
                ifp = if_lookup_by_name(ifap->ifa_name, VRF_DEFAULT);
                if (ifp == NULL) {
                        flog_err(EC_LIB_INTERFACE,
-                                "if_getaddrs(): Can't lookup interface %s\n",
+                                "if_getaddrs(): Can't lookup interface %s",
                                 ifap->ifa_name);
                        continue;
                }
index 7a4e1304bd6ea2832a0fe9df589e8ceb00148be2..c7e7443a1b7a18862f65848ebf05a0c0819c7e93 100644 (file)
@@ -423,7 +423,7 @@ static uint32_t get_iflink_speed(struct interface *interface, int *error)
        ecmd.cmd = ETHTOOL_GSET; /* ETHTOOL_GLINK */
        ifdata.ifr_data = (caddr_t)&ecmd;
 
-       /* use ioctl to get IP address of an interface */
+       /* use ioctl to get speed of an interface */
        frr_with_privs(&zserv_privs) {
                sd = vrf_socket(PF_INET, SOCK_DGRAM, IPPROTO_IP,
                                interface->vrf->vrf_id, NULL);
@@ -436,7 +436,7 @@ static uint32_t get_iflink_speed(struct interface *interface, int *error)
                                *error = -1;
                        return 0;
                }
-       /* Get the current link state for the interface */
+               /* Get the current link state for the interface */
                rc = vrf_ioctl(interface->vrf->vrf_id, sd, SIOCETHTOOL,
                               (char *)&ifdata);
        }
index 309d5a3f3e2091198401b0a2357a07c5c2c753d9..da9fadf7c7e3665dbfdbdccddad668ce7f9d0180 100644 (file)
@@ -38,4 +38,15 @@ enum zebra_dplane_result kernel_intf_update(struct zebra_dplane_ctx *ctx)
        return ZEBRA_DPLANE_REQUEST_FAILURE;
 }
 
+enum zebra_dplane_result
+kernel_intf_netconf_update(struct zebra_dplane_ctx *ctx)
+{
+       const char *ifname = dplane_ctx_get_ifname(ctx);
+       enum dplane_netconf_status_e mpls_on = dplane_ctx_get_netconf_mpls(ctx);
+
+       zlog_warn("%s:  Unable to set kernel mpls state for interface %s(%d)",
+                 __func__, ifname, mpls_on);
+
+       return ZEBRA_DPLANE_REQUEST_SUCCESS;
+}
 #endif
index 9800e6450335d0a54e6dde00d1ea7b947d156b35..c674b499ac82ad6ec4e30506300262ab653630ec 100644 (file)
@@ -147,8 +147,8 @@ static int if_zebra_new_hook(struct interface *ifp)
        zebra_if = XCALLOC(MTYPE_ZINFO, sizeof(struct zebra_if));
        zebra_if->ifp = ifp;
 
-       zebra_if->multicast = IF_ZEBRA_MULTICAST_UNSPEC;
-       zebra_if->shutdown = IF_ZEBRA_SHUTDOWN_OFF;
+       zebra_if->multicast = IF_ZEBRA_DATA_UNSPEC;
+       zebra_if->shutdown = IF_ZEBRA_DATA_OFF;
 
        zebra_if_nhg_dependents_init(zebra_if);
 
@@ -583,9 +583,9 @@ void if_add_update(struct interface *ifp)
        if_data = ifp->info;
        assert(if_data);
 
-       if (if_data->multicast == IF_ZEBRA_MULTICAST_ON)
+       if (if_data->multicast == IF_ZEBRA_DATA_ON)
                if_set_flags(ifp, IFF_MULTICAST);
-       else if (if_data->multicast == IF_ZEBRA_MULTICAST_OFF)
+       else if (if_data->multicast == IF_ZEBRA_DATA_OFF)
                if_unset_flags(ifp, IFF_MULTICAST);
 
        zebra_ptm_if_set_ptm_state(ifp, if_data);
@@ -595,7 +595,7 @@ void if_add_update(struct interface *ifp)
        if (!CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) {
                SET_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE);
 
-               if (if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON) {
+               if (if_data->shutdown == IF_ZEBRA_DATA_ON) {
                        if (IS_ZEBRA_DEBUG_KERNEL) {
                                zlog_debug(
                                        "interface %s vrf %s(%u) index %d is shutdown. Won't wake it up.",
@@ -848,10 +848,6 @@ void if_handle_vrf_change(struct interface *ifp, vrf_id_t vrf_id)
        /* Send out notification on interface VRF change. */
        /* This is to issue an ADD, if needed. */
        zebra_interface_vrf_update_add(ifp, old_vrf_id);
-
-       /* Install connected routes (in new VRF). */
-       if (if_is_operative(ifp))
-               if_install_connected(ifp);
 }
 
 static void ipv6_ll_address_to_mac(struct in6_addr *address, uint8_t *mac)
@@ -1453,9 +1449,10 @@ static void zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx,
                /*
                 * mpls netconf data is neither v4 nor v6; it's AF_MPLS!
                 */
-               if (mpls == DPLANE_NETCONF_STATUS_ENABLED)
+               if (mpls == DPLANE_NETCONF_STATUS_ENABLED) {
                        zif->mpls = true;
-               else if (mpls == DPLANE_NETCONF_STATUS_DISABLED)
+                       zebra_mpls_turned_on();
+               } else if (mpls == DPLANE_NETCONF_STATUS_DISABLED)
                        zif->mpls = false;
        }
 
@@ -1504,7 +1501,7 @@ void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx)
                if (IS_ZEBRA_DEBUG_KERNEL)
                        zlog_debug("%s: can't find zns id %u", __func__, ns_id);
 
-               goto done;
+               return;
        }
 
        ifp = if_lookup_by_index_per_ns(zns, ifindex);
@@ -1516,7 +1513,7 @@ void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx)
                                        "%s: can't find ifp at nsid %u index %d",
                                        __func__, ns_id, ifindex);
 
-                       goto done;
+                       return;
                }
        }
 
@@ -1576,10 +1573,11 @@ void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx)
        case DPLANE_OP_IPSET_ENTRY_DELETE:
        case DPLANE_OP_NEIGH_TABLE_UPDATE:
        case DPLANE_OP_GRE_SET:
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
                break; /* should never hit here */
        }
-done:
-       dplane_ctx_fini(&ctx);
 }
 
 /* Dump if address information to vty. */
@@ -2910,7 +2908,7 @@ int if_multicast_set(struct interface *ifp)
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->multicast = IF_ZEBRA_MULTICAST_ON;
+       if_data->multicast = IF_ZEBRA_DATA_ON;
 
        return 0;
 }
@@ -2933,7 +2931,28 @@ DEFUN (multicast,
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->multicast = IF_ZEBRA_MULTICAST_ON;
+       if_data->multicast = IF_ZEBRA_DATA_ON;
+
+       return CMD_SUCCESS;
+}
+
+DEFPY (mpls,
+       mpls_cmd,
+       "[no] mpls enable",
+       NO_STR
+       MPLS_STR
+       "Set mpls to be on for the interface\n")
+{
+       VTY_DECLVAR_CONTEXT(interface, ifp);
+       struct zebra_if *if_data = ifp->info;
+
+       if (no) {
+               dplane_intf_mpls_modify_state(ifp, false);
+               if_data->mpls = IF_ZEBRA_DATA_UNSPEC;
+       } else {
+               dplane_intf_mpls_modify_state(ifp, true);
+               if_data->mpls = IF_ZEBRA_DATA_ON;
+       }
 
        return CMD_SUCCESS;
 }
@@ -2951,7 +2970,7 @@ int if_multicast_unset(struct interface *ifp)
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->multicast = IF_ZEBRA_MULTICAST_OFF;
+       if_data->multicast = IF_ZEBRA_DATA_OFF;
 
        return 0;
 }
@@ -2975,7 +2994,7 @@ DEFUN (no_multicast,
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->multicast = IF_ZEBRA_MULTICAST_OFF;
+       if_data->multicast = IF_ZEBRA_DATA_OFF;
 
        return CMD_SUCCESS;
 }
@@ -3041,7 +3060,7 @@ int if_shutdown(struct interface *ifp)
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->shutdown = IF_ZEBRA_SHUTDOWN_ON;
+       if_data->shutdown = IF_ZEBRA_DATA_ON;
 
        return 0;
 }
@@ -3066,7 +3085,7 @@ DEFUN (shutdown_if,
                if_refresh(ifp);
        }
        if_data = ifp->info;
-       if_data->shutdown = IF_ZEBRA_SHUTDOWN_ON;
+       if_data->shutdown = IF_ZEBRA_DATA_ON;
 
        return CMD_SUCCESS;
 }
@@ -3090,7 +3109,7 @@ int if_no_shutdown(struct interface *ifp)
        }
 
        if_data = ifp->info;
-       if_data->shutdown = IF_ZEBRA_SHUTDOWN_OFF;
+       if_data->shutdown = IF_ZEBRA_DATA_OFF;
 
        return 0;
 }
@@ -3121,7 +3140,7 @@ DEFUN (no_shutdown_if,
        }
 
        if_data = ifp->info;
-       if_data->shutdown = IF_ZEBRA_SHUTDOWN_OFF;
+       if_data->shutdown = IF_ZEBRA_DATA_OFF;
 
        return CMD_SUCCESS;
 }
@@ -3908,9 +3927,9 @@ int if_ip_address_install(struct interface *ifp, struct prefix *prefix,
                SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED);
 
        /* In case of this route need to install kernel. */
-       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED)
-           && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)
-           && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) {
+       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) &&
+           CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) &&
+           !(if_data && if_data->shutdown == IF_ZEBRA_DATA_ON)) {
                /* Some system need to up the interface to set IP address. */
                if (!if_is_up(ifp)) {
                        if_set_flags(ifp, IFF_UP | IFF_RUNNING);
@@ -4003,9 +4022,9 @@ static int ip_address_install(struct vty *vty, struct interface *ifp,
                SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED);
 
        /* In case of this route need to install kernel. */
-       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED)
-           && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)
-           && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) {
+       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) &&
+           CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) &&
+           !(if_data && if_data->shutdown == IF_ZEBRA_DATA_ON)) {
                /* Some system need to up the interface to set IP address. */
                if (!if_is_up(ifp)) {
                        if_set_flags(ifp, IFF_UP | IFF_RUNNING);
@@ -4266,9 +4285,9 @@ int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix,
                SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED);
 
        /* In case of this route need to install kernel. */
-       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED)
-           && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)
-           && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) {
+       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) &&
+           CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) &&
+           !(if_data && if_data->shutdown == IF_ZEBRA_DATA_ON)) {
                /* Some system need to up the interface to set IP address. */
                if (!if_is_up(ifp)) {
                        if_set_flags(ifp, IFF_UP | IFF_RUNNING);
@@ -4339,9 +4358,9 @@ static int ipv6_address_install(struct vty *vty, struct interface *ifp,
                SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED);
 
        /* In case of this route need to install kernel. */
-       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED)
-           && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)
-           && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) {
+       if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) &&
+           CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) &&
+           !(if_data && if_data->shutdown == IF_ZEBRA_DATA_ON)) {
                /* Some system need to up the interface to set IP address. */
                if (!if_is_up(ifp)) {
                        if_set_flags(ifp, IFF_UP | IFF_RUNNING);
@@ -4526,7 +4545,7 @@ static int if_config_write(struct vty *vty)
                        if_vty_config_start(vty, ifp);
 
                        if (if_data) {
-                               if (if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)
+                               if (if_data->shutdown == IF_ZEBRA_DATA_ON)
                                        vty_out(vty, " shutdown\n");
 
                                zebra_ptm_if_write(vty, if_data);
@@ -4576,13 +4595,14 @@ static int if_config_write(struct vty *vty)
                        }
 
                        if (if_data) {
-                               if (if_data->multicast
-                                   != IF_ZEBRA_MULTICAST_UNSPEC)
+                               if (if_data->multicast != IF_ZEBRA_DATA_UNSPEC)
                                        vty_out(vty, " %smulticast\n",
-                                               if_data->multicast
-                                                               == IF_ZEBRA_MULTICAST_ON
+                                               if_data->multicast ==
+                                                               IF_ZEBRA_DATA_ON
                                                        ? ""
                                                        : "no ");
+                               if (if_data->mpls == IF_ZEBRA_DATA_ON)
+                                       vty_out(vty, " mpls\n");
                        }
 
                        hook_call(zebra_if_config_wr, vty, ifp);
@@ -4619,6 +4639,7 @@ void zebra_if_init(void)
        install_element(ENABLE_NODE, &show_interface_desc_vrf_all_cmd);
        install_element(INTERFACE_NODE, &multicast_cmd);
        install_element(INTERFACE_NODE, &no_multicast_cmd);
+       install_element(INTERFACE_NODE, &mpls_cmd);
        install_element(INTERFACE_NODE, &linkdetect_cmd);
        install_element(INTERFACE_NODE, &no_linkdetect_cmd);
        install_element(INTERFACE_NODE, &shutdown_if_cmd);
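
The interface.c changes above collapse the separate multicast and shutdown flag values into one tri-state convention (IF_ZEBRA_DATA_UNSPEC/ON/OFF), which the new `mpls enable` command reuses as well. A small hedged sketch of how such a tri-state is typically consumed when writing configuration, using standalone names rather than zebra's:

        #include <stdio.h>

        /* Sketch of the tri-state convention: UNSPEC means "never
         * configured", so nothing is emitted for it. */
        enum if_tristate { IF_DATA_UNSPEC = 0, IF_DATA_ON = 1, IF_DATA_OFF = 2 };

        static void write_tristate(const char *keyword, enum if_tristate v)
        {
                if (v == IF_DATA_UNSPEC)
                        return;         /* not configured: stay silent */
                printf(" %s%s\n", v == IF_DATA_ON ? "" : "no ", keyword);
        }
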
index f3f41b90c5c418a1fc9b281ce81de598a88cc4aa..801078e83d52b640a60d5fc819a5a494bc86314d 100644 (file)
 extern "C" {
 #endif
 
-/* For interface multicast configuration. */
-#define IF_ZEBRA_MULTICAST_UNSPEC 0
-#define IF_ZEBRA_MULTICAST_ON     1
-#define IF_ZEBRA_MULTICAST_OFF    2
-
-/* For interface shutdown configuration. */
-#define IF_ZEBRA_SHUTDOWN_OFF    0
-#define IF_ZEBRA_SHUTDOWN_ON     1
+/* For interface configuration. */
+#define IF_ZEBRA_DATA_UNSPEC 0
+#define IF_ZEBRA_DATA_ON 1
+#define IF_ZEBRA_DATA_OFF 2
 
 #define IF_VLAN_BITMAP_MAX 4096
 
index 43478c98f17d85cc5c0e108df6af7e5616d16d1a..65aad49a25c9bdea718dcb01eb96eb5228b2dbd8 100644 (file)
@@ -260,7 +260,7 @@ void irdp_advert_off(struct interface *ifp)
        if (!irdp)
                return;
 
-       thread_cancel(&irdp->t_advertise);
+       THREAD_OFF(irdp->t_advertise);
 
        if (ifp->connected)
                for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, ifc)) {
@@ -295,7 +295,7 @@ void process_solicit(struct interface *ifp)
                return;
 
        irdp->flags |= IF_SOLICIT;
-       thread_cancel(&irdp->t_advertise);
+       THREAD_OFF(irdp->t_advertise);
 
        timer = (frr_weak_random() % MAX_RESPONSE_DELAY) + 1;
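
These irdp hunks swap direct thread_cancel() calls for the THREAD_OFF() convenience macro, the same accessor-style cleanup as the THREAD_ARG() conversions in the VRRP hunks earlier in this commit. Assumed shapes of both macros, illustrative only (the authoritative definitions are in lib/thread.h):

        /* Illustrative equivalents only -- not copied from lib/thread.h. */
        #define EXAMPLE_THREAD_ARG(X) ((X)->arg)

        /* Cancel the event only if one is scheduled; thread_cancel() takes
         * a pointer-to-pointer so it can NULL the caller's reference. */
        #define EXAMPLE_THREAD_OFF(thread)                                     \
                do {                                                           \
                        if ((thread))                                          \
                                thread_cancel(&(thread));                      \
                } while (0)
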
 
index 4bd0ac27f658eec33ff62c6fb38a56f60c5cde2b..45a372f88c9e5e78ec5a4177e64a251009079948 100644 (file)
@@ -47,6 +47,7 @@
 #include "zebra/rt_netlink.h"
 #include "zebra/if_netlink.h"
 #include "zebra/rule_netlink.h"
+#include "zebra/tc_netlink.h"
 #include "zebra/netconf_netlink.h"
 #include "zebra/zebra_errors.h"
 
@@ -114,6 +115,15 @@ static const struct message nlmsg_str[] = {{RTM_NEWROUTE, "RTM_NEWROUTE"},
                                           {RTM_NEWTUNNEL, "RTM_NEWTUNNEL"},
                                           {RTM_DELTUNNEL, "RTM_DELTUNNEL"},
                                           {RTM_GETTUNNEL, "RTM_GETTUNNEL"},
+                                          {RTM_NEWQDISC, "RTM_NEWQDISC"},
+                                          {RTM_DELQDISC, "RTM_DELQDISC"},
+                                          {RTM_GETQDISC, "RTM_GETQDISC"},
+                                          {RTM_NEWTCLASS, "RTM_NEWTCLASS"},
+                                          {RTM_DELTCLASS, "RTM_DELTCLASS"},
+                                          {RTM_GETTCLASS, "RTM_GETTCLASS"},
+                                          {RTM_NEWTFILTER, "RTM_NEWTFILTER"},
+                                          {RTM_DELTFILTER, "RTM_DELTFILTER"},
+                                          {RTM_GETTFILTER, "RTM_GETTFILTER"},
                                           {0}};
 
 static const struct message rtproto_str[] = {
@@ -290,9 +300,20 @@ static int netlink_recvbuf(struct nlsock *nl, uint32_t newsize)
        return 0;
 }
 
+static const char *group2str(uint32_t group)
+{
+       switch (group) {
+       case RTNLGRP_TUNNEL:
+               return "RTNLGRP_TUNNEL";
+       default:
+               return "UNKNOWN";
+       }
+}
+
 /* Make socket for Linux netlink interface. */
 static int netlink_socket(struct nlsock *nl, unsigned long groups,
-                         unsigned long ext_groups, ns_id_t ns_id)
+                         uint32_t ext_groups[], uint8_t ext_group_size,
+                         ns_id_t ns_id)
 {
        int ret;
        struct sockaddr_nl snl;
@@ -311,18 +332,30 @@ static int netlink_socket(struct nlsock *nl, unsigned long groups,
                snl.nl_family = AF_NETLINK;
                snl.nl_groups = groups;
 
+               if (ext_group_size) {
+                       uint8_t i;
+
+                       for (i = 0; i < ext_group_size; i++) {
 #if defined SOL_NETLINK
-               if (ext_groups) {
-                       ret = setsockopt(sock, SOL_NETLINK,
-                                        NETLINK_ADD_MEMBERSHIP, &ext_groups,
-                                        sizeof(ext_groups));
-                       if (ret < 0) {
+                               ret = setsockopt(sock, SOL_NETLINK,
+                                                NETLINK_ADD_MEMBERSHIP,
+                                                &ext_groups[i],
+                                                sizeof(ext_groups[i]));
+                               if (ret < 0) {
+                                       zlog_notice(
+                                               "can't setsockopt NETLINK_ADD_MEMBERSHIP for group %s(%u), this linux kernel does not support it: %s(%d)",
+                                               group2str(ext_groups[i]),
+                                               ext_groups[i],
+                                               safe_strerror(errno), errno);
+                               }
+#else
                                zlog_notice(
-                                       "can't setsockopt NETLINK_ADD_MEMBERSHIP: %s(%d)",
-                                       safe_strerror(errno), errno);
+                                       "Unable to use NETLINK_ADD_MEMBERSHIP via SOL_NETLINK for %s(%u) since the linux kernel does not support the socket option",
+                                       group2str(ext_groups[i]),
+                                       ext_groups[i]);
+#endif
                        }
                }
-#endif
 
                /* Bind the socket to the netlink structure for anything. */
                ret = bind(sock, (struct sockaddr *)&snl, sizeof(snl));
@@ -1590,14 +1623,21 @@ static enum netlink_msg_status nl_put_msg(struct nl_batch *bth,
 
        case DPLANE_OP_INTF_ADDR_ADD:
        case DPLANE_OP_INTF_ADDR_DEL:
-       case DPLANE_OP_INTF_NETCONFIG:
        case DPLANE_OP_NONE:
                return FRR_NETLINK_ERROR;
 
+       case DPLANE_OP_INTF_NETCONFIG:
+               return netlink_put_intf_netconfig(bth, ctx);
+
        case DPLANE_OP_INTF_INSTALL:
        case DPLANE_OP_INTF_UPDATE:
        case DPLANE_OP_INTF_DELETE:
                return netlink_put_intf_update_msg(bth, ctx);
+
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
+               return netlink_put_tc_update_msg(bth, ctx);
        }
 
        return FRR_NETLINK_ERROR;
@@ -1734,7 +1774,8 @@ void kernel_init(struct zebra_ns *zns)
        snprintf(zns->netlink.name, sizeof(zns->netlink.name),
                 "netlink-listen (NS %u)", zns->ns_id);
        zns->netlink.sock = -1;
-       if (netlink_socket(&zns->netlink, groups, ext_groups, zns->ns_id) < 0) {
+       if (netlink_socket(&zns->netlink, groups, &ext_groups, 1, zns->ns_id) <
+           0) {
                zlog_err("Failure to create %s socket",
                         zns->netlink.name);
                exit(-1);
@@ -1745,7 +1786,7 @@ void kernel_init(struct zebra_ns *zns)
        snprintf(zns->netlink_cmd.name, sizeof(zns->netlink_cmd.name),
                 "netlink-cmd (NS %u)", zns->ns_id);
        zns->netlink_cmd.sock = -1;
-       if (netlink_socket(&zns->netlink_cmd, 0, 0, zns->ns_id) < 0) {
+       if (netlink_socket(&zns->netlink_cmd, 0, 0, 0, zns->ns_id) < 0) {
                zlog_err("Failure to create %s socket",
                         zns->netlink_cmd.name);
                exit(-1);
@@ -1758,7 +1799,7 @@ void kernel_init(struct zebra_ns *zns)
                 sizeof(zns->netlink_dplane_out.name), "netlink-dp (NS %u)",
                 zns->ns_id);
        zns->netlink_dplane_out.sock = -1;
-       if (netlink_socket(&zns->netlink_dplane_out, 0, 0, zns->ns_id) < 0) {
+       if (netlink_socket(&zns->netlink_dplane_out, 0, 0, 0, zns->ns_id) < 0) {
                zlog_err("Failure to create %s socket",
                         zns->netlink_dplane_out.name);
                exit(-1);
@@ -1771,7 +1812,7 @@ void kernel_init(struct zebra_ns *zns)
                 sizeof(zns->netlink_dplane_in.name), "netlink-dp-in (NS %u)",
                 zns->ns_id);
        zns->netlink_dplane_in.sock = -1;
-       if (netlink_socket(&zns->netlink_dplane_in, dplane_groups, 0,
+       if (netlink_socket(&zns->netlink_dplane_in, dplane_groups, 0, 0,
                           zns->ns_id) < 0) {
                zlog_err("Failure to create %s socket",
                         zns->netlink_dplane_in.name);
@@ -1878,7 +1919,7 @@ static void kernel_nlsock_fini(struct nlsock *nls)
 
 void kernel_terminate(struct zebra_ns *zns, bool complete)
 {
-       thread_cancel(&zns->t_netlink);
+       THREAD_OFF(zns->t_netlink);
 
        kernel_nlsock_fini(&zns->netlink);
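
netlink_socket() now takes an array of extended group IDs and joins each one with its own NETLINK_ADD_MEMBERSHIP setsockopt() call; that is the only way to subscribe to rtnetlink groups (RTNLGRP_TUNNEL among them) whose numbers do not fit into the 32-bit nl_groups bind mask. A minimal standalone sketch of that call sequence:

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <linux/netlink.h>

        #ifndef SOL_NETLINK
        #define SOL_NETLINK 270         /* from <linux/socket.h> */
        #endif

        /* Sketch: open a route-netlink socket and join one extended group
         * via setsockopt(); error handling is reduced to a printf. */
        static int open_rtnl_with_group(uint32_t group)
        {
                struct sockaddr_nl snl = { .nl_family = AF_NETLINK };
                int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

                if (sock < 0)
                        return -1;

                if (bind(sock, (struct sockaddr *)&snl, sizeof(snl)) < 0 ||
                    setsockopt(sock, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                               &group, sizeof(group)) < 0) {
                        printf("netlink setup failed: %s\n", strerror(errno));
                        close(sock);
                        return -1;
                }
                return sock;
        }
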
 
index 57fd304ae8d0292bf7bf946ec60e2eee153f0743..cb549339af5c130977178956ed9802e456a8b913 100644 (file)
@@ -1354,6 +1354,16 @@ static void kernel_read(struct thread *thread)
 
        if (nbytes < 0) {
                if (errno == ENOBUFS) {
+#ifdef __FreeBSD__
+                       /*
+                        * ENOBUFS indicates a temporary resource
+                        * shortage and is not harmful for consistency of
+                        * reading the routing socket.  Ignore it.
+                        */
+                       thread_add_read(zrouter.master, kernel_read, NULL, sock,
+                                       NULL);
+                       return;
+#else
                        flog_err(EC_ZEBRA_RECVMSG_OVERRUN,
                                 "routing socket overrun: %s",
                                 safe_strerror(errno));
@@ -1363,6 +1373,7 @@ static void kernel_read(struct thread *thread)
                         *  recover zebra at this point.
                         */
                        exit(-1);
+#endif
                }
                if (errno != EAGAIN && errno != EWOULDBLOCK)
                        flog_err_sys(EC_LIB_SOCKET, "routing socket error: %s",
@@ -1518,7 +1529,7 @@ void kernel_update_multi(struct dplane_ctx_q *ctx_list)
 {
        struct zebra_dplane_ctx *ctx;
        struct dplane_ctx_q handled_list;
-       enum zebra_dplane_result res;
+       enum zebra_dplane_result res = ZEBRA_DPLANE_REQUEST_SUCCESS;
 
        TAILQ_INIT(&handled_list);
 
@@ -1592,6 +1603,12 @@ void kernel_update_multi(struct dplane_ctx_q *ctx_list)
                        res = kernel_intf_update(ctx);
                        break;
 
+               case DPLANE_OP_TC_INSTALL:
+               case DPLANE_OP_TC_UPDATE:
+               case DPLANE_OP_TC_DELETE:
+                       res = kernel_tc_update(ctx);
+                       break;
+
                /* Ignore 'notifications' - no-op */
                case DPLANE_OP_SYS_ROUTE_ADD:
                case DPLANE_OP_SYS_ROUTE_DELETE:
@@ -1600,9 +1617,27 @@ void kernel_update_multi(struct dplane_ctx_q *ctx_list)
                        res = ZEBRA_DPLANE_REQUEST_SUCCESS;
                        break;
 
-               default:
-                       res = ZEBRA_DPLANE_REQUEST_FAILURE;
+               case DPLANE_OP_INTF_NETCONFIG:
+                       res = kernel_intf_netconf_update(ctx);
                        break;
+
+               case DPLANE_OP_NONE:
+               case DPLANE_OP_BR_PORT_UPDATE:
+               case DPLANE_OP_IPTABLE_ADD:
+               case DPLANE_OP_IPTABLE_DELETE:
+               case DPLANE_OP_IPSET_ADD:
+               case DPLANE_OP_IPSET_DELETE:
+               case DPLANE_OP_IPSET_ENTRY_ADD:
+               case DPLANE_OP_IPSET_ENTRY_DELETE:
+               case DPLANE_OP_NEIGH_IP_INSTALL:
+               case DPLANE_OP_NEIGH_IP_DELETE:
+               case DPLANE_OP_NEIGH_TABLE_UPDATE:
+               case DPLANE_OP_GRE_SET:
+               case DPLANE_OP_INTF_ADDR_ADD:
+               case DPLANE_OP_INTF_ADDR_DEL:
+                       zlog_err("Unhandled dplane data for %s",
+                                dplane_op2str(dplane_ctx_get_op(ctx)));
+                       res = ZEBRA_DPLANE_REQUEST_FAILURE;
                }
 
        skip_one:
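
The rewritten switch above enumerates every remaining dplane operation instead of hiding behind a default label, so the compiler's switch-coverage warning will flag any operation added later that this path forgets to handle. A tiny sketch of that pattern in isolation:

        /* Sketch: with no default label, -Wswitch makes the compiler warn
         * whenever a new enumerator is not handled here. */
        enum example_op { OP_A, OP_B };

        static int handle(enum example_op op)
        {
                switch (op) {
                case OP_A:
                        return 1;
                case OP_B:
                        return 2;
                }
                return 0;       /* unreachable while all enumerators are handled */
        }
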
index e516688a1955a54f766f090f4c9f193bd45d079c..46cf2eea7dea60871397daa1b3d49e7c4be806a1 100644 (file)
@@ -71,9 +71,6 @@ struct thread_master *master;
 /* Route retain mode flag. */
 int retain_mode = 0;
 
-/* Allow non-frr entities to delete frr routes */
-int allow_delete = 0;
-
 int graceful_restart;
 
 bool v6_rr_semantics = false;
@@ -336,7 +333,7 @@ int main(int argc, char **argv)
                        // batch_mode = 1;
                        break;
                case 'a':
-                       allow_delete = 1;
+                       zrouter.allow_delete = true;
                        break;
                case 'e': {
                        unsigned long int parsed_multipath =
index f9cb37a3cf7dd691b2a73f8d8c872b26a95c9b60..56f56bfe669d2df7d89855fe56ec5a5acf463021 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "linux/netconf.h"
 
+#include "lib/lib_errors.h"
 #include "zebra/zebra_ns.h"
 #include "zebra/zebra_dplane.h"
 #include "zebra/kernel_netlink.h"
@@ -185,4 +186,56 @@ int netlink_request_netconf(int sockfd)
        return netlink_request(nls, &req);
 }
 
+extern struct zebra_privs_t zserv_privs;
+/*
+ * Currently netconf has no ability to set this state from netlink,
+ * so when we receive a request to do this work in the data plane
+ * we set the value through the /proc filesystem instead.
+ */
+enum netlink_msg_status netlink_put_intf_netconfig(struct nl_batch *bth,
+                                                  struct zebra_dplane_ctx *ctx)
+{
+       const char *ifname = dplane_ctx_get_ifname(ctx);
+       enum dplane_netconf_status_e mpls_on = dplane_ctx_get_netconf_mpls(ctx);
+       char set[64];
+       char mpls_proc[PATH_MAX];
+       int fd, ret = FRR_NETLINK_ERROR;
+
+       snprintf(mpls_proc, sizeof(mpls_proc),
+                "/proc/sys/net/mpls/conf/%s/input", ifname);
+
+       if (mpls_on == DPLANE_NETCONF_STATUS_ENABLED)
+               snprintf(set, sizeof(set), "1\n");
+       else if (mpls_on == DPLANE_NETCONF_STATUS_DISABLED)
+               snprintf(set, sizeof(set), "0\n");
+       else {
+               flog_err_sys(
+                       EC_LIB_DEVELOPMENT,
+                       "%s: Expected interface %s to be set to ENABLED or DISABLED, but was %d",
+                       __func__, ifname, mpls_on);
+               return ret;
+       }
+
+       frr_with_privs (&zserv_privs) {
+               fd = open(mpls_proc, O_WRONLY);
+               if (fd < 0) {
+                       flog_err_sys(
+                               EC_LIB_SOCKET,
+                               "%s: Unable to open %s for writing: %s(%d)",
+                               __func__, mpls_proc, safe_strerror(errno),
+                               errno);
+                       return ret;
+               }
+               if (write(fd, set, 2) == 2)
+                       ret = FRR_NETLINK_SUCCESS;
+               else
+                       flog_err_sys(EC_LIB_SOCKET,
+                                    "%s: Unsuccessful write to %s: %s(%d)",
+                                    __func__, mpls_proc, safe_strerror(errno),
+                                    errno);
+               close(fd);
+       }
+       return ret;
+}
+
 #endif /* HAVE_NETLINK */
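
Because the kernel exposes no netlink attribute for per-interface MPLS input, the dataplane writes "1" or "0" straight into /proc/sys/net/mpls/conf/<ifname>/input under elevated privileges. As a hedged illustration (zebra itself learns the resulting state back from netconf netlink notifications rather than polling), reading the same proc file is one way to confirm the setting took effect:

        #include <stdio.h>

        /* Illustrative only: read back the MPLS input toggle written above;
         * returns 1, 0, or -1 on error. */
        static int mpls_input_enabled(const char *ifname)
        {
                char path[256];
                FILE *fp;
                int val = -1;

                snprintf(path, sizeof(path),
                         "/proc/sys/net/mpls/conf/%s/input", ifname);
                fp = fopen(path, "r");
                if (!fp)
                        return -1;
                if (fscanf(fp, "%d", &val) != 1)
                        val = -1;
                fclose(fp);
                return val;
        }
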
index 3f2e7af7685326271dd5c3688e13cbcfc400129e..1b3450bcb5f774a80ca89571cec54955eeacd426 100644 (file)
@@ -38,6 +38,10 @@ extern int netlink_netconf_change(struct nlmsghdr *h, ns_id_t ns_id,
 /* Request info from the host OS. */
 int netlink_request_netconf(int sockfd);
 
+struct nl_batch;
+
+extern enum netlink_msg_status
+netlink_put_intf_netconfig(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
 
 #ifdef __cplusplus
 }
index 60092c9632e619ced30a6a36acfff35c6379fbc6..a40843e27f479b287fab78a1716cc3b4f49dd97e 100644 (file)
@@ -454,9 +454,7 @@ int zebra_rib_queue_evpn_rem_vtep_add(vrf_id_t vrf_id, vni_t vni,
 int zebra_rib_queue_evpn_rem_vtep_del(vrf_id_t vrf_id, vni_t vni,
                                      struct in_addr vtep_ip);
 
-extern void meta_queue_free(struct meta_queue *mq);
-extern void rib_meta_queue_free_vrf(struct meta_queue *mq,
-                                   struct zebra_vrf *zvrf);
+extern void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf);
 extern int zebra_rib_labeled_unicast(struct route_entry *re);
 extern struct route_table *rib_table_ipv6;
 
index 4952c3eb1a21dae117a991898a9715ebc416f222..d8a22d2cfcd82e8b751a9fef96ee7e196c7af206 100644 (file)
@@ -69,6 +69,10 @@ kernel_pbr_rule_update(struct zebra_dplane_ctx *ctx);
 extern enum zebra_dplane_result
 kernel_intf_update(struct zebra_dplane_ctx *ctx);
 
+extern enum zebra_dplane_result
+kernel_intf_netconf_update(struct zebra_dplane_ctx *ctx);
+extern enum zebra_dplane_result kernel_tc_update(struct zebra_dplane_ctx *ctx);
+
 #endif /* !HAVE_NETLINK */
 
 extern int kernel_neigh_update(int cmd, int ifindex, void *addr, char *lla,
index 21c991c7db2cfa307abd6824fb8978a0b80a0711..0eab1fa8505ef3ab3fc617764c32cda916269214 100644 (file)
@@ -341,6 +341,22 @@ static inline int proto2zebra(int proto, int family, bool is_nexthop)
        case RTPROT_SRTE:
                proto = ZEBRA_ROUTE_SRTE;
                break;
+       case RTPROT_UNSPEC:
+       case RTPROT_REDIRECT:
+       case RTPROT_KERNEL:
+       case RTPROT_BOOT:
+       case RTPROT_GATED:
+       case RTPROT_RA:
+       case RTPROT_MRT:
+       case RTPROT_BIRD:
+       case RTPROT_DNROUTED:
+       case RTPROT_XORP:
+       case RTPROT_NTK:
+       case RTPROT_MROUTED:
+       case RTPROT_KEEPALIVED:
+       case RTPROT_OPENR:
+               proto = ZEBRA_ROUTE_KERNEL;
+               break;
        case RTPROT_ZEBRA:
                if (is_nexthop) {
                        proto = ZEBRA_ROUTE_NHG;
@@ -421,10 +437,10 @@ static enum seg6local_action_t
 parse_encap_seg6local(struct rtattr *tb,
                      struct seg6local_context *ctx)
 {
-       struct rtattr *tb_encap[256] = {};
+       struct rtattr *tb_encap[SEG6_LOCAL_MAX + 1] = {};
        enum seg6local_action_t act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
 
-       netlink_parse_rtattr_nested(tb_encap, 256, tb);
+       netlink_parse_rtattr_nested(tb_encap, SEG6_LOCAL_MAX, tb);
 
        if (tb_encap[SEG6_LOCAL_ACTION])
                act = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_ACTION]);
@@ -449,11 +465,11 @@ parse_encap_seg6local(struct rtattr *tb,
 
 static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs)
 {
-       struct rtattr *tb_encap[256] = {};
+       struct rtattr *tb_encap[SEG6_IPTUNNEL_MAX + 1] = {};
        struct seg6_iptunnel_encap *ipt = NULL;
        struct in6_addr *segments = NULL;
 
-       netlink_parse_rtattr_nested(tb_encap, 256, tb);
+       netlink_parse_rtattr_nested(tb_encap, SEG6_IPTUNNEL_MAX, tb);
 
        /*
          * TODO: It does not support multiple SID lists.
@@ -977,8 +993,19 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
                        if (nhe_id || ng)
                                rib_add_multipath(afi, SAFI_UNICAST, &p,
                                                  &src_p, re, ng, startup);
-                       else
+                       else {
+                               /*
+                                * I really don't see how this is possible,
+                                * but since we are testing for it, let's
+                                * let the end user know why the route
+                                * that was just received was swallowed
+                                * up and forgotten.
+                                */
+                               zlog_err(
+                                       "%s: %pFX multipath RTM_NEWROUTE has an invalid nexthop group from the kernel",
+                                       __func__, &p);
                                XFREE(MTYPE_RE, re);
+                       }
                }
        } else {
                if (nhe_id) {
@@ -1017,7 +1044,6 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,
        struct rtmsg *rtm;
        struct rtattr *tb[RTA_MAX + 1];
        struct mcast_route_data *m;
-       struct mcast_route_data mr;
        int iif = 0;
        int count;
        int oif[256];
@@ -1026,12 +1052,8 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,
        vrf_id_t vrf;
        int table;
 
-       if (mroute)
-               m = mroute;
-       else {
-               memset(&mr, 0, sizeof(mr));
-               m = &mr;
-       }
+       assert(mroute);
+       m = mroute;
 
        rtm = NLMSG_DATA(h);
 
@@ -1138,9 +1160,19 @@ int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
                return 0;
        }
 
-       if (!(rtm->rtm_family == AF_INET ||
-             rtm->rtm_family == AF_INET6 ||
-             rtm->rtm_family == RTNL_FAMILY_IPMR )) {
+       switch (rtm->rtm_family) {
+       case AF_INET:
+       case AF_INET6:
+               break;
+
+       case RTNL_FAMILY_IPMR:
+       case RTNL_FAMILY_IP6MR:
+               /* notifications on IPMR are irrelevant to zebra, we only care
+                * about responses to RTM_GETROUTE requests we sent.
+                */
+               return 0;
+
+       default:
                flog_warn(
                        EC_ZEBRA_UNKNOWN_FAMILY,
                        "Invalid address family: %u received from kernel route change: %s",
@@ -1166,10 +1198,14 @@ int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
                return -1;
        }
 
+       /* These are "magic" kernel-managed *unicast* routes used for
+        * outputting locally generated multicast traffic (which uses unicast
+        * handling on Linux because ~reasons~).
+        */
        if (rtm->rtm_type == RTN_MULTICAST)
-               netlink_route_change_read_multicast(h, ns_id, startup);
-       else
-               netlink_route_change_read_unicast(h, ns_id, startup);
+               return 0;
+
+       netlink_route_change_read_unicast(h, ns_id, startup);
        return 0;
 }
 
@@ -2297,7 +2333,7 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
        struct mcast_route_data *mr = (struct mcast_route_data *)in;
        struct {
                struct nlmsghdr n;
-               struct ndmsg ndm;
+               struct rtmsg rtm;
                char buf[256];
        } req;
 
@@ -2307,17 +2343,17 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
        zns = zvrf->zns;
        memset(&req, 0, sizeof(req));
 
-       req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
+       req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
        req.n.nlmsg_flags = NLM_F_REQUEST;
        req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid;
 
        req.n.nlmsg_type = RTM_GETROUTE;
 
-       nl_attr_put32(&req.n, sizeof(req), RTA_IIF, mroute->ifindex);
-       nl_attr_put32(&req.n, sizeof(req), RTA_OIF, mroute->ifindex);
-
        if (mroute->family == AF_INET) {
-               req.ndm.ndm_family = RTNL_FAMILY_IPMR;
+               req.rtm.rtm_family = RTNL_FAMILY_IPMR;
+               req.rtm.rtm_dst_len = IPV4_MAX_BITLEN;
+               req.rtm.rtm_src_len = IPV4_MAX_BITLEN;
+
                nl_attr_put(&req.n, sizeof(req), RTA_SRC,
                            &mroute->src.ipaddr_v4,
                            sizeof(mroute->src.ipaddr_v4));
@@ -2325,7 +2361,10 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
                            &mroute->grp.ipaddr_v4,
                            sizeof(mroute->grp.ipaddr_v4));
        } else {
-               req.ndm.ndm_family = RTNL_FAMILY_IP6MR;
+               req.rtm.rtm_family = RTNL_FAMILY_IP6MR;
+               req.rtm.rtm_dst_len = IPV6_MAX_BITLEN;
+               req.rtm.rtm_src_len = IPV6_MAX_BITLEN;
+
                nl_attr_put(&req.n, sizeof(req), RTA_SRC,
                            &mroute->src.ipaddr_v6,
                            sizeof(mroute->src.ipaddr_v6));
@@ -2348,8 +2387,13 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
         * and the kernel goes screw you and the delicious cookies you
         * are trying to give me.  So now we have this little hack.
         */
-       actual_table = (zvrf->table_id == RT_TABLE_MAIN) ? RT_TABLE_DEFAULT :
-               zvrf->table_id;
+       if (mroute->family == AF_INET)
+               actual_table = (zvrf->table_id == RT_TABLE_MAIN)
+                                      ? RT_TABLE_DEFAULT
+                                      : zvrf->table_id;
+       else
+               actual_table = zvrf->table_id;
+
        nl_attr_put32(&req.n, sizeof(req), RTA_TABLE, actual_table);
 
        suc = netlink_talk(netlink_route_change_read_multicast, &req.n,
index 5d4ed1e424c48f80f85d4578467e7453a3950460..bf959980be85c4327841b56a1f948526453a885d 100644 (file)
@@ -830,39 +830,51 @@ static int rtadv_make_socket(ns_id_t ns_id)
        int sock = -1;
        int ret = 0;
        struct icmp6_filter filter;
+       int error;
 
        frr_with_privs(&zserv_privs) {
 
                sock = ns_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, ns_id);
-
+               /*
+                * frr_with_privs might set errno too if it fails;
+                * save it to the side.
+                */
+               error = errno;
        }
 
        if (sock < 0) {
+               zlog_warn("RTADV socket for ns: %u failure to create: %s(%u)",
+                         ns_id, safe_strerror(error), error);
                return -1;
        }
 
        ret = setsockopt_ipv6_pktinfo(sock, 1);
        if (ret < 0) {
+               zlog_warn("RTADV failure to set Packet Information");
                close(sock);
                return ret;
        }
        ret = setsockopt_ipv6_multicast_loop(sock, 0);
        if (ret < 0) {
+               zlog_warn("RTADV failure to set multicast Loop detection");
                close(sock);
                return ret;
        }
        ret = setsockopt_ipv6_unicast_hops(sock, 255);
        if (ret < 0) {
+               zlog_warn("RTADV failure to set maximum unicast hops");
                close(sock);
                return ret;
        }
        ret = setsockopt_ipv6_multicast_hops(sock, 255);
        if (ret < 0) {
+               zlog_warn("RTADV failure to set maximum multicast hops");
                close(sock);
                return ret;
        }
        ret = setsockopt_ipv6_hoplimit(sock, 1);
        if (ret < 0) {
+               zlog_warn("RTADV failure to set maximum incoming hop limit");
                close(sock);
                return ret;
        }
index a926c14adf0585b87a1647f97f70b968a4b05f57..298b71598c4b96b0ac3a230d8b17126d60759c67 100644 (file)
@@ -82,6 +82,8 @@ zebra_zebra_SOURCES = \
        zebra/rule_netlink.c \
        zebra/rule_socket.c \
        zebra/table_manager.c \
+       zebra/tc_netlink.c \
+       zebra/tc_socket.c \
        zebra/zapi_msg.c \
        zebra/zebra_dplane.c \
        zebra/zebra_errors.c \
@@ -163,6 +165,7 @@ noinst_HEADERS += \
        zebra/rtadv.h \
        zebra/rule_netlink.h \
        zebra/table_manager.h \
+       zebra/tc_netlink.h \
        zebra/zapi_msg.h \
        zebra/zebra_dplane.h \
        zebra/zebra_errors.h \
diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c
new file mode 100644 (file)
index 0000000..4fb0241
--- /dev/null
@@ -0,0 +1,468 @@
+/*
+ * Zebra Traffic Control (TC) interaction with the kernel using netlink.
+ *
+ * Copyright (C) 2022 Shichu Yang
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING.  If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <zebra.h>
+
+#ifdef HAVE_NETLINK
+
+#include <linux/if_ether.h>
+#include <sys/socket.h>
+
+#include "if.h"
+#include "prefix.h"
+#include "vrf.h"
+
+#include <linux/fib_rules.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include "zebra/zserv.h"
+#include "zebra/zebra_ns.h"
+#include "zebra/zebra_vrf.h"
+#include "zebra/rt.h"
+#include "zebra/interface.h"
+#include "zebra/debug.h"
+#include "zebra/rtadv.h"
+#include "zebra/kernel_netlink.h"
+#include "zebra/tc_netlink.h"
+#include "zebra/zebra_errors.h"
+#include "zebra/zebra_dplane.h"
+#include "zebra/zebra_trace.h"
+
+/* TODO: move these bitflags to zebra_tc.h */
+#define TC_FILTER_SRC_IP (1 << 0)
+#define TC_FILTER_DST_IP (1 << 1)
+#define TC_FILTER_IP_PROTOCOL (1 << 9)
+
+#define TC_FREQ_DEFAULT (100)
+
+#define TC_MAJOR_BASE (0x1000u)
+#define TC_MINOR_NOCLASS (0xffffu)
+
+#define TC_FILTER_MASK (0x8000u)
+
+#define TIME_UNITS_PER_SEC (1000000)
+#define xmittime(r, s) (TIME_UNITS_PER_SEC * ((double)(s) / (double)(r)))
+
+static uint32_t tc_get_freq(void)
+{
+       int freq = 0;
+       FILE *fp = fopen("/proc/net/psched", "r");
+
+       if (fp) {
+               uint32_t nom, denom;
+
+               if (fscanf(fp, "%*08x%*08x%08x%08x", &nom, &denom) == 2) {
+                       if (nom == 1000000)
+                               freq = denom;
+               }
+               fclose(fp);
+       }
+
+       return freq == 0 ? TC_FREQ_DEFAULT : freq;
+}
+
+static inline uint32_t tc_make_handle(uint16_t major, uint16_t minor)
+{
+       return (major) << 16 | (minor);
+}
+
+static inline uint32_t tc_get_handle(struct zebra_dplane_ctx *ctx,
+                                    uint16_t minor)
+{
+       uint16_t major = TC_MAJOR_BASE + (uint16_t)dplane_ctx_get_ifindex(ctx);
+
+       return tc_make_handle(major, minor);
+}
+
+static void tc_calc_rate_table(struct tc_ratespec *ratespec, uint32_t *table,
+                              uint32_t mtu)
+{
+       if (mtu == 0)
+               mtu = 2047;
+
+       int cell_log = -1;
+
+       if (cell_log < 0) {
+               cell_log = 0;
+               while ((mtu >> cell_log) > 255)
+                       cell_log++;
+       }
+
+       for (int i = 0; i < 256; i++)
+               table[i] = xmittime(ratespec->rate, (i + 1) << cell_log);
+
+       ratespec->cell_align = -1;
+       ratespec->cell_log = cell_log;
+       ratespec->linklayer = TC_LINKLAYER_ETHERNET;
+}
+
+static int tc_flower_get_inet_prefix(const struct prefix *prefix,
+                                    struct inet_prefix *addr)
+{
+       addr->family = prefix->family;
+
+       if (addr->family == AF_INET) {
+               addr->bytelen = 4;
+               addr->bitlen = prefix->prefixlen;
+               addr->flags = 0;
+               addr->flags |= PREFIXLEN_SPECIFIED;
+               addr->flags |= ADDRTYPE_INET;
+               memcpy(addr->data, prefix->u.val32, sizeof(prefix->u.val32));
+       } else if (addr->family == AF_INET6) {
+               addr->bytelen = 16;
+               addr->bitlen = prefix->prefixlen;
+               addr->flags = 0;
+               addr->flags |= PREFIXLEN_SPECIFIED;
+               addr->flags |= ADDRTYPE_INET;
+               memcpy(addr->data, prefix->u.val, sizeof(prefix->u.val));
+       } else {
+               return -1;
+       }
+
+       return 0;
+}
+
+static int tc_flower_get_inet_mask(const struct prefix *prefix,
+                                  struct inet_prefix *addr)
+{
+       addr->family = prefix->family;
+
+       if (addr->family == AF_INET) {
+               addr->bytelen = 4;
+               addr->bitlen = prefix->prefixlen;
+               addr->flags = 0;
+               addr->flags |= PREFIXLEN_SPECIFIED;
+               addr->flags |= ADDRTYPE_INET;
+       } else if (addr->family == AF_INET6) {
+               addr->bytelen = 16;
+               addr->bitlen = prefix->prefixlen;
+               addr->flags = 0;
+               addr->flags |= PREFIXLEN_SPECIFIED;
+               addr->flags |= ADDRTYPE_INET;
+       } else {
+               return -1;
+       }
+
+       memset(addr->data, 0xff, addr->bytelen);
+
+       int rest = prefix->prefixlen;
+
+       for (int i = 0; i < addr->bytelen / 4; i++) {
+               if (!rest) {
+                       addr->data[i] = 0;
+               } else if (rest / 32 >= 1) {
+                       rest -= 32;
+               } else {
+                       addr->data[i] <<= 32 - rest;
+                       addr->data[i] = htonl(addr->data[i]);
+                       rest = 0;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Traffic control queue discipline encoding (only "htb" supported)
+ */
+static ssize_t netlink_qdisc_msg_encode(int cmd, struct zebra_dplane_ctx *ctx,
+                                       void *data, size_t datalen)
+{
+       struct nlsock *nl;
+
+       const char *kind = "htb";
+
+       struct tc_htb_glob htb_glob = {
+               .rate2quantum = 10, .version = 3, .defcls = TC_MINOR_NOCLASS};
+
+       struct rtattr *nest;
+
+       struct {
+               struct nlmsghdr n;
+               struct tcmsg t;
+               char buf[0];
+       } *req = (void *)data;
+
+       if (datalen < sizeof(*req))
+               return 0;
+
+       nl = kernel_netlink_nlsock_lookup(dplane_ctx_get_ns_sock(ctx));
+
+       memset(req, 0, sizeof(*req));
+
+       req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+       req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST;
+
+       req->n.nlmsg_flags |= NLM_F_REPLACE;
+
+       req->n.nlmsg_type = cmd;
+
+       req->n.nlmsg_pid = nl->snl.nl_pid;
+
+       req->t.tcm_family = AF_UNSPEC;
+       req->t.tcm_ifindex = dplane_ctx_get_ifindex(ctx);
+       req->t.tcm_handle = tc_get_handle(ctx, 0);
+       req->t.tcm_parent = TC_H_ROOT;
+
+       nl_attr_put(&req->n, datalen, TCA_KIND, kind, strlen(kind) + 1);
+
+       nest = nl_attr_nest(&req->n, datalen, TCA_OPTIONS);
+
+       nl_attr_put(&req->n, datalen, TCA_HTB_INIT, &htb_glob,
+                   sizeof(htb_glob));
+       nl_attr_nest_end(&req->n, nest);
+
+       return NLMSG_ALIGN(req->n.nlmsg_len);
+}
+
+/*
+ * Traffic control class encoding
+ */
+static ssize_t netlink_tclass_msg_encode(int cmd, struct zebra_dplane_ctx *ctx,
+                                        void *data, size_t datalen)
+{
+       struct nlsock *nl;
+       struct tc_htb_opt htb_opt = {};
+
+       uint64_t rate, ceil;
+       uint64_t buffer, cbuffer;
+
+       /* TODO: fetch mtu from interface */
+       uint32_t mtu = 0;
+
+       uint32_t rtab[256];
+       uint32_t ctab[256];
+
+       struct rtattr *nest;
+
+       struct {
+               struct nlmsghdr n;
+               struct tcmsg t;
+               char buf[0];
+       } *req = (void *)data;
+
+       if (datalen < sizeof(*req))
+               return 0;
+
+       nl = kernel_netlink_nlsock_lookup(dplane_ctx_get_ns_sock(ctx));
+
+       memset(req, 0, sizeof(*req));
+
+       req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+       req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST;
+
+       req->n.nlmsg_type = cmd;
+
+       req->n.nlmsg_pid = nl->snl.nl_pid;
+
+       req->t.tcm_family = AF_UNSPEC;
+       req->t.tcm_ifindex = dplane_ctx_get_ifindex(ctx);
+       req->t.tcm_handle = tc_get_handle(ctx, 1);
+       req->t.tcm_parent = tc_get_handle(ctx, 0);
+
+       rate = dplane_ctx_tc_get_rate(ctx);
+       ceil = dplane_ctx_tc_get_ceil(ctx);
+
+       ceil = ceil < rate ? rate : ceil;
+
+       htb_opt.rate.rate = (rate >> 32 != 0) ? ~0U : rate;
+       htb_opt.ceil.rate = (ceil >> 32 != 0) ? ~0U : ceil;
+
+       buffer = rate / tc_get_freq(), cbuffer = ceil / tc_get_freq();
+
+       htb_opt.buffer = buffer;
+       htb_opt.cbuffer = cbuffer;
+
+       tc_calc_rate_table(&htb_opt.rate, rtab, mtu);
+       tc_calc_rate_table(&htb_opt.ceil, ctab, mtu);
+
+       htb_opt.ceil.mpu = htb_opt.rate.mpu = 0;
+       htb_opt.ceil.overhead = htb_opt.rate.overhead = 0;
+
+       nest = nl_attr_nest(&req->n, datalen, TCA_OPTIONS);
+
+       if (rate >> 32 != 0) {
+               nl_attr_put(&req->n, datalen, TCA_HTB_RATE64, &rate,
+                           sizeof(rate));
+       }
+
+       if (ceil >> 32 != 0) {
+               nl_attr_put(&req->n, datalen, TCA_HTB_CEIL64, &ceil,
+                           sizeof(ceil));
+       }
+
+       nl_attr_put(&req->n, datalen, TCA_HTB_PARMS, &htb_opt, sizeof(htb_opt));
+
+       nl_attr_put(&req->n, datalen, TCA_HTB_RTAB, rtab, sizeof(rtab));
+       nl_attr_put(&req->n, datalen, TCA_HTB_CTAB, ctab, sizeof(ctab));
+       nl_attr_nest_end(&req->n, nest);
+
+       return NLMSG_ALIGN(req->n.nlmsg_len);
+}
+
+/*
+ * Traffic control filter encoding (only "flower" supported)
+ */
+static ssize_t netlink_tfilter_msg_encode(int cmd, struct zebra_dplane_ctx *ctx,
+                                         void *data, size_t datalen)
+{
+       struct nlsock *nl;
+       struct rtattr *nest;
+
+       const char *kind = "flower";
+
+       uint16_t priority;
+       uint16_t protocol;
+       uint32_t classid;
+       uint32_t filter_bm;
+       uint32_t flags = 0;
+
+       struct inet_prefix addr;
+
+       struct {
+               struct nlmsghdr n;
+               struct tcmsg t;
+               char buf[0];
+       } *req = (void *)data;
+
+       if (datalen < sizeof(*req))
+               return 0;
+
+       nl = kernel_netlink_nlsock_lookup(dplane_ctx_get_ns_sock(ctx));
+
+       memset(req, 0, sizeof(*req));
+
+       req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+       req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST;
+
+       req->n.nlmsg_flags |= NLM_F_EXCL;
+
+       req->n.nlmsg_type = cmd;
+
+       req->n.nlmsg_pid = nl->snl.nl_pid;
+
+       req->t.tcm_family = AF_UNSPEC;
+       req->t.tcm_ifindex = dplane_ctx_get_ifindex(ctx);
+
+       /* TODO: priority and layer-3 protocol support */
+       priority = 0;
+       protocol = htons(ETH_P_IP);
+       classid = tc_get_handle(ctx, 1);
+       filter_bm = dplane_ctx_tc_get_filter_bm(ctx);
+
+       req->t.tcm_info = tc_make_handle(priority, protocol);
+
+       req->t.tcm_handle = 1;
+       req->t.tcm_parent = tc_get_handle(ctx, 0);
+
+       nl_attr_put(&req->n, datalen, TCA_KIND, kind, strlen(kind) + 1);
+       nest = nl_attr_nest(&req->n, datalen, TCA_OPTIONS);
+
+       nl_attr_put(&req->n, datalen, TCA_FLOWER_CLASSID, &classid,
+                   sizeof(classid));
+
+       if (filter_bm & TC_FILTER_SRC_IP) {
+               const struct prefix *src_p = dplane_ctx_tc_get_src_ip(ctx);
+
+               if (tc_flower_get_inet_prefix(src_p, &addr) != 0)
+                       return 0;
+
+               nl_attr_put(&req->n, datalen,
+                           (addr.family == AF_INET) ? TCA_FLOWER_KEY_IPV4_SRC
+                                                    : TCA_FLOWER_KEY_IPV6_SRC,
+                           addr.data, addr.bytelen);
+
+               if (tc_flower_get_inet_mask(src_p, &addr) != 0)
+                       return 0;
+
+               nl_attr_put(&req->n, datalen,
+                           (addr.family == AF_INET)
+                                   ? TCA_FLOWER_KEY_IPV4_SRC_MASK
+                                   : TCA_FLOWER_KEY_IPV6_SRC_MASK,
+                           addr.data, addr.bytelen);
+       }
+
+       if (filter_bm & TC_FILTER_DST_IP) {
+               const struct prefix *dst_p = dplane_ctx_tc_get_dst_ip(ctx);
+
+               if (tc_flower_get_inet_prefix(dst_p, &addr) != 0)
+                       return 0;
+
+               nl_attr_put(&req->n, datalen,
+                           (addr.family == AF_INET) ? TCA_FLOWER_KEY_IPV4_DST
+                                                    : TCA_FLOWER_KEY_IPV6_DST,
+                           addr.data, addr.bytelen);
+
+               if (tc_flower_get_inet_mask(dst_p, &addr) != 0)
+                       return 0;
+
+               nl_attr_put(&req->n, datalen,
+                           (addr.family == AF_INET)
+                                   ? TCA_FLOWER_KEY_IPV4_DST_MASK
+                                   : TCA_FLOWER_KEY_IPV6_DST_MASK,
+                           addr.data, addr.bytelen);
+       }
+
+       if (filter_bm & TC_FILTER_IP_PROTOCOL) {
+               nl_attr_put8(&req->n, datalen, TCA_FLOWER_KEY_IP_PROTO,
+                            dplane_ctx_tc_get_ip_proto(ctx));
+       }
+
+       nl_attr_put32(&req->n, datalen, TCA_FLOWER_FLAGS, flags);
+
+       nl_attr_put16(&req->n, datalen, TCA_FLOWER_KEY_ETH_TYPE, protocol);
+       nl_attr_nest_end(&req->n, nest);
+
+       return NLMSG_ALIGN(req->n.nlmsg_len);
+}
+
+static ssize_t netlink_newqdisc_msg_encoder(struct zebra_dplane_ctx *ctx,
+                                           void *buf, size_t buflen)
+{
+       return netlink_qdisc_msg_encode(RTM_NEWQDISC, ctx, buf, buflen);
+}
+
+static ssize_t netlink_newtclass_msg_encoder(struct zebra_dplane_ctx *ctx,
+                                            void *buf, size_t buflen)
+{
+       return netlink_tclass_msg_encode(RTM_NEWTCLASS, ctx, buf, buflen);
+}
+
+static ssize_t netlink_newtfilter_msg_encoder(struct zebra_dplane_ctx *ctx,
+                                             void *buf, size_t buflen)
+{
+       return netlink_tfilter_msg_encode(RTM_NEWTFILTER, ctx, buf, buflen);
+}
+
+enum netlink_msg_status netlink_put_tc_update_msg(struct nl_batch *bth,
+                                                 struct zebra_dplane_ctx *ctx)
+{
+       /* TODO: error handling and other actions (delete, replace, ...) */
+
+       netlink_batch_add_msg(bth, ctx, netlink_newqdisc_msg_encoder, false);
+       netlink_batch_add_msg(bth, ctx, netlink_newtclass_msg_encoder, false);
+       return netlink_batch_add_msg(bth, ctx, netlink_newtfilter_msg_encoder,
+                                    false);
+}
+
+#endif /* HAVE_NETLINK */
diff --git a/zebra/tc_netlink.h b/zebra/tc_netlink.h
new file mode 100644 (file)
index 0000000..2190bca
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Zebra Traffic Control (TC) interaction with the kernel using netlink.
+ *
+ * Copyright (C) 2022 Shichu Yang
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING.  If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _ZEBRA_TC_NETLINK_H
+#define _ZEBRA_TC_NETLINK_H
+
+#ifdef HAVE_NETLINK
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Represent a prefixed address in flower filter */
+
+struct inet_prefix {
+       uint16_t flags;
+       uint16_t bytelen;
+       uint16_t bitlen;
+       uint16_t family;
+       uint32_t data[64];
+};
+
+enum {
+       PREFIXLEN_SPECIFIED = (1 << 0),
+       ADDRTYPE_INET = (1 << 1),
+       ADDRTYPE_UNSPEC = (1 << 2),
+       ADDRTYPE_MULTI = (1 << 3),
+
+       ADDRTYPE_INET_UNSPEC = ADDRTYPE_INET | ADDRTYPE_UNSPEC,
+       ADDRTYPE_INET_MULTI = ADDRTYPE_INET | ADDRTYPE_MULTI
+};
+
+extern enum netlink_msg_status
+netlink_put_tc_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HAVE_NETLINK */
+
+#endif /* _ZEBRA_TC_NETLINK_H */
diff --git a/zebra/tc_socket.c b/zebra/tc_socket.c
new file mode 100644 (file)
index 0000000..0bf9e48
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Zebra Traffic Control (TC) interaction with the kernel using socket.
+ *
+ * Copyright (C) 2022 Shichu Yang
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING.  If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <zebra.h>
+
+#ifndef HAVE_NETLINK
+
+#include "lib_errors.h"
+
+#include "zebra/rt.h"
+#include "zebra/zebra_dplane.h"
+#include "zebra/zebra_errors.h"
+
+enum zebra_dplane_result kernel_tc_update(struct zebra_dplane_ctx *ctx)
+{
+       flog_err(EC_LIB_UNAVAILABLE, "%s not Implemented for this platform",
+                __func__);
+       return ZEBRA_DPLANE_REQUEST_FAILURE;
+}
+
+#endif /* !HAVE_NETLINK */
index 98959430167c1ac3b90d0ee975fc84014bcc524e..a578395ef8cf2bda46113663164830a47e4e35b8 100644 (file)
@@ -2492,7 +2492,6 @@ static void zread_mpls_labels_add(ZAPI_HANDLER_ARGS)
 {
        struct stream *s;
        struct zapi_labels zl;
-       int ret;
 
        /* Get input stream.  */
        s = msg;
@@ -2510,12 +2509,7 @@ static void zread_mpls_labels_add(ZAPI_HANDLER_ARGS)
        if (zapi_labels_validate(&zl) < 0)
                return;
 
-       ret = mpls_zapi_labels_process(true, zvrf, &zl);
-       if (ret < 0) {
-               if (IS_ZEBRA_DEBUG_RECV)
-                       zlog_debug("%s: Error processing zapi request",
-                                  __func__);
-       }
+       mpls_zapi_labels_process(true, zvrf, &zl);
 }
 
 /*
@@ -2532,7 +2526,6 @@ static void zread_mpls_labels_delete(ZAPI_HANDLER_ARGS)
 {
        struct stream *s;
        struct zapi_labels zl;
-       int ret;
 
        /* Get input stream.  */
        s = msg;
@@ -2547,12 +2540,7 @@ static void zread_mpls_labels_delete(ZAPI_HANDLER_ARGS)
                return;
 
        if (zl.nexthop_num > 0) {
-               ret = mpls_zapi_labels_process(false /*delete*/, zvrf, &zl);
-               if (ret < 0) {
-                       if (IS_ZEBRA_DEBUG_RECV)
-                               zlog_debug("%s: Error processing zapi request",
-                                          __func__);
-               }
+               mpls_zapi_labels_process(false /*delete*/, zvrf, &zl);
        } else {
                mpls_lsp_uninstall_all_vrf(zvrf, zl.type, zl.local_label);
 
index 424cea467309ec4b9561d37927280d9554b11040..5a816a4c19aebf2770f948f4f4b07f3f5e3cc706 100644 (file)
@@ -313,6 +313,25 @@ struct dplane_netconf_info {
        enum dplane_netconf_status_e linkdown_val;
 };
 
+/*
+ * Traffic control contexts for the dplane
+ */
+struct dplane_tc_info {
+       /* Rate spec (unit: Bytes/s) */
+       uint64_t rate;
+       uint64_t ceil;
+
+       /* TODO: custom burst */
+
+       /* Filter components for "tfilter" */
+       uint32_t filter_bm;
+       struct prefix src_ip;
+       struct prefix dst_ip;
+       uint8_t ip_proto;
+
+       /* TODO: more filter components */
+};
+
 /*
  * The context block used to exchange info about route updates across
  * the boundary between the zebra main context (and pthread) and the
@@ -362,6 +381,7 @@ struct zebra_dplane_ctx {
                struct dplane_mac_info macinfo;
                struct dplane_neigh_info neigh;
                struct dplane_rule_info rule;
+               struct dplane_tc_info tc;
                struct zebra_pbr_iptable iptable;
                struct zebra_pbr_ipset ipset;
                struct {
@@ -509,6 +529,8 @@ static struct zebra_dplane_globals {
 
        _Atomic uint32_t dg_intf_addrs_in;
        _Atomic uint32_t dg_intf_addr_errors;
+       _Atomic uint32_t dg_intf_changes;
+       _Atomic uint32_t dg_intf_changes_errors;
 
        _Atomic uint32_t dg_macs_in;
        _Atomic uint32_t dg_mac_errors;
@@ -538,6 +560,9 @@ static struct zebra_dplane_globals {
        _Atomic uint32_t dg_intfs_in;
        _Atomic uint32_t dg_intf_errors;
 
+       _Atomic uint32_t dg_tcs_in;
+       _Atomic uint32_t dg_tcs_errors;
+
        /* Dataplane pthread */
        struct frr_pthread *dg_pthread;
 
@@ -775,6 +800,9 @@ static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
        case DPLANE_OP_INTF_INSTALL:
        case DPLANE_OP_INTF_UPDATE:
        case DPLANE_OP_INTF_DELETE:
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
                break;
 
        case DPLANE_OP_IPSET_ENTRY_ADD:
@@ -1098,6 +1126,16 @@ const char *dplane_op2str(enum dplane_op_e op)
        case DPLANE_OP_INTF_DELETE:
                ret = "INTF_DELETE";
                break;
+
+       case DPLANE_OP_TC_INSTALL:
+               ret = "TC_INSTALL";
+               break;
+       case DPLANE_OP_TC_UPDATE:
+               ret = "TC_UPDATE";
+               break;
+       case DPLANE_OP_TC_DELETE:
+               ret = "TC_DELETE";
+               break;
        }
 
        return ret;
@@ -1417,6 +1455,50 @@ uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
        return ctx->u.rinfo.zd_old_distance;
 }
 
+uint64_t dplane_ctx_tc_get_rate(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return ctx->u.tc.rate;
+}
+
+uint64_t dplane_ctx_tc_get_ceil(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return ctx->u.tc.ceil;
+}
+
+uint32_t dplane_ctx_tc_get_filter_bm(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return ctx->u.tc.filter_bm;
+}
+
+const struct prefix *
+dplane_ctx_tc_get_src_ip(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return &(ctx->u.tc.src_ip);
+}
+
+const struct prefix *
+dplane_ctx_tc_get_dst_ip(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return &(ctx->u.tc.dst_ip);
+}
+
+uint8_t dplane_ctx_tc_get_ip_proto(const struct zebra_dplane_ctx *ctx)
+{
+       DPLANE_CTX_VALID(ctx);
+
+       return ctx->u.tc.ip_proto;
+}
+
 /*
  * Set the nexthops associated with a context: note that processing code
  * may well expect that nexthops are in canonical (sorted) order, so we
@@ -2689,6 +2771,25 @@ done:
        return ret;
 }
 
+int dplane_ctx_tc_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
+{
+       int ret = EINVAL;
+
+       struct zebra_ns *zns = NULL;
+
+       ctx->zd_op = op;
+       ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+
+       /* TODO: init traffic control qdisc */
+       zns = zebra_ns_lookup(NS_DEFAULT);
+
+       dplane_ctx_ns_init(ctx, zns, true);
+
+       ret = AOK;
+
+       return ret;
+}
+
 /**
  * dplane_ctx_nexthop_init() - Initialize a context block for a nexthop update
  *
@@ -3408,6 +3509,47 @@ dplane_route_update_internal(struct route_node *rn,
        return result;
 }
 
+static enum zebra_dplane_result dplane_tc_update_internal(enum dplane_op_e op)
+{
+       enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+       int ret = EINVAL;
+       struct zebra_dplane_ctx *ctx = NULL;
+
+       /* Obtain context block */
+       ctx = dplane_ctx_alloc();
+
+       if (!ctx) {
+               ret = ENOMEM;
+               goto done;
+       }
+
+       /* Init context with info from zebra data structs */
+       ret = dplane_ctx_tc_init(ctx, op);
+
+       if (ret == AOK)
+               ret = dplane_update_enqueue(ctx);
+
+done:
+       /* Update counter */
+       atomic_fetch_add_explicit(&zdplane_info.dg_tcs_in, 1,
+                                 memory_order_relaxed);
+       if (ret == AOK) {
+               result = ZEBRA_DPLANE_REQUEST_QUEUED;
+       } else {
+               atomic_fetch_add_explicit(&zdplane_info.dg_tcs_errors, 1,
+                                         memory_order_relaxed);
+               if (ctx)
+                       dplane_ctx_free(&ctx);
+       }
+
+       return result;
+}
+
+enum zebra_dplane_result dplane_tc_update(void)
+{
+       return dplane_tc_update_internal(DPLANE_OP_TC_UPDATE);
+}
+
 /**
  * dplane_nexthop_update_internal() - Helper for enqueuing nexthop changes
  *
@@ -3420,7 +3562,7 @@ static enum zebra_dplane_result
 dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op)
 {
        enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
-       int ret = EINVAL;
+       int ret;
        struct zebra_dplane_ctx *ctx = NULL;
 
        /* Obtain context block */
@@ -3698,7 +3840,7 @@ dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
                        struct zebra_dplane_ctx *notif_ctx)
 {
        enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
-       int ret = EINVAL;
+       int ret;
        struct zebra_dplane_ctx *ctx = NULL;
        struct nhlfe_list_head *head;
        struct zebra_nhlfe *nhlfe, *new_nhlfe;
@@ -3913,6 +4055,47 @@ dplane_br_port_update(const struct interface *ifp, bool non_df,
        return result;
 }
 
+enum zebra_dplane_result
+dplane_intf_mpls_modify_state(const struct interface *ifp, bool set)
+{
+       enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+       struct zebra_dplane_ctx *ctx;
+       struct zebra_ns *zns;
+       int ret = EINVAL;
+
+       ctx = dplane_ctx_alloc();
+       ctx->zd_op = DPLANE_OP_INTF_NETCONFIG;
+       ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+       ctx->zd_vrf_id = ifp->vrf->vrf_id;
+       strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
+
+       zns = zebra_ns_lookup(ifp->vrf->vrf_id);
+       dplane_ctx_ns_init(ctx, zns, false);
+
+       ctx->zd_ifindex = ifp->ifindex;
+       if (set)
+               dplane_ctx_set_netconf_mpls(ctx, DPLANE_NETCONF_STATUS_ENABLED);
+       else
+               dplane_ctx_set_netconf_mpls(ctx,
+                                           DPLANE_NETCONF_STATUS_DISABLED);
+       /* Increment counter */
+       atomic_fetch_add_explicit(&zdplane_info.dg_intf_changes, 1,
+                                 memory_order_relaxed);
+
+       ret = dplane_update_enqueue(ctx);
+
+       if (ret == AOK)
+               result = ZEBRA_DPLANE_REQUEST_QUEUED;
+       else {
+               /* Error counter */
+               atomic_fetch_add_explicit(&zdplane_info.dg_intf_changes_errors,
+                                         1, memory_order_relaxed);
+               dplane_ctx_free(&ctx);
+       }
+
+       return result;
+}
+
 /*
  * Enqueue interface address add for the dataplane.
  */
@@ -4032,7 +4215,7 @@ static enum zebra_dplane_result
 dplane_intf_update_internal(const struct interface *ifp, enum dplane_op_e op)
 {
        enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
-       int ret = EINVAL;
+       int ret;
        struct zebra_dplane_ctx *ctx = NULL;
 
        /* Obtain context block */
@@ -4900,6 +5083,13 @@ int dplane_show_helper(struct vty *vty, bool detailed)
        vty_out(vty, "Intf addr updates:        %"PRIu64"\n", incoming);
        vty_out(vty, "Intf addr errors:         %"PRIu64"\n", errs);
 
+       incoming = atomic_load_explicit(&zdplane_info.dg_intf_changes,
+                                       memory_order_relaxed);
+       errs = atomic_load_explicit(&zdplane_info.dg_intf_changes_errors,
+                                   memory_order_relaxed);
+       vty_out(vty, "Intf change updates:        %" PRIu64 "\n", incoming);
+       vty_out(vty, "Intf change errors:         %" PRIu64 "\n", errs);
+
        incoming = atomic_load_explicit(&zdplane_info.dg_macs_in,
                                        memory_order_relaxed);
        errs = atomic_load_explicit(&zdplane_info.dg_mac_errors,
@@ -5541,6 +5731,13 @@ static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
                           dplane_ctx_get_ifindex(ctx),
                           dplane_ctx_intf_is_protodown(ctx));
                break;
+
+       /* TODO: more detailed log */
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
+               zlog_debug("Dplane tc ifidx %u", dplane_ctx_get_ifindex(ctx));
+               break;
        }
 }
 
@@ -5684,6 +5881,14 @@ static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
                                                  1, memory_order_relaxed);
                break;
 
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
+               if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+                       atomic_fetch_add_explicit(&zdplane_info.dg_tcs_errors,
+                                                 1, memory_order_relaxed);
+               break;
+
        /* Ignore 'notifications' - no-op */
        case DPLANE_OP_SYS_ROUTE_ADD:
        case DPLANE_OP_SYS_ROUTE_DELETE:
@@ -6043,8 +6248,8 @@ static void dplane_check_shutdown_status(struct thread *event)
                zns_info_list_del(&zdplane_info.dg_zns_list, zi);
 
                if (zdplane_info.dg_master) {
-                       thread_cancel(&zi->t_read);
-                       thread_cancel(&zi->t_request);
+                       THREAD_OFF(zi->t_read);
+                       THREAD_OFF(zi->t_request);
                }
 
                XFREE(MTYPE_DP_NS, zi);
index d940bd9568e38f3481fe43125f75cf29ee059e7b..8b239a9ba19f8ebfe61b908b6e5e5fe8b449ae5d 100644 (file)
@@ -193,6 +193,11 @@ enum dplane_op_e {
        DPLANE_OP_INTF_INSTALL,
        DPLANE_OP_INTF_UPDATE,
        DPLANE_OP_INTF_DELETE,
+
+       /* Traffic control */
+       DPLANE_OP_TC_INSTALL,
+       DPLANE_OP_TC_UPDATE,
+       DPLANE_OP_TC_DELETE,
 };
 
 /*
@@ -378,6 +383,16 @@ uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx);
 void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance);
 uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx);
 
+/* Accessors for traffic control context */
+uint64_t dplane_ctx_tc_get_rate(const struct zebra_dplane_ctx *ctx);
+uint64_t dplane_ctx_tc_get_ceil(const struct zebra_dplane_ctx *ctx);
+uint32_t dplane_ctx_tc_get_filter_bm(const struct zebra_dplane_ctx *ctx);
+const struct prefix *
+dplane_ctx_tc_get_src_ip(const struct zebra_dplane_ctx *ctx);
+const struct prefix *
+dplane_ctx_tc_get_dst_ip(const struct zebra_dplane_ctx *ctx);
+uint8_t dplane_ctx_tc_get_ip_proto(const struct zebra_dplane_ctx *ctx);
+
 void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh);
 void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
                               const struct nexthop_group *nhg);
@@ -690,6 +705,8 @@ enum zebra_dplane_result dplane_lsp_notif_update(struct zebra_lsp *lsp,
 enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw);
 enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw);
 
+enum zebra_dplane_result
+dplane_intf_mpls_modify_state(const struct interface *ifp, const bool set);
 /*
  * Enqueue interface address changes for the dataplane.
  */
@@ -705,6 +722,13 @@ enum zebra_dplane_result dplane_intf_add(const struct interface *ifp);
 enum zebra_dplane_result dplane_intf_update(const struct interface *ifp);
 enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp);
 
+/*
+ * Enqueue interface link changes for the dataplane.
+ */
+enum zebra_dplane_result dplane_tc_add(void);
+enum zebra_dplane_result dplane_tc_update(void);
+enum zebra_dplane_result dplane_tc_delete(void);
+
 /*
  * Link layer operations for the dataplane.
  */
@@ -847,6 +871,9 @@ int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
 int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
                         const struct interface *ifp);
 
+/* Encode traffic control information into data plane context. */
+int dplane_ctx_tc_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op);
+
 /* Retrieve the limit on the number of pending, unprocessed updates. */
 uint32_t dplane_get_in_queue_limit(void);
 
index 38a36cc7aef0b291a995c4322cb1343cf97edec3..cbdc17653b9a26e7c864a2b5d323bf10382eb3b1 100644 (file)
@@ -374,8 +374,9 @@ static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac *mac, char *buf,
                                                                : "",
                CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE) ? "DUP " : "",
                CHECK_FLAG(mac->flags, ZEBRA_MAC_FPM_SENT) ? "FPM " : "",
-               CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE) ? "LOC Active "
-                                                                : "",
+               CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE)
+                       ? "PEER Active "
+                       : "",
                CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY) ? "PROXY " : "",
                CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)
                        ? "LOC Inactive "
index d463411dea17278ef99b90c6533d82aa1500ba54..6d90a603f7b80c02f485c3ef0548cee6be9cd6c6 100644 (file)
@@ -702,11 +702,6 @@ struct zebra_neigh *zebra_evpn_proc_sync_neigh_update(
                        n->flags |= ZEBRA_NEIGH_LOCAL_INACTIVE;
                }
 
-               if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT))
-                       SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_PROXY);
-               else
-                       SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_ACTIVE);
-
                if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT)) {
                        SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_PROXY);
                        /* if the neigh was peer-active previously we
index 39b7156ce4f0c91c3a23eb45fa5bda2ecaea81da..21acaa823c6231908035600ab2d37894acbb8805 100644 (file)
@@ -496,7 +496,7 @@ static inline void zfpm_write_on(void)
  */
 static inline void zfpm_read_off(void)
 {
-       thread_cancel(&zfpm_g->t_read);
+       THREAD_OFF(zfpm_g->t_read);
 }
 
 /*
@@ -504,12 +504,12 @@ static inline void zfpm_read_off(void)
  */
 static inline void zfpm_write_off(void)
 {
-       thread_cancel(&zfpm_g->t_write);
+       THREAD_OFF(zfpm_g->t_write);
 }
 
 static inline void zfpm_connect_off(void)
 {
-       thread_cancel(&zfpm_g->t_connect);
+       THREAD_OFF(zfpm_g->t_connect);
 }
 
 /*
@@ -583,7 +583,7 @@ static void zfpm_connection_up(const char *detail)
        /*
         * Start thread to push existing routes to the FPM.
         */
-       thread_cancel(&zfpm_g->t_conn_up);
+       THREAD_OFF(zfpm_g->t_conn_up);
 
        zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
        zfpm_g->fpm_mac_dump_done = false;
@@ -1687,7 +1687,7 @@ static void zfpm_stop_stats_timer(void)
                return;
 
        zfpm_debug("Stopping existing stats timer");
-       thread_cancel(&zfpm_g->t_stats);
+       THREAD_OFF(zfpm_g->t_stats);
 }
 
 /*
index 8237bebf3b1bb0a596ad4f7aa932ca8fd8392b09..3010a516b964390b40ba12f1ff0702a7649b7dec 100644 (file)
@@ -53,7 +53,7 @@ DEFINE_MTYPE_STATIC(ZEBRA, LSP, "MPLS LSP object");
 DEFINE_MTYPE_STATIC(ZEBRA, FEC, "MPLS FEC object");
 DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object");
 
-int mpls_enabled;
+bool mpls_enabled;
 bool mpls_pw_reach_strict; /* Strict reachability checking */
 
 /* static function declarations */
@@ -1035,15 +1035,12 @@ static void lsp_processq_del(struct work_queue *wq, void *data)
        struct zebra_lsp *lsp;
        struct hash *lsp_table;
        struct zebra_nhlfe *nhlfe;
-       bool in_shutdown = false;
 
        /* If zebra is shutting down, don't delete any structs,
         * just ignore this callback. The LSPs will be cleaned up
         * during the shutdown processing.
         */
-       in_shutdown = atomic_load_explicit(&zrouter.in_shutdown,
-                                          memory_order_relaxed);
-       if (in_shutdown)
+       if (zebra_router_in_shutdown())
                return;
 
        zvrf = vrf_info_lookup(VRF_DEFAULT);
@@ -1142,6 +1139,21 @@ static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp)
                lsp_free(lsp_table, plsp);
 }
 
+static void lsp_free_nhlfe(struct zebra_lsp *lsp)
+{
+       struct zebra_nhlfe *nhlfe;
+
+       while ((nhlfe = nhlfe_list_first(&lsp->nhlfe_list))) {
+               nhlfe_list_del(&lsp->nhlfe_list, nhlfe);
+               nhlfe_free(nhlfe);
+       }
+
+       while ((nhlfe = nhlfe_list_first(&lsp->backup_nhlfe_list))) {
+               nhlfe_list_del(&lsp->backup_nhlfe_list, nhlfe);
+               nhlfe_free(nhlfe);
+       }
+}
+
 /*
  * Dtor for an LSP: remove from ile hash, release any internal allocations,
  * free LSP object.
@@ -1149,7 +1161,6 @@ static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp)
 static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp)
 {
        struct zebra_lsp *lsp;
-       struct zebra_nhlfe *nhlfe;
 
        if (plsp == NULL || *plsp == NULL)
                return;
@@ -1160,13 +1171,7 @@ static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp)
                zlog_debug("Free LSP in-label %u flags 0x%x",
                           lsp->ile.in_label, lsp->flags);
 
-       /* Free nhlfes, if any. */
-       frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe)
-               nhlfe_del(nhlfe);
-
-       /* Free backup nhlfes, if any. */
-       frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe)
-               nhlfe_del(nhlfe);
+       lsp_free_nhlfe(lsp);
 
        hash_release(lsp_table, &lsp->ile);
        XFREE(MTYPE_LSP, lsp);
@@ -1743,14 +1748,9 @@ static int lsp_cmp(const struct zebra_lsp *lsp1, const struct zebra_lsp *lsp2)
 /*
  * Initialize work queue for processing changed LSPs.
  */
-static int mpls_processq_init(void)
+static void mpls_processq_init(void)
 {
        zrouter.lsp_process_q = work_queue_new(zrouter.master, "LSP processing");
-       if (!zrouter.lsp_process_q) {
-               flog_err(EC_ZEBRA_WQ_NONEXISTENT,
-                        "%s: could not initialise work queue!", __func__);
-               return -1;
-       }
 
        zrouter.lsp_process_q->spec.workfunc = &lsp_process;
        zrouter.lsp_process_q->spec.del_item_data = &lsp_processq_del;
@@ -1758,8 +1758,6 @@ static int mpls_processq_init(void)
        zrouter.lsp_process_q->spec.completion_func = &lsp_processq_complete;
        zrouter.lsp_process_q->spec.max_retries = 0;
        zrouter.lsp_process_q->spec.hold = 10;
-
-       return 0;
 }
 
 
@@ -1855,8 +1853,6 @@ void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx)
                break;
 
        } /* Switch */
-
-       dplane_ctx_fini(&ctx);
 }
 
 /*
@@ -2064,7 +2060,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
        /* Look for zebra LSP object */
        zvrf = vrf_info_lookup(VRF_DEFAULT);
        if (zvrf == NULL)
-               goto done;
+               return;
 
        lsp_table = zvrf->lsp_table;
 
@@ -2074,7 +2070,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
                if (is_debug)
                        zlog_debug("dplane LSP notif: in-label %u not found",
                                   dplane_ctx_get_in_label(ctx));
-               goto done;
+               return;
        }
 
        /*
@@ -2147,9 +2143,6 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
                UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
                clear_nhlfe_installed(lsp);
        }
-
-done:
-       dplane_ctx_fini(&ctx);
 }
 
 /*
@@ -2754,9 +2747,9 @@ static bool ftn_update_nexthop(bool add_p, struct nexthop *nexthop,
        return true;
 }
 
-int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
-                      struct prefix *prefix, uint8_t route_type,
-                      unsigned short route_instance)
+void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+                       struct prefix *prefix, uint8_t route_type,
+                       unsigned short route_instance)
 {
        struct route_table *table;
        struct route_node *rn;
@@ -2768,7 +2761,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
        /* Lookup table.  */
        table = zebra_vrf_table(afi, SAFI_UNICAST, zvrf_id(zvrf));
        if (!table)
-               return -1;
+               return;
 
        /* Lookup existing route */
        rn = route_node_get(table, prefix);
@@ -2779,7 +2772,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
                        break;
        }
        if (re == NULL)
-               return -1;
+               return;
 
        /*
         * Nexthops are now shared by multiple routes, so we have to make
@@ -2808,8 +2801,6 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
        zebra_nhg_free(new_nhe);
 
        rib_queue_add(rn);
-
-       return 0;
 }
 
 /*
@@ -2891,8 +2882,8 @@ static bool ftn_update_znh(bool add_p, enum lsp_types_t type,
  * There are several changes that need to be made, in several zebra
  * data structures, so we want to do all the work required at once.
  */
-int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
-                            const struct zapi_labels *zl)
+void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+                             const struct zapi_labels *zl)
 {
        int i, counter, ret = 0;
        char buf[NEXTHOP_STRLEN];
@@ -2913,7 +2904,7 @@ int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
                /* Lookup table. */
                lsp_table = zvrf->lsp_table;
                if (!lsp_table)
-                       return -1;
+                       return;
 
                /* Find or create LSP object */
                tmp_ile.in_label = zl->local_label;
@@ -3077,8 +3068,6 @@ znh_done:
 
        if (new_nhe)
                zebra_nhg_free(new_nhe);
-
-       return ret;
 }
 
 /*
@@ -3674,6 +3663,7 @@ int zebra_mpls_static_lsp_del(struct zebra_vrf *zvrf, mpls_label_t in_label,
         */
        if (nhlfe_list_first(&lsp->nhlfe_list) == NULL) {
                lsp = hash_release(slsp_table, &tmp_ile);
+               lsp_free_nhlfe(lsp);
                XFREE(MTYPE_LSP, lsp);
        }
 
@@ -4010,6 +4000,15 @@ void zebra_mpls_client_cleanup_vrf_label(uint8_t proto)
        }
 }
 
+static void lsp_table_free(void *p)
+{
+       struct zebra_lsp *lsp = p;
+
+       lsp_free_nhlfe(lsp);
+
+       XFREE(MTYPE_LSP, lsp);
+}
+
 /*
  * Called upon process exiting, need to delete LSP forwarding
  * entries from the kernel.
@@ -4018,9 +4017,9 @@ void zebra_mpls_client_cleanup_vrf_label(uint8_t proto)
 void zebra_mpls_close_tables(struct zebra_vrf *zvrf)
 {
        hash_iterate(zvrf->lsp_table, lsp_uninstall_from_kernel, NULL);
-       hash_clean(zvrf->lsp_table, NULL);
+       hash_clean(zvrf->lsp_table, lsp_table_free);
        hash_free(zvrf->lsp_table);
-       hash_clean(zvrf->slsp_table, NULL);
+       hash_clean(zvrf->slsp_table, lsp_table_free);
        hash_free(zvrf->slsp_table);
        route_table_finish(zvrf->fec_table[AFI_IP]);
        route_table_finish(zvrf->fec_table[AFI_IP6]);
@@ -4051,12 +4050,23 @@ void zebra_mpls_init_tables(struct zebra_vrf *zvrf)
        zvrf->mpls_srgb.end_label = MPLS_DEFAULT_MAX_SRGB_LABEL;
 }
 
+void zebra_mpls_turned_on(void)
+{
+       if (!mpls_enabled) {
+               mpls_processq_init();
+               mpls_enabled = true;
+       }
+
+       hook_register(zserv_client_close, zebra_mpls_cleanup_fecs_for_client);
+       hook_register(zserv_client_close, zebra_mpls_cleanup_zclient_labels);
+}
+
 /*
  * Global MPLS initialization.
  */
 void zebra_mpls_init(void)
 {
-       mpls_enabled = 0;
+       mpls_enabled = false;
        mpls_pw_reach_strict = false;
 
        if (mpls_kernel_init() < 0) {
@@ -4065,9 +4075,5 @@ void zebra_mpls_init(void)
                return;
        }
 
-       if (!mpls_processq_init())
-               mpls_enabled = 1;
-
-       hook_register(zserv_client_close, zebra_mpls_cleanup_fecs_for_client);
-       hook_register(zserv_client_close, zebra_mpls_cleanup_zclient_labels);
+       zebra_mpls_turned_on();
 }
index a8c4e1a60c60ebe81914c68ef0f49f6d829be26c..a114f01339c13c8e8fe1d9fd416f71fc63861f36 100644 (file)
@@ -261,15 +261,15 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
  * Handle zapi request to install/uninstall LSP and
  * (optionally) FEC-To-NHLFE (FTN) bindings.
  */
-int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
-                            const struct zapi_labels *zl);
+void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+                             const struct zapi_labels *zl);
 
 /*
  * Uninstall all NHLFEs bound to a single FEC.
  */
-int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
-                      struct prefix *prefix, uint8_t route_type,
-                      unsigned short route_instance);
+void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+                       struct prefix *prefix, uint8_t route_type,
+                       unsigned short route_instance);
 
 /*
  * Install/update a NHLFE for an LSP in the forwarding table. This may be
@@ -394,6 +394,13 @@ void zebra_mpls_close_tables(struct zebra_vrf *zvrf);
  */
 void zebra_mpls_init_tables(struct zebra_vrf *zvrf);
 
+/*
+ * If MPLS is turned on *after* FRR is brought up,
+ * let's actually notice this and turn on the
+ * relevant bits to make it work.
+ */
+void zebra_mpls_turned_on(void);
+
 /*
  * Global MPLS initialization.
  */
@@ -569,7 +576,7 @@ static inline int mpls_should_lsps_be_processed(struct route_node *rn)
 }
 
 /* Global variables. */
-extern int mpls_enabled;
+extern bool mpls_enabled;
 extern bool mpls_pw_reach_strict; /* Strict pseudowire reachability checking */
 
 #ifdef __cplusplus
index 2fca2a06151ac66f40f82ff0b3b7b39fbb990f2b..09e9a62f68147afab01dd2ec7c089015bffa33c8 100644 (file)
@@ -272,8 +272,13 @@ void zebra_neigh_init(void)
 
 void zebra_neigh_terminate(void)
 {
+       struct zebra_neigh_ent *n, *next;
+
        if (!zrouter.neigh_info)
                return;
 
+       RB_FOREACH_SAFE (n, zebra_neigh_rb_head, &zneigh_info->neigh_rb_tree,
+                        next)
+               zebra_neigh_free(n);
        XFREE(MTYPE_ZNEIGH_INFO, zneigh_info);
 }
index b3cb0612425cb134d3793b4cd2acce2f7c4b2021..6ad54d5c505d8f27341a32385a61807a6a65c54c 100644 (file)
@@ -413,7 +413,7 @@ void zebra_ns_notify_close(void)
                fd = zebra_netns_notify_current->u.fd;
 
        if (zebra_netns_notify_current->master != NULL)
-               thread_cancel(&zebra_netns_notify_current);
+               THREAD_OFF(zebra_netns_notify_current);
 
        /* auto-removal of notify items */
        if (fd > 0)
index 9a0f48158f036959285eb5382171bf67b9495b40..1964c763c579ad6e51c3bceebc0e51239c6f52b7 100644 (file)
@@ -1058,7 +1058,7 @@ static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
        /* If we're in shutdown, this interface event needs to clean
         * up installed NHGs, so don't clear that flag directly.
         */
-       if (!zrouter.in_shutdown)
+       if (!zebra_router_in_shutdown())
                UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
 
        /* Update validity of nexthops depending on it */
@@ -1611,18 +1611,68 @@ void zebra_nhg_free(struct nhg_hash_entry *nhe)
                                   nhe->nhg.nexthop);
        }
 
-       if (nhe->refcnt)
-               zlog_debug("nhe_id=%pNG hash refcnt=%d", nhe, nhe->refcnt);
+       THREAD_OFF(nhe->timer);
 
        zebra_nhg_free_members(nhe);
 
        XFREE(MTYPE_NHG, nhe);
 }
 
+/*
+ * Let's just drop the memory associated with each item
+ */
 void zebra_nhg_hash_free(void *p)
 {
-       zebra_nhg_release_all_deps((struct nhg_hash_entry *)p);
-       zebra_nhg_free((struct nhg_hash_entry *)p);
+       struct nhg_hash_entry *nhe = p;
+
+       if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
+               /* Group or singleton? */
+               if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
+                       zlog_debug("%s: nhe %p (%u), refcnt %d", __func__, nhe,
+                                  nhe->id, nhe->refcnt);
+               else
+                       zlog_debug("%s: nhe %p (%pNG), refcnt %d, NH %pNHv",
+                                  __func__, nhe, nhe, nhe->refcnt,
+                                  nhe->nhg.nexthop);
+       }
+
+       THREAD_OFF(nhe->timer);
+
+       nexthops_free(nhe->nhg.nexthop);
+
+       XFREE(MTYPE_NHG, nhe);
+}
+
+/*
+ * On cleanup there are nexthop groups that have not
+ * been resolved at all (a nhe->id of 0).  As such
+ * zebra needs to clean up the memory associated with
+ * those entries.
+ */
+void zebra_nhg_hash_free_zero_id(struct hash_bucket *b, void *arg)
+{
+       struct nhg_hash_entry *nhe = b->data;
+       struct nhg_connected *dep;
+
+       while ((dep = nhg_connected_tree_pop(&nhe->nhg_depends))) {
+               if (dep->nhe->id == 0)
+                       zebra_nhg_hash_free(dep->nhe);
+
+               nhg_connected_free(dep);
+       }
+
+       while ((dep = nhg_connected_tree_pop(&nhe->nhg_dependents)))
+               nhg_connected_free(dep);
+
+       if (nhe->backup_info && nhe->backup_info->nhe->id == 0) {
+               while ((dep = nhg_connected_tree_pop(
+                               &nhe->backup_info->nhe->nhg_depends)))
+                       nhg_connected_free(dep);
+
+               zebra_nhg_hash_free(nhe->backup_info->nhe);
+
+               XFREE(MTYPE_NHG, nhe->backup_info);
+       }
 }
 
 static void zebra_nhg_timer(struct thread *thread)
@@ -1644,13 +1694,14 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
 
        nhe->refcnt--;
 
-       if (!zrouter.in_shutdown && nhe->refcnt <= 0 &&
+       if (!zebra_router_in_shutdown() && nhe->refcnt <= 0 &&
            CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
            !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
                nhe->refcnt = 1;
                SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
                thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
                                 zrouter.nhg_keep, &nhe->timer);
+               return;
        }
 
        if (!zebra_nhg_depends_is_empty(nhe))
@@ -3074,10 +3125,11 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
        case DPLANE_OP_INTF_INSTALL:
        case DPLANE_OP_INTF_UPDATE:
        case DPLANE_OP_INTF_DELETE:
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
                break;
        }
-
-       dplane_ctx_fini(&ctx);
 }
 
 static int zebra_nhg_sweep_entry(struct hash_bucket *bucket, void *arg)
index 6d2ab248f9a7d6078967e4e9c81712a47cba95b5..62f71f943fddd17a2ef9dc30df6de88f1538e3ff 100644 (file)
@@ -256,6 +256,7 @@ struct nhg_hash_entry *zebra_nhg_alloc(void);
 void zebra_nhg_free(struct nhg_hash_entry *nhe);
 /* In order to clear a generic hash, we need a generic api, sigh. */
 void zebra_nhg_hash_free(void *p);
+void zebra_nhg_hash_free_zero_id(struct hash_bucket *b, void *arg);
 
 /* Init an nhe, for use in a hash lookup for example. There's some fuzziness
  * if the nhe represents only a single nexthop, so we try to capture that
index 3d757566e05db9d986f79ec06dcd3d9d7944e03c..d18c5fd5ebb4cb07831feb00d383e377fcf7d924 100644 (file)
@@ -247,7 +247,7 @@ uint32_t zebra_opaque_enqueue_batch(struct stream_fifo *batch)
        /* Dequeue messages from the incoming batch, and save them
         * on the module fifo.
         */
-       frr_with_mutex(&zo_info.mutex) {
+       frr_with_mutex (&zo_info.mutex) {
                msg = stream_fifo_pop(batch);
                while (msg) {
                        stream_fifo_push(&zo_info.in_fifo, msg);
@@ -288,7 +288,7 @@ static void process_messages(struct thread *event)
         * Dequeue some messages from the incoming queue, temporarily
         * save them on the local fifo
         */
-       frr_with_mutex(&zo_info.mutex) {
+       frr_with_mutex (&zo_info.mutex) {
 
                for (i = 0; i < zo_info.msgs_per_cycle; i++) {
                        msg = stream_fifo_pop(&zo_info.in_fifo);
index e6424fea4f78dff9428c288f86bb90bbecea08e5..43e21a6d34073068ce35ece9f3e8a4484b783bd6 100644 (file)
@@ -1068,9 +1068,6 @@ void zebra_pbr_dplane_result(struct zebra_dplane_ctx *ctx)
                        EC_ZEBRA_PBR_RULE_UPDATE,
                        "Context received in pbr rule dplane result handler with incorrect OP code (%u)",
                        op);
-
-
-       dplane_ctx_fini(&ctx);
 }
 
 /*
index 3127d2d3042619a3358aa034adf3a96768fa8d2d..4a18eb021e8edab9729316a20515138d37626ae1 100644 (file)
@@ -157,9 +157,9 @@ void zebra_ptm_finish(void)
                free(ptm_cb.in_data);
 
        /* Cancel events. */
-       thread_cancel(&ptm_cb.t_read);
-       thread_cancel(&ptm_cb.t_write);
-       thread_cancel(&ptm_cb.t_timer);
+       THREAD_OFF(ptm_cb.t_read);
+       THREAD_OFF(ptm_cb.t_write);
+       THREAD_OFF(ptm_cb.t_timer);
 
        if (ptm_cb.wb)
                buffer_free(ptm_cb.wb);
@@ -213,7 +213,7 @@ static int zebra_ptm_send_message(char *data, int size)
                                 ptm_cb.reconnect_time, &ptm_cb.t_timer);
                return -1;
        case BUFFER_EMPTY:
-               thread_cancel(&ptm_cb.t_write);
+               THREAD_OFF(ptm_cb.t_write);
                break;
        case BUFFER_PENDING:
                thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
index 7d1153f21f4dd64fcccb206a73a097136b0a5759..be089fc7594c8bf55ea6db7e8b6617c3f711543a 100644 (file)
@@ -101,13 +101,15 @@ void zebra_pw_del(struct zebra_vrf *zvrf, struct zebra_pw *pw)
        if (pw->status == PW_FORWARDING) {
                hook_call(pw_uninstall, pw);
                dplane_pw_uninstall(pw);
-       } else if (pw->install_retry_timer)
-               thread_cancel(&pw->install_retry_timer);
+       }
+
+       THREAD_OFF(pw->install_retry_timer);
 
        /* unlink and release memory */
        RB_REMOVE(zebra_pw_head, &zvrf->pseudowires, pw);
        if (pw->protocol == ZEBRA_ROUTE_STATIC)
                RB_REMOVE(zebra_static_pw_head, &zvrf->static_pseudowires, pw);
+
        XFREE(MTYPE_PW, pw);
 }
 
@@ -219,7 +221,7 @@ void zebra_pw_install_failure(struct zebra_pw *pw, int pwstatus)
                        pw->vrf_id, pw->ifname, PW_INSTALL_RETRY_INTERVAL);
 
        /* schedule to retry later */
-       thread_cancel(&pw->install_retry_timer);
+       THREAD_OFF(pw->install_retry_timer);
        thread_add_timer(zrouter.master, zebra_pw_install_retry, pw,
                         PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
 
@@ -230,7 +232,6 @@ static void zebra_pw_install_retry(struct thread *thread)
 {
        struct zebra_pw *pw = THREAD_ARG(thread);
 
-       pw->install_retry_timer = NULL;
        zebra_pw_install(pw);
 }
 
index 63f15b0f20f4c0793bdbe2d12517ced5b0fe0b99..03bda8cc33e580d6d6689c2c01b876ca8dfd4be8 100644 (file)
@@ -77,54 +77,65 @@ static struct dplane_ctx_q rib_dplane_q;
 DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
            (rn, reason));
 
-/* Should we allow non FRR processes to delete our routes */
-extern int allow_delete;
+/* Meta Q's specific names */
+enum meta_queue_indexes {
+       META_QUEUE_NHG,
+       META_QUEUE_EVPN,
+       META_QUEUE_CONNECTED,
+       META_QUEUE_KERNEL,
+       META_QUEUE_STATIC,
+       META_QUEUE_NOTBGP,
+       META_QUEUE_BGP,
+       META_QUEUE_OTHER,
+};
 
 /* Each route type's string and default distance value. */
 static const struct {
        int key;
        uint8_t distance;
-       uint8_t meta_q_map;
+       enum meta_queue_indexes meta_q_map;
 } route_info[ZEBRA_ROUTE_MAX] = {
-       [ZEBRA_ROUTE_NHG] = {ZEBRA_ROUTE_NHG, 255 /* Unneeded for nhg's */, 0},
-       [ZEBRA_ROUTE_SYSTEM] = {ZEBRA_ROUTE_SYSTEM, 0, 7},
-       [ZEBRA_ROUTE_KERNEL] = {ZEBRA_ROUTE_KERNEL, 0, 3},
-       [ZEBRA_ROUTE_CONNECT] = {ZEBRA_ROUTE_CONNECT, 0, 2},
-       [ZEBRA_ROUTE_STATIC] = {ZEBRA_ROUTE_STATIC, 1, 4},
-       [ZEBRA_ROUTE_RIP] = {ZEBRA_ROUTE_RIP, 120, 5},
-       [ZEBRA_ROUTE_RIPNG] = {ZEBRA_ROUTE_RIPNG, 120, 5},
-       [ZEBRA_ROUTE_OSPF] = {ZEBRA_ROUTE_OSPF, 110, 5},
-       [ZEBRA_ROUTE_OSPF6] = {ZEBRA_ROUTE_OSPF6, 110, 5},
-       [ZEBRA_ROUTE_ISIS] = {ZEBRA_ROUTE_ISIS, 115, 5},
-       [ZEBRA_ROUTE_BGP] = {ZEBRA_ROUTE_BGP, 20 /* IBGP is 200. */, 6},
-       [ZEBRA_ROUTE_PIM] = {ZEBRA_ROUTE_PIM, 255, 7},
-       [ZEBRA_ROUTE_EIGRP] = {ZEBRA_ROUTE_EIGRP, 90, 5},
-       [ZEBRA_ROUTE_NHRP] = {ZEBRA_ROUTE_NHRP, 10, 5},
-       [ZEBRA_ROUTE_HSLS] = {ZEBRA_ROUTE_HSLS, 255, 7},
-       [ZEBRA_ROUTE_OLSR] = {ZEBRA_ROUTE_OLSR, 255, 7},
-       [ZEBRA_ROUTE_TABLE] = {ZEBRA_ROUTE_TABLE, 150, 4},
-       [ZEBRA_ROUTE_LDP] = {ZEBRA_ROUTE_LDP, 150, 7},
-       [ZEBRA_ROUTE_VNC] = {ZEBRA_ROUTE_VNC, 20, 6},
-       [ZEBRA_ROUTE_VNC_DIRECT] = {ZEBRA_ROUTE_VNC_DIRECT, 20, 6},
-       [ZEBRA_ROUTE_VNC_DIRECT_RH] = {ZEBRA_ROUTE_VNC_DIRECT_RH, 20, 6},
-       [ZEBRA_ROUTE_BGP_DIRECT] = {ZEBRA_ROUTE_BGP_DIRECT, 20, 6},
-       [ZEBRA_ROUTE_BGP_DIRECT_EXT] = {ZEBRA_ROUTE_BGP_DIRECT_EXT, 20, 6},
-       [ZEBRA_ROUTE_BABEL] = {ZEBRA_ROUTE_BABEL, 100, 5},
-       [ZEBRA_ROUTE_SHARP] = {ZEBRA_ROUTE_SHARP, 150, 7},
-       [ZEBRA_ROUTE_PBR] = {ZEBRA_ROUTE_PBR, 200, 7},
-       [ZEBRA_ROUTE_BFD] = {ZEBRA_ROUTE_BFD, 255, 7},
-       [ZEBRA_ROUTE_OPENFABRIC] = {ZEBRA_ROUTE_OPENFABRIC, 115, 5},
-       [ZEBRA_ROUTE_VRRP] = {ZEBRA_ROUTE_VRRP, 255, 7},
-       [ZEBRA_ROUTE_SRTE] = {ZEBRA_ROUTE_SRTE, 255, 7},
-       [ZEBRA_ROUTE_ALL] = {ZEBRA_ROUTE_ALL, 255, 7},
+       [ZEBRA_ROUTE_NHG] = {ZEBRA_ROUTE_NHG, 255 /* Unneeded for nhg's */,
+                            META_QUEUE_NHG},
+       [ZEBRA_ROUTE_SYSTEM] = {ZEBRA_ROUTE_SYSTEM, 0, META_QUEUE_KERNEL},
+       [ZEBRA_ROUTE_KERNEL] = {ZEBRA_ROUTE_KERNEL, 0, META_QUEUE_KERNEL},
+       [ZEBRA_ROUTE_CONNECT] = {ZEBRA_ROUTE_CONNECT, 0, META_QUEUE_CONNECTED},
+       [ZEBRA_ROUTE_STATIC] = {ZEBRA_ROUTE_STATIC, 1, META_QUEUE_STATIC},
+       [ZEBRA_ROUTE_RIP] = {ZEBRA_ROUTE_RIP, 120, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_RIPNG] = {ZEBRA_ROUTE_RIPNG, 120, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_OSPF] = {ZEBRA_ROUTE_OSPF, 110, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_OSPF6] = {ZEBRA_ROUTE_OSPF6, 110, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_ISIS] = {ZEBRA_ROUTE_ISIS, 115, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_BGP] = {ZEBRA_ROUTE_BGP, 20 /* IBGP is 200. */,
+                            META_QUEUE_BGP},
+       [ZEBRA_ROUTE_PIM] = {ZEBRA_ROUTE_PIM, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_EIGRP] = {ZEBRA_ROUTE_EIGRP, 90, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_NHRP] = {ZEBRA_ROUTE_NHRP, 10, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_HSLS] = {ZEBRA_ROUTE_HSLS, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_OLSR] = {ZEBRA_ROUTE_OLSR, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_TABLE] = {ZEBRA_ROUTE_TABLE, 150, META_QUEUE_STATIC},
+       [ZEBRA_ROUTE_LDP] = {ZEBRA_ROUTE_LDP, 150, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_VNC] = {ZEBRA_ROUTE_VNC, 20, META_QUEUE_BGP},
+       [ZEBRA_ROUTE_VNC_DIRECT] = {ZEBRA_ROUTE_VNC_DIRECT, 20, META_QUEUE_BGP},
+       [ZEBRA_ROUTE_VNC_DIRECT_RH] = {ZEBRA_ROUTE_VNC_DIRECT_RH, 20,
+                                      META_QUEUE_BGP},
+       [ZEBRA_ROUTE_BGP_DIRECT] = {ZEBRA_ROUTE_BGP_DIRECT, 20, META_QUEUE_BGP},
+       [ZEBRA_ROUTE_BGP_DIRECT_EXT] = {ZEBRA_ROUTE_BGP_DIRECT_EXT, 20,
+                                       META_QUEUE_BGP},
+       [ZEBRA_ROUTE_BABEL] = {ZEBRA_ROUTE_BABEL, 100, META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_SHARP] = {ZEBRA_ROUTE_SHARP, 150, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_PBR] = {ZEBRA_ROUTE_PBR, 200, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_BFD] = {ZEBRA_ROUTE_BFD, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_OPENFABRIC] = {ZEBRA_ROUTE_OPENFABRIC, 115,
+                                   META_QUEUE_NOTBGP},
+       [ZEBRA_ROUTE_VRRP] = {ZEBRA_ROUTE_VRRP, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_SRTE] = {ZEBRA_ROUTE_SRTE, 255, META_QUEUE_OTHER},
+       [ZEBRA_ROUTE_ALL] = {ZEBRA_ROUTE_ALL, 255, META_QUEUE_OTHER},
        /* Any new route type added to zebra, should be mirrored here */
 
        /* no entry/default: 150 */
 };
 
-/* EVPN/VXLAN subqueue is number 1 */
-#define META_QUEUE_EVPN 1
-
 /* Wrapper struct for nhg workqueue items; a 'ctx' is an incoming update
  * from the OS, and an 'nhe' is a nhe update.
  */
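
The hunk above replaces the bare meta_q_map numbers in route_info[] with members of the new enum meta_queue_indexes, so every route type names its sub-queue, and the old standalone #define META_QUEUE_EVPN folds into the same enum. A minimal standalone sketch of that designated-initializer pattern (my_route_type, my_queue and queue2str are invented names, not the zebra definitions):

#include <stdint.h>
#include <stdio.h>

enum my_queue { Q_CONNECTED, Q_KERNEL, Q_STATIC, Q_IGP, Q_BGP, Q_OTHER };

enum my_route_type { RT_CONNECT, RT_KERNEL, RT_STATIC, RT_OSPF, RT_BGP, RT_MAX };

/* each route type carries its admin distance and a named sub-queue */
static const struct {
	uint8_t distance;
	enum my_queue queue;
} route_tbl[RT_MAX] = {
	[RT_CONNECT] = {0, Q_CONNECTED},
	[RT_KERNEL] = {0, Q_KERNEL},
	[RT_STATIC] = {1, Q_STATIC},
	[RT_OSPF] = {110, Q_IGP},
	[RT_BGP] = {20, Q_BGP},
};

static const char *queue2str(enum my_queue q)
{
	switch (q) {
	case Q_CONNECTED:
		return "Connected Routes";
	case Q_KERNEL:
		return "Kernel Routes";
	case Q_STATIC:
		return "Static Routes";
	case Q_IGP:
		return "IGP Routes";
	case Q_BGP:
		return "BGP Routes";
	case Q_OTHER:
		return "Other Routes";
	}
	return "Unknown";
}

int main(void)
{
	printf("OSPF -> %s (distance %u)\n",
	       queue2str(route_tbl[RT_OSPF].queue), route_tbl[RT_OSPF].distance);
	return 0;
}

The subqueue2str() helper added a few hunks below does the equivalent enum-to-string mapping, which is what lets the debug logs print queue names instead of raw indexes.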
@@ -167,6 +178,30 @@ struct wq_evpn_wrapper {
 #pragma FRR printfrr_ext "%pZN" (struct route_node *)
 #endif
 
+static const char *subqueue2str(enum meta_queue_indexes index)
+{
+       switch (index) {
+       case META_QUEUE_NHG:
+               return "NHG Objects";
+       case META_QUEUE_EVPN:
+               return "EVPN/VxLan Objects";
+       case META_QUEUE_CONNECTED:
+               return "Connected Routes";
+       case META_QUEUE_KERNEL:
+               return "Kernel Routes";
+       case META_QUEUE_STATIC:
+               return "Static Routes";
+       case META_QUEUE_NOTBGP:
+               return "RIP/OSPF/ISIS/EIGRP/NHRP Routes";
+       case META_QUEUE_BGP:
+               return "BGP Routes";
+       case META_QUEUE_OTHER:
+               return "Other Routes";
+       }
+
+       return "Unknown";
+}
+
 printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
 static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
                                   const void *ptr)
@@ -2087,9 +2122,6 @@ done:
 
        if (rn)
                route_unlock_node(rn);
-
-       /* Return context to dataplane module */
-       dplane_ctx_fini(&ctx);
 }
 
 /*
@@ -2323,9 +2355,6 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
 done:
        if (rn)
                route_unlock_node(rn);
-
-       /* Return context to dataplane module */
-       dplane_ctx_fini(&ctx);
 }
 
 /*
@@ -2392,7 +2421,7 @@ static void process_subq_nhg(struct listnode *lnode)
        struct nhg_ctx *ctx;
        struct nhg_hash_entry *nhe, *newnhe;
        struct wq_nhg_wrapper *w;
-       uint8_t qindex = route_info[ZEBRA_ROUTE_NHG].meta_q_map;
+       uint8_t qindex = META_QUEUE_NHG;
 
        w = listgetdata(lnode);
 
@@ -2407,8 +2436,8 @@ static void process_subq_nhg(struct listnode *lnode)
 
                if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                        zlog_debug(
-                               "NHG Context id=%u dequeued from sub-queue %u",
-                               ctx->id, qindex);
+                               "NHG Context id=%u dequeued from sub-queue %s",
+                               ctx->id, subqueue2str(qindex));
 
 
                /* Process nexthop group updates coming 'up' from the OS */
@@ -2418,8 +2447,8 @@ static void process_subq_nhg(struct listnode *lnode)
                nhe = w->u.nhe;
 
                if (IS_ZEBRA_DEBUG_RIB_DETAILED)
-                       zlog_debug("NHG %u dequeued from sub-queue %u",
-                                  nhe->id, qindex);
+                       zlog_debug("NHG %u dequeued from sub-queue %s", nhe->id,
+                                  subqueue2str(qindex));
 
                /* Process incoming nhg update, probably from a proto daemon */
                newnhe = zebra_nhg_proto_add(nhe->id, nhe->type,
@@ -2465,9 +2494,9 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
                if (dest)
                        re = re_list_first(&dest->routes);
 
-               zlog_debug("%s(%u:%u):%pRN rn %p dequeued from sub-queue %u",
+               zlog_debug("%s(%u:%u):%pRN rn %p dequeued from sub-queue %s",
                           zvrf_name(zvrf), zvrf_id(zvrf), re ? re->table : 0,
-                          rnode, rnode, qindex);
+                          rnode, rnode, subqueue2str(qindex));
        }
 
        if (rnode->info)
@@ -2481,19 +2510,30 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
  * Examine the specified subqueue; process one entry and return 1 if
  * there is a node, return 0 otherwise.
  */
-static unsigned int process_subq(struct list *subq, uint8_t qindex)
+static unsigned int process_subq(struct list *subq,
+                                enum meta_queue_indexes qindex)
 {
        struct listnode *lnode = listhead(subq);
 
        if (!lnode)
                return 0;
 
-       if (qindex == META_QUEUE_EVPN)
+       switch (qindex) {
+       case META_QUEUE_EVPN:
                process_subq_evpn(lnode);
-       else if (qindex == route_info[ZEBRA_ROUTE_NHG].meta_q_map)
+               break;
+       case META_QUEUE_NHG:
                process_subq_nhg(lnode);
-       else
+               break;
+       case META_QUEUE_CONNECTED:
+       case META_QUEUE_KERNEL:
+       case META_QUEUE_STATIC:
+       case META_QUEUE_NOTBGP:
+       case META_QUEUE_BGP:
+       case META_QUEUE_OTHER:
                process_subq_route(lnode, qindex);
+               break;
+       }
 
        list_delete_node(subq, lnode);
 
@@ -2578,8 +2618,8 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
                       RIB_ROUTE_QUEUED(qindex))) {
                if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                        rnode_debug(rn, re->vrf_id,
-                                   "rn %p is already queued in sub-queue %u",
-                                   (void *)rn, qindex);
+                                   "rn %p is already queued in sub-queue %s",
+                                   (void *)rn, subqueue2str(qindex));
                return -1;
        }
 
@@ -2589,8 +2629,8 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
        mq->size++;
 
        if (IS_ZEBRA_DEBUG_RIB_DETAILED)
-               rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %u",
-                           (void *)rn, qindex);
+               rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s",
+                           (void *)rn, subqueue2str(qindex));
 
        return 0;
 }
@@ -2598,7 +2638,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
 static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
 {
        struct nhg_ctx *ctx = NULL;
-       uint8_t qindex = route_info[ZEBRA_ROUTE_NHG].meta_q_map;
+       uint8_t qindex = META_QUEUE_NHG;
        struct wq_nhg_wrapper *w;
 
        ctx = (struct nhg_ctx *)data;
@@ -2615,8 +2655,8 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
        mq->size++;
 
        if (IS_ZEBRA_DEBUG_RIB_DETAILED)
-               zlog_debug("NHG Context id=%u queued into sub-queue %u",
-                          ctx->id, qindex);
+               zlog_debug("NHG Context id=%u queued into sub-queue %s",
+                          ctx->id, subqueue2str(qindex));
 
        return 0;
 }
@@ -2624,7 +2664,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
 static int rib_meta_queue_nhg_add(struct meta_queue *mq, void *data)
 {
        struct nhg_hash_entry *nhe = NULL;
-       uint8_t qindex = route_info[ZEBRA_ROUTE_NHG].meta_q_map;
+       uint8_t qindex = META_QUEUE_NHG;
        struct wq_nhg_wrapper *w;
 
        nhe = (struct nhg_hash_entry *)data;
@@ -2641,8 +2681,8 @@ static int rib_meta_queue_nhg_add(struct meta_queue *mq, void *data)
        mq->size++;
 
        if (IS_ZEBRA_DEBUG_RIB_DETAILED)
-               zlog_debug("NHG id=%u queued into sub-queue %u",
-                          nhe->id, qindex);
+               zlog_debug("NHG id=%u queued into sub-queue %s", nhe->id,
+                          subqueue2str(qindex));
 
        return 0;
 }
@@ -2918,131 +2958,130 @@ int zebra_rib_queue_evpn_rem_vtep_del(vrf_id_t vrf_id, vni_t vni,
        return mq_add_handler(w, rib_meta_queue_evpn_add);
 }
 
+
+/* Create new meta queue.
+   A destructor function doesn't seem to be necessary here.
+ */
+static struct meta_queue *meta_queue_new(void)
+{
+       struct meta_queue *new;
+       unsigned i;
+
+       new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct meta_queue));
+
+       for (i = 0; i < MQ_SIZE; i++) {
+               new->subq[i] = list_new();
+               assert(new->subq[i]);
+       }
+
+       return new;
+}
+
 /* Clean up the EVPN meta-queue list */
-static void evpn_meta_queue_free(struct list *l)
+static void evpn_meta_queue_free(struct meta_queue *mq, struct list *l,
+                                struct zebra_vrf *zvrf)
 {
-       struct listnode *node;
+       struct listnode *node, *nnode;
        struct wq_evpn_wrapper *w;
 
        /* Free the node wrapper object, and the struct it wraps */
-       while ((node = listhead(l)) != NULL) {
-               w = node->data;
+       for (ALL_LIST_ELEMENTS(l, node, nnode, w)) {
+               if (zvrf) {
+                       vrf_id_t vrf_id = zvrf->vrf->vrf_id;
+
+                       if (w->vrf_id != vrf_id)
+                               continue;
+               }
+
                node->data = NULL;
 
                XFREE(MTYPE_WQ_WRAPPER, w);
 
                list_delete_node(l, node);
+               mq->size--;
        }
 }
 
 /* Clean up the nhg meta-queue list */
-static void nhg_meta_queue_free(struct list *l)
+static void nhg_meta_queue_free(struct meta_queue *mq, struct list *l,
+                               struct zebra_vrf *zvrf)
 {
        struct wq_nhg_wrapper *w;
-       struct listnode *node;
+       struct listnode *node, *nnode;
 
        /* Free the node wrapper object, and the struct it wraps */
-       while ((node = listhead(l)) != NULL) {
-               w = node->data;
-               node->data = NULL;
+       for (ALL_LIST_ELEMENTS(l, node, nnode, w)) {
+               if (zvrf) {
+                       vrf_id_t vrf_id = zvrf->vrf->vrf_id;
 
+                       if (w->type == WQ_NHG_WRAPPER_TYPE_CTX &&
+                           w->u.ctx->vrf_id != vrf_id)
+                               continue;
+                       else if (w->type == WQ_NHG_WRAPPER_TYPE_NHG &&
+                                w->u.nhe->vrf_id != vrf_id)
+                               continue;
+               }
                if (w->type == WQ_NHG_WRAPPER_TYPE_CTX)
                        nhg_ctx_free(&w->u.ctx);
                else if (w->type == WQ_NHG_WRAPPER_TYPE_NHG)
                        zebra_nhg_free(w->u.nhe);
 
+               node->data = NULL;
                XFREE(MTYPE_WQ_WRAPPER, w);
 
                list_delete_node(l, node);
+               mq->size--;
        }
 }
 
-/* Create new meta queue.
-   A destructor function doesn't seem to be necessary here.
- */
-static struct meta_queue *meta_queue_new(void)
+static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
+                               struct zebra_vrf *zvrf)
 {
-       struct meta_queue *new;
-       unsigned i;
+       struct route_node *rnode;
+       struct listnode *node, *nnode;
 
-       new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct meta_queue));
+       for (ALL_LIST_ELEMENTS(l, node, nnode, rnode)) {
+               rib_dest_t *dest = rib_dest_from_rnode(rnode);
 
-       for (i = 0; i < MQ_SIZE; i++) {
-               new->subq[i] = list_new();
-               assert(new->subq[i]);
-       }
-
-       return new;
-}
-
-void meta_queue_free(struct meta_queue *mq)
-{
-       unsigned i;
-
-       for (i = 0; i < MQ_SIZE; i++) {
-               /* Some subqueues may need cleanup - nhgs for example */
-               if (i == route_info[ZEBRA_ROUTE_NHG].meta_q_map)
-                       nhg_meta_queue_free(mq->subq[i]);
-               else if (i == META_QUEUE_EVPN)
-                       evpn_meta_queue_free(mq->subq[i]);
+               if (dest && rib_dest_vrf(dest) != zvrf)
+                       continue;
 
-               list_delete(&mq->subq[i]);
+               route_unlock_node(rnode);
+               node->data = NULL;
+               list_delete_node(l, node);
+               mq->size--;
        }
-
-       XFREE(MTYPE_WORK_QUEUE, mq);
 }
 
-void rib_meta_queue_free_vrf(struct meta_queue *mq, struct zebra_vrf *zvrf)
+
+void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
 {
-       vrf_id_t vrf_id = zvrf->vrf->vrf_id;
-       unsigned int i;
+       enum meta_queue_indexes i;
 
        for (i = 0; i < MQ_SIZE; i++) {
-               struct listnode *lnode, *nnode;
-               void *data;
-               bool del;
-
-               for (ALL_LIST_ELEMENTS(mq->subq[i], lnode, nnode, data)) {
-                       del = false;
-
-                       if (i == META_QUEUE_EVPN) {
-                               struct wq_evpn_wrapper *w = data;
-
-                               if (w->vrf_id == vrf_id) {
-                                       XFREE(MTYPE_WQ_WRAPPER, w);
-                                       del = true;
-                               }
-                       } else if (i ==
-                                  route_info[ZEBRA_ROUTE_NHG].meta_q_map) {
-                               struct wq_nhg_wrapper *w = data;
-
-                               if (w->type == WQ_NHG_WRAPPER_TYPE_CTX &&
-                                   w->u.ctx->vrf_id == vrf_id) {
-                                       nhg_ctx_free(&w->u.ctx);
-                                       XFREE(MTYPE_WQ_WRAPPER, w);
-                                       del = true;
-                               } else if (w->type == WQ_NHG_WRAPPER_TYPE_NHG &&
-                                          w->u.nhe->vrf_id == vrf_id) {
-                                       zebra_nhg_free(w->u.nhe);
-                                       XFREE(MTYPE_WQ_WRAPPER, w);
-                                       del = true;
-                               }
-                       } else {
-                               struct route_node *rnode = data;
-                               rib_dest_t *dest = rib_dest_from_rnode(rnode);
-
-                               if (dest && rib_dest_vrf(dest) == zvrf) {
-                                       route_unlock_node(rnode);
-                                       del = true;
-                               }
-                       }
-
-                       if (del) {
-                               list_delete_node(mq->subq[i], lnode);
-                               mq->size--;
-                       }
+               /* Some subqueues may need cleanup - nhgs for example */
+               switch (i) {
+               case META_QUEUE_NHG:
+                       nhg_meta_queue_free(mq, mq->subq[i], zvrf);
+                       break;
+               case META_QUEUE_EVPN:
+                       evpn_meta_queue_free(mq, mq->subq[i], zvrf);
+                       break;
+               case META_QUEUE_CONNECTED:
+               case META_QUEUE_KERNEL:
+               case META_QUEUE_STATIC:
+               case META_QUEUE_NOTBGP:
+               case META_QUEUE_BGP:
+               case META_QUEUE_OTHER:
+                       rib_meta_queue_free(mq, mq->subq[i], zvrf);
+                       break;
                }
+               if (!zvrf)
+                       list_delete(&mq->subq[i]);
        }
+
+       if (!zvrf)
+               XFREE(MTYPE_WORK_QUEUE, mq);
 }
 
 /* initialise zebra rib work queue */
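
The rewrite above folds rib_meta_queue_free_vrf() into meta_queue_free(): with a zebra_vrf argument only that VRF's queued items are dropped and the sub-queue lists stay allocated, while a NULL argument drains every sub-queue and frees the meta queue itself. A small standalone sketch of that flush-or-destroy shape with an optional filter (the item and queue types here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct item {
	int vrf_id;
	struct item *next;
};

struct queue {
	struct item *head;
	unsigned int size;
};

/* filter == NULL: drain everything and free the queue itself.
 * filter != NULL: drop only matching items and keep the queue. */
static struct queue *queue_free(struct queue *q, const int *filter)
{
	struct item **pp = &q->head;

	while (*pp) {
		struct item *it = *pp;

		if (filter && it->vrf_id != *filter) {
			pp = &it->next;
			continue;
		}
		*pp = it->next;
		free(it);
		q->size--;
	}

	if (!filter) {
		free(q);
		return NULL;
	}
	return q;
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	int vrf = 1;

	if (!q)
		return 1;
	for (int i = 0; i < 4; i++) {
		struct item *it = calloc(1, sizeof(*it));

		if (!it)
			break;
		it->vrf_id = i % 2;
		it->next = q->head;
		q->head = it;
		q->size++;
	}

	q = queue_free(q, &vrf); /* per-VRF flush, two items remain */
	printf("after VRF flush: %u items\n", q->size);
	queue_free(q, NULL);     /* full teardown */
	return 0;
}

That dual role is what lets zebra_vrf_disable()/zebra_vrf_delete() and zebra_router_terminate() in the later hunks call the same function, the former with the VRF and the latter with NULL.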
@@ -3722,8 +3761,8 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
                                            rn, fib,
                                            zebra_route_string(fib->type));
                        }
-                       if (allow_delete
-                           || CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
+                       if (zrouter.allow_delete ||
+                           CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
                                UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
                                /* Unset flags. */
                                for (rtnh = fib->nhe->nhg.nexthop; rtnh;
@@ -3768,8 +3807,8 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
        if (same) {
                struct nexthop *tmp_nh;
 
-               if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE)
-                   && !allow_delete) {
+               if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE) &&
+                   !zrouter.allow_delete) {
                        rib_install_kernel(rn, same, NULL);
                        route_unlock_node(rn);
 
@@ -4191,7 +4230,7 @@ void rib_close_table(struct route_table *table)
 /*
  * Handler for async dataplane results after a pseudowire installation
  */
-static int handle_pw_result(struct zebra_dplane_ctx *ctx)
+static void handle_pw_result(struct zebra_dplane_ctx *ctx)
 {
        struct zebra_pw *pw;
        struct zebra_vrf *vrf;
@@ -4200,7 +4239,7 @@ static int handle_pw_result(struct zebra_dplane_ctx *ctx)
         * result for installation attempts here.
         */
        if (dplane_ctx_get_op(ctx) != DPLANE_OP_PW_INSTALL)
-               goto done;
+               return;
 
        if (dplane_ctx_get_status(ctx) != ZEBRA_DPLANE_REQUEST_SUCCESS) {
                vrf = zebra_vrf_lookup_by_id(dplane_ctx_get_vrf(ctx));
@@ -4209,14 +4248,8 @@ static int handle_pw_result(struct zebra_dplane_ctx *ctx)
                        zebra_pw_install_failure(pw,
                                                 dplane_ctx_get_pw_status(ctx));
        }
-
-done:
-       dplane_ctx_fini(&ctx);
-
-       return 0;
 }
 
-
 /*
  * Handle results from the dataplane system. Dequeue update context
  * structs, dispatch to appropriate internal handlers.
@@ -4233,7 +4266,7 @@ static void rib_process_dplane_results(struct thread *thread)
                TAILQ_INIT(&ctxlist);
 
                /* Take lock controlling queue of results */
-               frr_with_mutex(&dplane_mutex) {
+               frr_with_mutex (&dplane_mutex) {
                        /* Dequeue list of context structs */
                        dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
                }
@@ -4289,7 +4322,6 @@ static void rib_process_dplane_results(struct thread *thread)
                        case DPLANE_OP_ROUTE_INSTALL:
                        case DPLANE_OP_ROUTE_UPDATE:
                        case DPLANE_OP_ROUTE_DELETE:
-                       {
                                /* Bit of special case for route updates
                                 * that were generated by async notifications:
                                 * we don't want to continue processing these
@@ -4297,10 +4329,7 @@ static void rib_process_dplane_results(struct thread *thread)
                                 */
                                if (dplane_ctx_get_notif_provider(ctx) == 0)
                                        rib_process_result(ctx);
-                               else
-                                       dplane_ctx_fini(&ctx);
-                       }
-                       break;
+                               break;
 
                        case DPLANE_OP_ROUTE_NOTIFY:
                                rib_process_dplane_notify(ctx);
@@ -4315,17 +4344,13 @@ static void rib_process_dplane_results(struct thread *thread)
                        case DPLANE_OP_LSP_INSTALL:
                        case DPLANE_OP_LSP_UPDATE:
                        case DPLANE_OP_LSP_DELETE:
-                       {
                                /* Bit of special case for LSP updates
                                 * that were generated by async notifications:
                                 * we don't want to continue processing these.
                                 */
                                if (dplane_ctx_get_notif_provider(ctx) == 0)
                                        zebra_mpls_lsp_dplane_result(ctx);
-                               else
-                                       dplane_ctx_fini(&ctx);
-                       }
-                       break;
+                               break;
 
                        case DPLANE_OP_LSP_NOTIFY:
                                zebra_mpls_process_dplane_notify(ctx);
@@ -4338,8 +4363,6 @@ static void rib_process_dplane_results(struct thread *thread)
 
                        case DPLANE_OP_SYS_ROUTE_ADD:
                        case DPLANE_OP_SYS_ROUTE_DELETE:
-                               /* No further processing in zebra for these. */
-                               dplane_ctx_fini(&ctx);
                                break;
 
                        case DPLANE_OP_MAC_INSTALL:
@@ -4368,6 +4391,11 @@ static void rib_process_dplane_results(struct thread *thread)
                                zebra_if_dplane_result(ctx);
                                break;
 
+                       case DPLANE_OP_TC_INSTALL:
+                       case DPLANE_OP_TC_UPDATE:
+                       case DPLANE_OP_TC_DELETE:
+                               break;
+
                        /* Some op codes not handled here */
                        case DPLANE_OP_ADDR_INSTALL:
                        case DPLANE_OP_ADDR_UNINSTALL:
@@ -4383,12 +4411,11 @@ static void rib_process_dplane_results(struct thread *thread)
                        case DPLANE_OP_NEIGH_TABLE_UPDATE:
                        case DPLANE_OP_GRE_SET:
                        case DPLANE_OP_NONE:
-                               /* Don't expect this: just return the struct? */
-                               dplane_ctx_fini(&ctx);
                                break;
 
                        } /* Dispatch by op code */
 
+                       dplane_ctx_fini(&ctx);
                        ctx = dplane_ctx_dequeue(&ctxlist);
                }
 
@@ -4403,7 +4430,7 @@ static void rib_process_dplane_results(struct thread *thread)
 static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
 {
        /* Take lock controlling queue of results */
-       frr_with_mutex(&dplane_mutex) {
+       frr_with_mutex (&dplane_mutex) {
                /* Enqueue context blocks */
                dplane_ctx_list_append(&rib_dplane_q, ctxlist);
        }
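
Several hunks in this file remove per-handler dplane_ctx_fini() calls (from rib_process_result(), rib_process_dplane_notify(), handle_pw_result() and the individual cases of the results loop); rib_process_dplane_results() now returns every context with a single dplane_ctx_fini() after the op-code switch, and handle_pw_result() loses its done: label and int return in the process. A standalone sketch of that centralised-cleanup shape (ctx, the handlers and the free are invented here, not the dplane API):

#include <stdio.h>
#include <stdlib.h>

enum op { OP_ROUTE, OP_LSP, OP_IGNORED };

struct ctx {
	enum op op;
};

static void handle_route(struct ctx *ctx)
{
	(void)ctx;
	printf("route result\n");
}

static void handle_lsp(struct ctx *ctx)
{
	(void)ctx;
	printf("lsp result\n");
}

static void process_results(struct ctx **list, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct ctx *ctx = list[i];

		switch (ctx->op) {
		case OP_ROUTE:
			handle_route(ctx);
			break;
		case OP_LSP:
			handle_lsp(ctx);
			break;
		case OP_IGNORED:
			break;
		}

		/* one central release instead of a free in every handler */
		free(ctx);
		list[i] = NULL;
	}
}

int main(void)
{
	struct ctx *list[3];

	for (size_t i = 0; i < 3; i++) {
		list[i] = malloc(sizeof(**list));
		if (!list[i])
			return 1;
		list[i]->op = (enum op)i;
	}
	process_results(list, 3);
	return 0;
}

The trade-off is that no handler may keep the context past its return, which the removed "else dplane_ctx_fini(&ctx)" branches suggest was already the contract here.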
index 9fccda9e0896aec31f125486a1d46e4287799849..c66849863e86b14c4584ba1014420fa03835f2ed 100644 (file)
@@ -239,13 +239,14 @@ void zebra_router_terminate(void)
                zebra_router_free_table(zrt);
 
        work_queue_free_and_null(&zrouter.ribq);
-       meta_queue_free(zrouter.mq);
+       meta_queue_free(zrouter.mq, NULL);
 
        zebra_vxlan_disable();
        zebra_mlag_terminate();
        zebra_neigh_terminate();
 
        /* Free NHE in ID table only since it has unhashable entries as well */
+       hash_iterate(zrouter.nhgs_id, zebra_nhg_hash_free_zero_id, NULL);
        hash_clean(zrouter.nhgs_id, zebra_nhg_hash_free);
        hash_free(zrouter.nhgs_id);
        hash_clean(zrouter.nhgs, NULL);
@@ -278,6 +279,8 @@ void zebra_router_init(bool asic_offload, bool notify_on_ack)
 {
        zrouter.sequence_num = 0;
 
+       zrouter.allow_delete = false;
+
        zrouter.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
 
        zrouter.nhg_keep = ZEBRA_DEFAULT_NHG_KEEP_TIMER;
index d51e7a2b7d5fc2bb9d6f4ff6036ef84ab678e443..992bcd5c08c07c0bd008ebc34f350b1f29fbc2a1 100644 (file)
@@ -229,6 +229,9 @@ struct zebra_router {
 
 #define ZEBRA_DEFAULT_NHG_KEEP_TIMER 180
        uint32_t nhg_keep;
+
+       /* Should we allow non FRR processes to delete our routes */
+       bool allow_delete;
 };
 
 #define GRACEFUL_RESTART_TIME 60
@@ -282,6 +285,11 @@ static inline void zebra_router_set_supports_nhgs(bool support)
        zrouter.supports_nhgs = support;
 }
 
+static inline bool zebra_router_in_shutdown(void)
+{
+       return atomic_load_explicit(&zrouter.in_shutdown, memory_order_relaxed);
+}
+
 /* zebra_northbound.c */
 extern const struct frr_yang_module_info frr_zebra_info;
 
index d247f87708031814c955494f05f5d4bd88e7c44c..2e2f4159cd90ed7b4bf67e82a4ff0c99df20360e 100644 (file)
@@ -329,14 +329,6 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
                lua_setfield(L, -2, "ipset");
                break;
        }
-       case DPLANE_OP_ADDR_INSTALL:
-       case DPLANE_OP_ADDR_UNINSTALL:
-       case DPLANE_OP_INTF_ADDR_ADD:
-       case DPLANE_OP_INTF_ADDR_DEL:
-       case DPLANE_OP_INTF_INSTALL:
-       case DPLANE_OP_INTF_UPDATE:
-       case DPLANE_OP_INTF_DELETE:
-               break;
        case DPLANE_OP_NEIGH_INSTALL:
        case DPLANE_OP_NEIGH_UPDATE:
        case DPLANE_OP_NEIGH_DELETE:
@@ -418,6 +410,17 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
                }
                lua_setfield(L, -2, "gre");
 
+       case DPLANE_OP_ADDR_INSTALL:
+       case DPLANE_OP_ADDR_UNINSTALL:
+       case DPLANE_OP_INTF_ADDR_ADD:
+       case DPLANE_OP_INTF_ADDR_DEL:
+       case DPLANE_OP_INTF_INSTALL:
+       case DPLANE_OP_INTF_UPDATE:
+       case DPLANE_OP_INTF_DELETE:
+       case DPLANE_OP_TC_INSTALL:
+       case DPLANE_OP_TC_UPDATE:
+       case DPLANE_OP_TC_DELETE:
+               /* Not currently handled */
        case DPLANE_OP_INTF_NETCONFIG: /*NYI*/
        case DPLANE_OP_NONE:
                break;
index 553864d089e2c17cc32a4c2b20c2ca50fc568c97..6624f0beb993913543ed8645fd47e73e9ac571b7 100644 (file)
@@ -218,7 +218,7 @@ static int zebra_vrf_disable(struct vrf *vrf)
                if_nbr_ipv6ll_to_ipv4ll_neigh_del_all(ifp);
 
        /* clean-up work queues */
-       rib_meta_queue_free_vrf(zrouter.mq, zvrf);
+       meta_queue_free(zrouter.mq, zvrf);
 
        /* Cleanup (free) routing tables and NHT tables. */
        for (afi = AFI_IP; afi <= AFI_IP6; afi++) {
@@ -253,7 +253,7 @@ static int zebra_vrf_delete(struct vrf *vrf)
        table_manager_disable(zvrf);
 
        /* clean-up work queues */
-       rib_meta_queue_free_vrf(zrouter.mq, zvrf);
+       meta_queue_free(zrouter.mq, zvrf);
 
        /* Free Vxlan and MPLS. */
        zebra_vxlan_close_tables(zvrf);
index 6ae55cdba42b0fdec27c22ec1aaf9f8dbc1aa72c..525e0366e78263381b31abe98bfd8d5f63665e43 100644 (file)
@@ -66,8 +66,6 @@
 #include "zebra/rtadv.h"
 #include "zebra/zebra_neigh.h"
 
-extern int allow_delete;
-
 /* context to manage dumps in multiple tables or vrfs */
 struct route_show_ctx {
        bool multi;       /* dump multiple tables or vrf */
@@ -2699,7 +2697,7 @@ DEFUN (allow_external_route_update,
        "allow-external-route-update",
        "Allow FRR routes to be overwritten by external processes\n")
 {
-       allow_delete = 1;
+       zrouter.allow_delete = true;
 
        return CMD_SUCCESS;
 }
@@ -2710,7 +2708,7 @@ DEFUN (no_allow_external_route_update,
        NO_STR
        "Allow FRR routes to be overwritten by external processes\n")
 {
-       allow_delete = 0;
+       zrouter.allow_delete = false;
 
        return CMD_SUCCESS;
 }
@@ -3911,7 +3909,7 @@ DEFPY (zebra_nexthop_group_keep,
 
 static int config_write_protocol(struct vty *vty)
 {
-       if (allow_delete)
+       if (zrouter.allow_delete)
                vty_out(vty, "allow-external-route-update\n");
 
        if (zrouter.nhg_keep != ZEBRA_DEFAULT_NHG_KEEP_TIMER)
@@ -4011,6 +4009,8 @@ DEFUN (show_zebra,
        ttable_add_row(table, "Kernel NHG|%s",
                       zrouter.supports_nhgs ? "Available" : "Unavailable");
 
+       ttable_add_row(table, "Allow Non FRR route deletion|%s",
+                      zrouter.allow_delete ? "Yes" : "No");
        ttable_add_row(table, "v4 All LinkDown Routes|%s",
                       zrouter.all_linkdownv4 ? "On" : "Off");
        ttable_add_row(table, "v4 Default LinkDown Routes|%s",
@@ -4033,6 +4033,7 @@ DEFUN (show_zebra,
        vty_out(vty, "%s\n", out);
        XFREE(MTYPE_TMP, out);
 
+       ttable_del(table);
        vty_out(vty,
                "                            Route      Route      Neighbor   LSP        LSP\n");
        vty_out(vty,
index 4cf309f7fc32546957e4feaafa35223991885745..5a6321ae7e538e944b4456fe2bb0bab25b4b2648 100644 (file)
@@ -4038,6 +4038,19 @@ int zebra_vxlan_dp_network_mac_add(struct interface *ifp,
        struct zebra_evpn_es *es;
        struct interface *acc_ifp;
 
+       /* A netlink message that carries a vid has no nexthop,
+        * so skip it.
+        */
+       if (vid) {
+               if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC)
+                       zlog_debug("dpAdd MAC %pEA VID %u - ignore as no nhid",
+                                  macaddr, vid);
+               return 0;
+       }
+
+       /* The netlink message has no vid, so take it from the vxlan interface. */
+       vid = ((struct zebra_if *)ifp->info)->l2info.vxl.access_vlan;
+
        /* if remote mac delete the local entry */
        if (!nhg_id || !zebra_evpn_nhg_is_local_es(nhg_id, &es)
            || !zebra_evpn_es_local_mac_via_network_port(es)) {
@@ -4049,7 +4062,7 @@ int zebra_vxlan_dp_network_mac_add(struct interface *ifp,
        }
 
        /* If local MAC on a down local ES translate the network-mac-add
-        * to a local-inactive-mac-add
+        * to a local-active-mac-add
         */
        if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC)
                zlog_debug("dpAdd local-nw-MAC %pEA VID %u", macaddr, vid);
@@ -6260,8 +6273,7 @@ static int zebra_evpn_cfg_clean_up(struct zserv *client)
  */
 extern void zebra_vxlan_handle_result(struct zebra_dplane_ctx *ctx)
 {
-       /* TODO -- anything other than freeing the context? */
-       dplane_ctx_fini(&ctx);
+       return;
 }
 
 /* Cleanup BGP EVPN configuration upon client disconnect */
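
In the zebra_vxlan.c hunk above, zebra_vxlan_dp_network_mac_add() now ignores netlink MAC entries that arrive with a vid (they carry no nexthop-group id) and, for the untagged ones, picks the vid up from the VXLAN interface's access VLAN before proceeding. A tiny standalone guard-clause sketch of that flow (fake_vxlan_if and mac_add are invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct fake_vxlan_if {
	uint16_t access_vlan;
};

static int mac_add(const struct fake_vxlan_if *ifp, uint16_t vid, uint32_t nhg_id)
{
	/* vid-tagged entries come without an nhg id: nothing to do */
	if (vid) {
		printf("ignore vid %u entry: no nhg\n", vid);
		return 0;
	}

	/* otherwise the message has no vid, so use the interface's */
	vid = ifp->access_vlan;
	printf("add MAC on vid %u, nhg %u\n", vid, nhg_id);
	return 0;
}

int main(void)
{
	struct fake_vxlan_if vxl = {.access_vlan = 100};

	mac_add(&vxl, 10, 0); /* skipped */
	mac_add(&vxl, 0, 7);  /* vid taken from the interface */
	return 0;
}

The same file's zebra_vxlan_handle_result() becomes an empty stub, since the dataplane context is now freed centrally in zebra_rib.c.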
index 403f6c0d99da58004fad633726def67570b8f124..f76b29deff94b1b1dc7a5092a5555cb9eeb65841 100644 (file)
@@ -239,7 +239,7 @@ static void zserv_write(struct thread *thread)
 
        cache = stream_fifo_new();
 
-       frr_with_mutex(&client->obuf_mtx) {
+       frr_with_mutex (&client->obuf_mtx) {
                while (stream_fifo_head(client->obuf_fifo))
                        stream_fifo_push(cache,
                                         stream_fifo_pop(client->obuf_fifo));
@@ -432,7 +432,7 @@ static void zserv_read(struct thread *thread)
                                      memory_order_relaxed);
 
                /* publish read packets on client's input queue */
-               frr_with_mutex(&client->ibuf_mtx) {
+               frr_with_mutex (&client->ibuf_mtx) {
                        while (cache->head)
                                stream_fifo_push(client->ibuf_fifo,
                                                 stream_fifo_pop(cache));
@@ -501,7 +501,7 @@ static void zserv_process_messages(struct thread *thread)
        uint32_t p2p = zrouter.packets_to_process;
        bool need_resched = false;
 
-       frr_with_mutex(&client->ibuf_mtx) {
+       frr_with_mutex (&client->ibuf_mtx) {
                uint32_t i;
                for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
                     ++i) {
@@ -531,7 +531,7 @@ static void zserv_process_messages(struct thread *thread)
 
 int zserv_send_message(struct zserv *client, struct stream *msg)
 {
-       frr_with_mutex(&client->obuf_mtx) {
+       frr_with_mutex (&client->obuf_mtx) {
                stream_fifo_push(client->obuf_fifo, msg);
        }
 
@@ -547,7 +547,7 @@ int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo)
 {
        struct stream *msg;
 
-       frr_with_mutex(&client->obuf_mtx) {
+       frr_with_mutex (&client->obuf_mtx) {
                msg = stream_fifo_pop(fifo);
                while (msg) {
                        stream_fifo_push(client->obuf_fifo, msg);
@@ -684,7 +684,7 @@ void zserv_close_client(struct zserv *client)
         * Final check in case the client struct is in use in another
         * pthread: if not in-use, continue and free the client
         */
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                if (client->busy_count <= 0) {
                        /* remove from client list */
                        listnode_delete(zrouter.client_list, client);
@@ -761,7 +761,7 @@ static struct zserv *zserv_client_create(int sock)
        }
 
        /* Add this client to linked list. */
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                listnode_add(zrouter.client_list, client);
        }
 
@@ -797,7 +797,7 @@ struct zserv *zserv_acquire_client(uint8_t proto, unsigned short instance,
 {
        struct zserv *client = NULL;
 
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                client = find_client_internal(proto, instance, session_id);
                if (client) {
                        /* Don't return a dead/closed client object */
@@ -823,7 +823,7 @@ void zserv_release_client(struct zserv *client)
         * for it to be deleted as soon as we release the lock, so we won't
         * touch the object again.
         */
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                client->busy_count--;
 
                if (client->busy_count <= 0) {
@@ -1229,7 +1229,7 @@ struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
 {
        struct zserv *client;
 
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                client = find_client_internal(proto, instance, 0);
        }
 
@@ -1244,7 +1244,7 @@ struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
 {
        struct zserv *client;
 
-       frr_with_mutex(&client_mutex) {
+       frr_with_mutex (&client_mutex) {
                client = find_client_internal(proto, instance, session_id);
        }
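
The zserv.c hunks are formatting only: a space now separates frr_with_mutex from its parenthesis, presumably because the macro is formatted like a control-flow keyword. The construct itself is a scoped-lock block; a rough standalone analogue on plain pthreads is sketched below (with_mutex is an invented stand-in, and unlike FRR's macro this naive version would leave the mutex locked if the block were exited with break or goto):

#include <pthread.h>
#include <stdio.h>

/* lock on entry, run the block once, unlock when the block finishes */
#define with_mutex(m)                                                          \
	for (int _once = (pthread_mutex_lock(m), 1); _once;                    \
	     _once = (pthread_mutex_unlock(m), 0))

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static int counter;

int main(void)
{
	with_mutex(&mtx) {
		counter++;
		printf("counter=%d\n", counter);
	}
	return 0;
}

The appeal of the pattern in zserv.c is that the lock and unlock always bracket the critical section textually, so the FIFO push/pop paths cannot forget the unlock on an early exit.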