1 // SPDX-License-Identifier: GPL-2.0-or-later
3 #include <linux/mrp_bridge.h>
4 #include "br_private_mrp.h"
6 static const u8 mrp_test_dmac
[ETH_ALEN
] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
8 static struct net_bridge_port
*br_mrp_get_port(struct net_bridge
*br
,
11 struct net_bridge_port
*res
= NULL
;
12 struct net_bridge_port
*port
;
14 list_for_each_entry(port
, &br
->port_list
, list
) {
15 if (port
->dev
->ifindex
== ifindex
) {
24 static struct br_mrp
*br_mrp_find_id(struct net_bridge
*br
, u32 ring_id
)
26 struct br_mrp
*res
= NULL
;
29 list_for_each_entry_rcu(mrp
, &br
->mrp_list
, list
,
30 lockdep_rtnl_is_held()) {
31 if (mrp
->ring_id
== ring_id
) {
40 static bool br_mrp_unique_ifindex(struct net_bridge
*br
, u32 ifindex
)
44 list_for_each_entry_rcu(mrp
, &br
->mrp_list
, list
,
45 lockdep_rtnl_is_held()) {
46 struct net_bridge_port
*p
;
48 p
= rtnl_dereference(mrp
->p_port
);
49 if (p
&& p
->dev
->ifindex
== ifindex
)
52 p
= rtnl_dereference(mrp
->s_port
);
53 if (p
&& p
->dev
->ifindex
== ifindex
)
60 static struct br_mrp
*br_mrp_find_port(struct net_bridge
*br
,
61 struct net_bridge_port
*p
)
63 struct br_mrp
*res
= NULL
;
66 list_for_each_entry_rcu(mrp
, &br
->mrp_list
, list
,
67 lockdep_rtnl_is_held()) {
68 if (rcu_access_pointer(mrp
->p_port
) == p
||
69 rcu_access_pointer(mrp
->s_port
) == p
) {
78 static int br_mrp_next_seq(struct br_mrp
*mrp
)
84 static struct sk_buff
*br_mrp_skb_alloc(struct net_bridge_port
*p
,
85 const u8
*src
, const u8
*dst
)
87 struct ethhdr
*eth_hdr
;
91 skb
= dev_alloc_skb(MRP_MAX_FRAME_LENGTH
);
96 skb
->protocol
= htons(ETH_P_MRP
);
97 skb
->priority
= MRP_FRAME_PRIO
;
98 skb_reserve(skb
, sizeof(*eth_hdr
));
100 eth_hdr
= skb_push(skb
, sizeof(*eth_hdr
));
101 ether_addr_copy(eth_hdr
->h_dest
, dst
);
102 ether_addr_copy(eth_hdr
->h_source
, src
);
103 eth_hdr
->h_proto
= htons(ETH_P_MRP
);
105 version
= skb_put(skb
, sizeof(*version
));
106 *version
= cpu_to_be16(MRP_VERSION
);
111 static void br_mrp_skb_tlv(struct sk_buff
*skb
,
112 enum br_mrp_tlv_header_type type
,
115 struct br_mrp_tlv_hdr
*hdr
;
117 hdr
= skb_put(skb
, sizeof(*hdr
));
119 hdr
->length
= length
;
122 static void br_mrp_skb_common(struct sk_buff
*skb
, struct br_mrp
*mrp
)
124 struct br_mrp_common_hdr
*hdr
;
126 br_mrp_skb_tlv(skb
, BR_MRP_TLV_HEADER_COMMON
, sizeof(*hdr
));
128 hdr
= skb_put(skb
, sizeof(*hdr
));
129 hdr
->seq_id
= cpu_to_be16(br_mrp_next_seq(mrp
));
130 memset(hdr
->domain
, 0xff, MRP_DOMAIN_UUID_LENGTH
);
133 static struct sk_buff
*br_mrp_alloc_test_skb(struct br_mrp
*mrp
,
134 struct net_bridge_port
*p
,
135 enum br_mrp_port_role_type port_role
)
137 struct br_mrp_ring_test_hdr
*hdr
= NULL
;
138 struct sk_buff
*skb
= NULL
;
143 skb
= br_mrp_skb_alloc(p
, p
->dev
->dev_addr
, mrp_test_dmac
);
147 br_mrp_skb_tlv(skb
, BR_MRP_TLV_HEADER_RING_TEST
, sizeof(*hdr
));
148 hdr
= skb_put(skb
, sizeof(*hdr
));
150 hdr
->prio
= cpu_to_be16(MRP_DEFAULT_PRIO
);
151 ether_addr_copy(hdr
->sa
, p
->br
->dev
->dev_addr
);
152 hdr
->port_role
= cpu_to_be16(port_role
);
153 hdr
->state
= cpu_to_be16(mrp
->ring_state
);
154 hdr
->transitions
= cpu_to_be16(mrp
->ring_transitions
);
155 hdr
->timestamp
= cpu_to_be32(jiffies_to_msecs(jiffies
));
157 br_mrp_skb_common(skb
, mrp
);
158 br_mrp_skb_tlv(skb
, BR_MRP_TLV_HEADER_END
, 0x0);
163 static void br_mrp_test_work_expired(struct work_struct
*work
)
165 struct delayed_work
*del_work
= to_delayed_work(work
);
166 struct br_mrp
*mrp
= container_of(del_work
, struct br_mrp
, test_work
);
167 struct net_bridge_port
*p
;
168 bool notify_open
= false;
171 if (time_before_eq(mrp
->test_end
, jiffies
))
174 if (mrp
->test_count_miss
< mrp
->test_max_miss
) {
175 mrp
->test_count_miss
++;
177 /* Notify that the ring is open only if the ring state is
178 * closed, otherwise it would continue to notify at every
181 if (mrp
->ring_state
== BR_MRP_RING_STATE_CLOSED
)
187 p
= rcu_dereference(mrp
->p_port
);
189 skb
= br_mrp_alloc_test_skb(mrp
, p
, BR_MRP_PORT_ROLE_PRIMARY
);
193 skb_reset_network_header(skb
);
196 if (notify_open
&& !mrp
->ring_role_offloaded
)
197 br_mrp_port_open(p
->dev
, true);
200 p
= rcu_dereference(mrp
->s_port
);
202 skb
= br_mrp_alloc_test_skb(mrp
, p
, BR_MRP_PORT_ROLE_SECONDARY
);
206 skb_reset_network_header(skb
);
209 if (notify_open
&& !mrp
->ring_role_offloaded
)
210 br_mrp_port_open(p
->dev
, true);
216 queue_delayed_work(system_wq
, &mrp
->test_work
,
217 usecs_to_jiffies(mrp
->test_interval
));
220 /* Deletes the MRP instance.
221 * note: called under rtnl_lock
223 static void br_mrp_del_impl(struct net_bridge
*br
, struct br_mrp
*mrp
)
225 struct net_bridge_port
*p
;
228 /* Stop sending MRP_Test frames */
229 cancel_delayed_work_sync(&mrp
->test_work
);
230 br_mrp_switchdev_send_ring_test(br
, mrp
, 0, 0, 0);
232 br_mrp_switchdev_del(br
, mrp
);
234 /* Reset the ports */
235 p
= rtnl_dereference(mrp
->p_port
);
237 spin_lock_bh(&br
->lock
);
238 state
= netif_running(br
->dev
) ?
239 BR_STATE_FORWARDING
: BR_STATE_DISABLED
;
241 p
->flags
&= ~BR_MRP_AWARE
;
242 spin_unlock_bh(&br
->lock
);
243 br_mrp_port_switchdev_set_state(p
, state
);
244 rcu_assign_pointer(mrp
->p_port
, NULL
);
247 p
= rtnl_dereference(mrp
->s_port
);
249 spin_lock_bh(&br
->lock
);
250 state
= netif_running(br
->dev
) ?
251 BR_STATE_FORWARDING
: BR_STATE_DISABLED
;
253 p
->flags
&= ~BR_MRP_AWARE
;
254 spin_unlock_bh(&br
->lock
);
255 br_mrp_port_switchdev_set_state(p
, state
);
256 rcu_assign_pointer(mrp
->s_port
, NULL
);
259 list_del_rcu(&mrp
->list
);
263 /* Adds a new MRP instance.
264 * note: called under rtnl_lock
266 int br_mrp_add(struct net_bridge
*br
, struct br_mrp_instance
*instance
)
268 struct net_bridge_port
*p
;
272 /* If the ring exists, it is not possible to create another one with the
275 mrp
= br_mrp_find_id(br
, instance
->ring_id
);
279 if (!br_mrp_get_port(br
, instance
->p_ifindex
) ||
280 !br_mrp_get_port(br
, instance
->s_ifindex
))
283 /* It is not possible to have the same port part of multiple rings */
284 if (!br_mrp_unique_ifindex(br
, instance
->p_ifindex
) ||
285 !br_mrp_unique_ifindex(br
, instance
->s_ifindex
))
288 mrp
= kzalloc(sizeof(*mrp
), GFP_KERNEL
);
292 mrp
->ring_id
= instance
->ring_id
;
294 p
= br_mrp_get_port(br
, instance
->p_ifindex
);
295 spin_lock_bh(&br
->lock
);
296 p
->state
= BR_STATE_FORWARDING
;
297 p
->flags
|= BR_MRP_AWARE
;
298 spin_unlock_bh(&br
->lock
);
299 rcu_assign_pointer(mrp
->p_port
, p
);
301 p
= br_mrp_get_port(br
, instance
->s_ifindex
);
302 spin_lock_bh(&br
->lock
);
303 p
->state
= BR_STATE_FORWARDING
;
304 p
->flags
|= BR_MRP_AWARE
;
305 spin_unlock_bh(&br
->lock
);
306 rcu_assign_pointer(mrp
->s_port
, p
);
308 INIT_DELAYED_WORK(&mrp
->test_work
, br_mrp_test_work_expired
);
309 list_add_tail_rcu(&mrp
->list
, &br
->mrp_list
);
311 err
= br_mrp_switchdev_add(br
, mrp
);
318 br_mrp_del_impl(br
, mrp
);
/* Deletes the MRP instance from which the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of a MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}
337 /* Deletes existing MRP instance based on ring_id
338 * note: called under rtnl_lock
340 int br_mrp_del(struct net_bridge
*br
, struct br_mrp_instance
*instance
)
342 struct br_mrp
*mrp
= br_mrp_find_id(br
, instance
->ring_id
);
347 br_mrp_del_impl(br
, mrp
);
352 /* Set port state, port state can be forwarding, blocked or disabled
353 * note: already called with rtnl_lock
355 int br_mrp_set_port_state(struct net_bridge_port
*p
,
356 enum br_mrp_port_state_type state
)
358 if (!p
|| !(p
->flags
& BR_MRP_AWARE
))
361 spin_lock_bh(&p
->br
->lock
);
363 if (state
== BR_MRP_PORT_STATE_FORWARDING
)
364 p
->state
= BR_STATE_FORWARDING
;
366 p
->state
= BR_STATE_BLOCKING
;
368 spin_unlock_bh(&p
->br
->lock
);
370 br_mrp_port_switchdev_set_state(p
, state
);
375 /* Set port role, port role can be primary or secondary
376 * note: already called with rtnl_lock
378 int br_mrp_set_port_role(struct net_bridge_port
*p
,
379 struct br_mrp_port_role
*role
)
383 if (!p
|| !(p
->flags
& BR_MRP_AWARE
))
386 mrp
= br_mrp_find_id(p
->br
, role
->ring_id
);
391 if (role
->role
== BR_MRP_PORT_ROLE_PRIMARY
)
392 rcu_assign_pointer(mrp
->p_port
, p
);
394 rcu_assign_pointer(mrp
->s_port
, p
);
396 br_mrp_port_switchdev_set_role(p
, role
->role
);
401 /* Set ring state, ring state can be only Open or Closed
402 * note: already called with rtnl_lock
404 int br_mrp_set_ring_state(struct net_bridge
*br
,
405 struct br_mrp_ring_state
*state
)
407 struct br_mrp
*mrp
= br_mrp_find_id(br
, state
->ring_id
);
412 if (mrp
->ring_state
== BR_MRP_RING_STATE_CLOSED
&&
413 state
->ring_state
!= BR_MRP_RING_STATE_CLOSED
)
414 mrp
->ring_transitions
++;
416 mrp
->ring_state
= state
->ring_state
;
418 br_mrp_switchdev_set_ring_state(br
, mrp
, state
->ring_state
);
423 /* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
424 * MRC(Media Redundancy Client).
425 * note: already called with rtnl_lock
427 int br_mrp_set_ring_role(struct net_bridge
*br
,
428 struct br_mrp_ring_role
*role
)
430 struct br_mrp
*mrp
= br_mrp_find_id(br
, role
->ring_id
);
436 mrp
->ring_role
= role
->ring_role
;
438 /* If there is an error just bailed out */
439 err
= br_mrp_switchdev_set_ring_role(br
, mrp
, role
->ring_role
);
440 if (err
&& err
!= -EOPNOTSUPP
)
443 /* Now detect if the HW actually applied the role or not. If the HW
444 * applied the role it means that the SW will not to do those operations
445 * anymore. For example if the role ir MRM then the HW will notify the
446 * SW when ring is open, but if the is not pushed to the HW the SW will
447 * need to detect when the ring is open
449 mrp
->ring_role_offloaded
= err
== -EOPNOTSUPP
? 0 : 1;
454 /* Start to generate MRP test frames, the frames are generated by HW and if it
455 * fails, they are generated by the SW.
456 * note: already called with rtnl_lock
458 int br_mrp_start_test(struct net_bridge
*br
,
459 struct br_mrp_start_test
*test
)
461 struct br_mrp
*mrp
= br_mrp_find_id(br
, test
->ring_id
);
466 /* Try to push it to the HW and if it fails then continue to generate in
467 * SW and if that also fails then return error
469 if (!br_mrp_switchdev_send_ring_test(br
, mrp
, test
->interval
,
470 test
->max_miss
, test
->period
))
473 mrp
->test_interval
= test
->interval
;
474 mrp
->test_end
= jiffies
+ usecs_to_jiffies(test
->period
);
475 mrp
->test_max_miss
= test
->max_miss
;
476 mrp
->test_count_miss
= 0;
477 queue_delayed_work(system_wq
, &mrp
->test_work
,
478 usecs_to_jiffies(test
->interval
));
483 /* Process only MRP Test frame. All the other MRP frames are processed by
484 * userspace application
485 * note: already called with rcu_read_lock
487 static void br_mrp_mrm_process(struct br_mrp
*mrp
, struct net_bridge_port
*port
,
490 const struct br_mrp_tlv_hdr
*hdr
;
491 struct br_mrp_tlv_hdr _hdr
;
493 /* Each MRP header starts with a version field which is 16 bits.
494 * Therefore skip the version and get directly the TLV header.
496 hdr
= skb_header_pointer(skb
, sizeof(uint16_t), sizeof(_hdr
), &_hdr
);
500 if (hdr
->type
!= BR_MRP_TLV_HEADER_RING_TEST
)
503 mrp
->test_count_miss
= 0;
505 /* Notify the userspace that the ring is closed only when the ring is
508 if (mrp
->ring_state
!= BR_MRP_RING_STATE_CLOSED
)
509 br_mrp_port_open(port
->dev
, false);
512 /* This will just forward the frame to the other mrp ring port(MRC role) or will
514 * note: already called with rcu_read_lock
516 static int br_mrp_rcv(struct net_bridge_port
*p
,
517 struct sk_buff
*skb
, struct net_device
*dev
)
519 struct net_device
*s_dev
, *p_dev
, *d_dev
;
520 struct net_bridge_port
*p_port
, *s_port
;
521 struct net_bridge
*br
;
522 struct sk_buff
*nskb
;
525 /* If port is disabled don't accept any frames */
526 if (p
->state
== BR_STATE_DISABLED
)
530 mrp
= br_mrp_find_port(br
, p
);
534 p_port
= rcu_dereference(mrp
->p_port
);
538 s_port
= rcu_dereference(mrp
->s_port
);
542 /* If the role is MRM then don't forward the frames */
543 if (mrp
->ring_role
== BR_MRP_RING_ROLE_MRM
) {
544 br_mrp_mrm_process(mrp
, p
, skb
);
548 /* Clone the frame and forward it on the other MRP port */
549 nskb
= skb_clone(skb
, GFP_ATOMIC
);
562 skb_push(nskb
, ETH_HLEN
);
563 dev_queue_xmit(nskb
);
568 /* Check if the frame was received on a port that is part of MRP ring
569 * and if the frame has MRP eth. In that case process the frame otherwise do
571 * note: already called with rcu_read_lock
573 int br_mrp_process(struct net_bridge_port
*p
, struct sk_buff
*skb
)
575 /* If there is no MRP instance do normal forwarding */
576 if (likely(!(p
->flags
& BR_MRP_AWARE
)))
579 if (unlikely(skb
->protocol
== htons(ETH_P_MRP
)))
580 return br_mrp_rcv(p
, skb
, p
->dev
);
586 bool br_mrp_enabled(struct net_bridge
*br
)
588 return !list_empty(&br
->mrp_list
);