// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

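/* Per-frame state shared by the forwarding code.
 *
 * skb_std and skb_hsr hold the untagged and HSR-tagged versions of the
 * frame; one of them is the received skb and the other is created on demand.
 * port_rcv is the port the frame entered on (the master port for locally
 * generated frames) and node_src is the node table entry for the sender.
 * sequence_nr is used for duplicate detection, and the flags describe
 * whether and how the frame should be delivered locally.
 */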
struct hsr_frame_info {
	struct sk_buff *skb_std;
	struct sk_buff *skb_hsr;
	struct hsr_port *port_rcv;
	struct hsr_node *node_src;
	u16 sequence_nr;
	bool is_supervision;
	bool is_vlan;
	bool is_local_dest;
	bool is_local_exclusive;
};

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK)
		return false;
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}

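/* Create a copy of an HSR-tagged frame with the HSR_HLEN tag bytes removed.
 * The destination/source addresses (and any VLAN header) are copied back in
 * front of the encapsulated protocol, and csum_start is adjusted to match.
 */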
static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
					   struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

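/* Return a clone of the untagged version of the frame, creating and caching
 * it in frame->skb_std on first use.
 */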
static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
					      struct hsr_port *port)
{
	if (!frame->skb_std)
		frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

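/* Fill in the HSR tag of an outgoing frame: lane id 0 for slave A and 1 for
 * slave B, the LSDU size (frame length minus the 14-byte Ethernet header,
 * minus 4 more bytes if VLAN tagged), and the sequence number. The original
 * ethertype is moved into encap_proto and replaced by ETH_P_HSR (HSRv1) or
 * ETH_P_PRP (HSRv0).
 */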
static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
			 struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lane_id;
	int lsdu_size;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
}

static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
					 struct hsr_frame_info *frame,
					 struct hsr_port *port)
{
	int movelen;
	unsigned char *dst, *src;
	struct sk_buff *skb;

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	hsr_fill_tag(skb, frame, port, port->hsr->prot_version);

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
					    struct hsr_port *port)
{
	if (frame->skb_hsr)
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);

	if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) {
		WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
		return NULL;
	}

	return create_tagged_skb(frame->skb_std, frame, port);
}

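/* Deliver an already stripped frame to the stack through the master (hsr)
 * device, after substituting the source address and updating rx statistics.
 */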
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	/* Record the length before netif_rx(); the skb must not be touched
	 * after it has been handed to the stack.
	 */
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

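/* Transmit the frame on a slave port. For locally generated frames (received
 * from the master port), substitute the destination address for the ring and
 * use the outgoing slave's own MAC address as source.
 */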
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace the
		 * source MAC address of the outgoing frame with that of the
		 * outgoing slave.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - To the local HSR master only if the frame is directly addressed to it,
 *   or is a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before */
		if (hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame->skb_hsr,
					     frame->node_src,
					     frame->port_rcv);
			continue;
		}

		if (port->type != HSR_PT_MASTER)
			skb = frame_get_tagged_skb(frame, port);
		else
			skb = frame_get_stripped_skb(frame, port);
		if (!skb) {
			/* FIXME: Record the dropped frame? */
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}

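/* Decide how the frame relates to the local node: is_local_exclusive is set
 * when the destination MAC is one of our own addresses (the frame is then
 * also marked PACKET_HOST), and is_local_dest when it should be delivered
 * to the master, i.e. for host, multicast or broadcast frames.
 */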
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

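/* Classify the frame and fill in the frame_info: look up (or create) the
 * source node entry, detect supervision and VLAN frames, and record whether
 * the frame arrived HSR tagged (take the sequence number from the tag) or
 * untagged (assign the next sequence number from the master node's counter).
 * Returns 0 on success or -1 if no node entry could be obtained.
 */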
static int hsr_fill_frame_info(struct hsr_frame_info *frame,
			       struct sk_buff *skb, struct hsr_port *port)
{
	struct ethhdr *ethhdr;
	unsigned long irqflags;

	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
		frame->is_vlan = true;
		/* FIXME: */
		WARN_ONCE(1, "HSR: VLAN not yet supported");
	}
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		frame->skb_std = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
	} else {
		frame->skb_std = skb;
		frame->skb_hsr = NULL;
		/* Sequence nr for the master node */
		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
		frame->sequence_nr = port->hsr->sequence_nr;
		port->hsr->sequence_nr++;
		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
	}

	frame->port_rcv = port;
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

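/* Entry point for forwarding: called both for frames received on a slave
 * port and for frames sent out through the master (hsr) device. The original
 * skb is stored in the frame_info (as skb_hsr or skb_std); the per-port
 * copies are clones or tagged/stripped copies, and the original is freed
 * here at the end.
 */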
/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (hsr_fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* This function is called for ingress frames as well as for egress
	 * from the master port, so only update tx statistics when called for
	 * the master port.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	if (frame.skb_hsr)
		kfree_skb(frame.skb_hsr);
	if (frame.skb_std)
		kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}