/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

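/* Count how many bridge_vlan_info entries a compressed VLAN dump will carry:
 * consecutive VIDs with identical flags collapse into a RANGE_BEGIN/RANGE_END
 * pair (two entries), everything else costs one entry.
 */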
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

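/* Size estimate for the IFLA_AF_SPEC payload (VLAN infos and, on tunnel
 * ports, tunnel info) of one bridge or port link message, honoring the
 * RTEXT_FILTER_BRVLAN* bits in filter_mask.
 */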
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)); /* IFLA_AF_SPEC */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}

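/* Emit either a single IFLA_BRIDGE_VLAN_INFO attribute or a
 * RANGE_BEGIN/RANGE_END pair when vid_start..vid_end covers more than one
 * VID.
 */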
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

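/* Uncompressed variant: one IFLA_BRIDGE_VLAN_INFO attribute per configured
 * VLAN, with PVID/UNTAGGED reported in the per-entry flags.
 */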
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}


/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

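/* Apply a single VLAN add (RTM_SETLINK) or delete (RTM_DELLINK) to a port,
 * or to the bridge itself when p is NULL.
 */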
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo)
{
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
			if (err)
				break;
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
		}
		break;

	case RTM_DELLINK:
		if (p) {
			nbp_vlan_delete(p, vinfo->vid);
			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
				br_vlan_delete(p->br, vinfo->vid);
		} else {
			br_vlan_delete(br, vinfo->vid);
		}
		break;
	}

	return err;
}

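/* Validate one bridge_vlan_info attribute and, when it closes a
 * RANGE_BEGIN/RANGE_END pair, expand the range into per-VID br_vlan_info()
 * calls.
 */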
static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return 0;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr);
}

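/* Walk the IFLA_AF_SPEC nest and dispatch VLAN and VLAN tunnel attributes
 * for RTM_SETLINK/RTM_DELLINK. p is NULL when the target is the bridge
 * device itself.
 */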
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
	[IFLA_BRPORT_COST] = { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (tb[attrtype]) {
		u8 flag = nla_get_u8(tb[attrtype]);

		if (flag)
			p->flags |= mask;
		else
			p->flags &= ~mask;
	}
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif
	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

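/* AF_BRIDGE RTM_SETLINK entry point. Typically exercised from iproute2,
 * e.g. "bridge link set dev <port> learning off" (IFLA_PROTINFO) or
 * "bridge vlan add dev <port> vid 100" (IFLA_AF_SPEC).
 */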
/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK);
	}

	if (err == 0)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK);
	if (err == 0)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}
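
/* Validate IFLA_ADDRESS and (when VLAN filtering is built in) the requested
 * VLAN protocol before a bridge device is created or changed.
 */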
static int br_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}
#endif

	return 0;
}

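/* Apply IFLA_BRPORT_* attributes arriving through the slave side of the
 * rtnl_link interface, e.g. "ip link set dev <port> type bridge_slave ...".
 */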
static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

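/* Apply IFLA_BR_* attributes to the bridge device. Used both when the
 * bridge is created (via br_dev_newlink()) and for later
 * "ip link set ... type bridge ..." changes.
 */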
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

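/* rtnl_link newlink handler: register the bridge netdevice and then apply
 * any IFLA_BR_* attributes, rolling back the registration if that fails.
 */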
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data);
	if (err)
		unregister_netdevice(dev);
	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

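/* Size estimate for LINK_XSTATS dumps: one bridge_vlan_xstats per VLAN entry
 * plus one br_mcast_stats blob, wrapped in a LINK_XSTATS_TYPE_BRIDGE nest.
 */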
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

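/* Fill the LINK_XSTATS_TYPE_BRIDGE nest. *prividx is the resume point of a
 * partial dump: entries with an index below it were already sent and are
 * skipped here.
 */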
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}