net/8021q/vlan_netlink.c
/*
 *	VLAN netlink control interface
 *
 *	Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	version 2 as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "vlan.h"


static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
	[IFLA_VLAN_ID]		= { .type = NLA_U16 },
	[IFLA_VLAN_FLAGS]	= { .len = sizeof(struct ifla_vlan_flags) },
	[IFLA_VLAN_EGRESS_QOS]	= { .type = NLA_NESTED },
	[IFLA_VLAN_INGRESS_QOS]	= { .type = NLA_NESTED },
	[IFLA_VLAN_PROTOCOL]	= { .type = NLA_U16 },
};

static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
	[IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
};


static inline int vlan_validate_qos_map(struct nlattr *attr)
{
	if (!attr)
		return 0;
	return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy,
				   NULL);
}

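/*
 * Validate the netlink attributes supplied for a VLAN link: hardware
 * address, VLAN protocol (802.1Q or 802.1ad), VLAN ID range, flags and
 * the nested ingress/egress QoS mappings.
 */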
static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ifla_vlan_flags *flags;
	u16 id;
	int err;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_VLAN_ID]) {
		id = nla_get_u16(data[IFLA_VLAN_ID]);
		if (id >= VLAN_VID_MASK)
			return -ERANGE;
	}
	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		if ((flags->flags & flags->mask) &
		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
		      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
			return -EINVAL;
	}

	err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
	if (err < 0)
		return err;
	err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
	if (err < 0)
		return err;
	return 0;
}

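/*
 * Apply netlink-requested changes to an existing VLAN device:
 * device flags and ingress/egress priority mappings.
 */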
static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ifla_vlan_flags *flags;
	struct ifla_vlan_qos_mapping *m;
	struct nlattr *attr;
	int rem;

	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		vlan_dev_change_flags(dev, flags->flags, flags->mask);
	}
	if (data[IFLA_VLAN_INGRESS_QOS]) {
		nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
			m = nla_data(attr);
			vlan_dev_set_ingress_priority(dev, m->to, m->from);
		}
	}
	if (data[IFLA_VLAN_EGRESS_QOS]) {
		nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
			m = nla_data(attr);
			vlan_dev_set_egress_priority(dev, m->from, m->to);
		}
	}
	return 0;
}

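/*
 * Create a VLAN device on top of the real device referenced by IFLA_LINK.
 * IFLA_VLAN_ID is mandatory; the protocol defaults to 802.1Q and the MTU
 * is derived from (and limited by) the lower device.
 */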
static int vlan_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev;
	unsigned int max_mtu;
	__be16 proto;
	int err;

	if (!data[IFLA_VLAN_ID])
		return -EINVAL;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	if (data[IFLA_VLAN_PROTOCOL])
		proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
	else
		proto = htons(ETH_P_8021Q);

	vlan->vlan_proto = proto;
	vlan->vlan_id	 = nla_get_u16(data[IFLA_VLAN_ID]);
	vlan->real_dev	 = real_dev;
	dev->priv_flags |= (real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
	vlan->flags	 = VLAN_FLAG_REORDER_HDR;

	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
	if (err < 0)
		return err;

	max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
						     real_dev->mtu;
	if (!tb[IFLA_MTU])
		dev->mtu = max_mtu;
	else if (dev->mtu > max_mtu)
		return -EINVAL;

	err = vlan_changelink(dev, tb, data, extack);
	if (err < 0)
		return err;

	return register_vlan_dev(dev, extack);
}

static inline size_t vlan_qos_map_size(unsigned int n)
{
	if (n == 0)
		return 0;
	/* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
	return nla_total_size(sizeof(struct nlattr)) +
	       nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
}

static size_t vlan_get_size(const struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
	       nla_total_size(2) +	/* IFLA_VLAN_ID */
	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
	       vlan_qos_map_size(vlan->nr_egress_mappings);
}

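/*
 * Dump the VLAN configuration (protocol, VLAN ID, flags and both QoS
 * priority maps) into a netlink message.
 */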
static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_priority_tci_mapping *pm;
	struct ifla_vlan_flags f;
	struct ifla_vlan_qos_mapping m;
	struct nlattr *nest;
	unsigned int i;

	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
		goto nla_put_failure;
	if (vlan->flags) {
		f.flags = vlan->flags;
		f.mask	= ~0;
		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
			goto nla_put_failure;
	}
	if (vlan->nr_ingress_mappings) {
		nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
		if (nest == NULL)
			goto nla_put_failure;

		for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
			if (!vlan->ingress_priority_map[i])
				continue;

			m.from = i;
			m.to   = vlan->ingress_priority_map[i];
			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
				    sizeof(m), &m))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (vlan->nr_egress_mappings) {
		nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS);
		if (nest == NULL)
			goto nla_put_failure;

		for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
			for (pm = vlan->egress_priority_map[i]; pm;
			     pm = pm->next) {
				if (!pm->vlan_qos)
					continue;

				m.from = pm->priority;
				/* recover the 3-bit PCP value from the stored TCI bits */
				m.to   = (pm->vlan_qos >> 13) & 0x7;
				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
					    sizeof(m), &m))
					goto nla_put_failure;
			}
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vlan_get_link_net(const struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	return dev_net(real_dev);
}

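/* rtnl_link_ops for the "vlan" link kind, registered at module init */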
struct rtnl_link_ops vlan_link_ops __read_mostly = {
	.kind		= "vlan",
	.maxtype	= IFLA_VLAN_MAX,
	.policy		= vlan_policy,
	.priv_size	= sizeof(struct vlan_dev_priv),
	.setup		= vlan_setup,
	.validate	= vlan_validate,
	.newlink	= vlan_newlink,
	.changelink	= vlan_changelink,
	.dellink	= unregister_vlan_dev,
	.get_size	= vlan_get_size,
	.fill_info	= vlan_fill_info,
	.get_link_net	= vlan_get_link_net,
};

int __init vlan_netlink_init(void)
{
	return rtnl_link_register(&vlan_link_ops);
}

void __exit vlan_netlink_fini(void)
{
	rtnl_link_unregister(&vlan_link_ops);
}

MODULE_ALIAS_RTNL_LINK("vlan");