]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - net/dcb/dcbnl.c
3f5a5f71057675f8fde30c618c7f59a55379a04b
[mirror_ubuntu-hirsute-kernel.git] / net / dcb / dcbnl.c
1 /*
2 * Copyright (c) 2008-2011, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * Description: Data Center Bridging netlink interface
17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */
19
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/init.h>
29 #include <net/sock.h>
30
31 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
32 * intended to allow network traffic with differing requirements
33 * (highly reliable, no drops vs. best effort vs. low latency) to operate
34 * and co-exist on Ethernet. Current DCB features are:
35 *
36 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
37 * framework for assigning bandwidth guarantees to traffic classes.
38 *
39 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
40 * can work independently for each 802.1p priority.
41 *
42 * Congestion Notification - provides a mechanism for end-to-end congestion
43 * control for protocols which do not have built-in congestion management.
44 *
45 * More information about the emerging standards for these Ethernet features
46 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
47 *
48 * This file implements an rtnetlink interface to allow configuration of DCB
49 * features for capable devices.
50 */
51
52 /**************** DCB attribute policies *************************************/
53
/* Top-level DCB netlink attribute policy: validates the attributes that may
 * appear directly in an RTM_GETDCB/RTM_SETDCB message.  Nested attribute
 * sets (PFC, PG, CAP, BCN, APP, IEEE, FEATCFG) get their own policies below. */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
70
/* DCB priority flow control to User Priority nested attributes: one u8 per
 * 802.1p user priority (0-7), plus an ALL flag that selects every priority. */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
83
/* DCB priority grouping nested attributes: a nested per-TC parameter set for
 * each traffic class (0-7) and a u8 bandwidth percentage per bandwidth-group
 * id, each with an ALL selector. */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
105
/* DCB traffic class nested attributes: the per-TC parameters carried inside
 * each DCB_PG_ATTR_TC_* nest (PG id, UP mapping, strict priority, BW pct). */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};
114
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
127
/* DCB number of traffic classes nested attributes.  (The original comment
 * said "capabilities" — a copy/paste slip from the table above.) */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
134
/* DCB BCN (Backward Congestion Notification) nested attributes: a u8 per
 * rate-limiter priority (RP 0-7) and u32 tuning parameters, with ALL flags. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
163
/* DCB APP nested attributes: selector (id type), protocol id and priority. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
170
/* IEEE 802.1Qaz nested attributes: fixed-size binary structs (validated by
 * minimum length only) plus the nested APP table. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
180
/* Policy for entries inside DCB_ATTR_IEEE_APP_TABLE: each one a dcb_app. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};
184
/* DCB feature state nested attributes.  (The original comment said "number
 * of traffic classes" — this table validates DCB_ATTR_FEATCFG contents.) */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
192
/* Global registry of APP (selector, protocol) -> priority entries, shared by
 * all devices and protected by dcb_lock (taken _bh in dcbnl_ieee_fill). */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
195
196 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
197 u32 flags, struct nlmsghdr **nlhp)
198 {
199 struct sk_buff *skb;
200 struct dcbmsg *dcb;
201 struct nlmsghdr *nlh;
202
203 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
204 if (!skb)
205 return NULL;
206
207 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
208 BUG_ON(!nlh);
209
210 dcb = nlmsg_data(nlh);
211 dcb->dcb_family = AF_UNSPEC;
212 dcb->cmd = cmd;
213 dcb->dcb_pad = 0;
214
215 if (nlhp)
216 *nlhp = nlh;
217
218 return skb;
219 }
220
221 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
222 u32 seq, struct nlattr **tb, struct sk_buff *skb)
223 {
224 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
225 if (!netdev->dcbnl_ops->getstate)
226 return -EOPNOTSUPP;
227
228 return nla_put_u8(skb, DCB_ATTR_STATE,
229 netdev->dcbnl_ops->getstate(netdev));
230 }
231
/*
 * DCB_CMD_PFC_GCFG: report per-priority PFC configuration.
 *
 * The request's DCB_ATTR_PFC_CFG nest names the 802.1p priorities to
 * report (DCB_PFC_UP_ATTR_ALL selects all eight).  The reply mirrors the
 * nest with one u8 per requested priority, filled in by the driver's
 * getpfccfg hook.
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		/* skip priorities the caller did not ask about */
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			/* undo the partially built nest before bailing out */
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
275
276 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
277 u32 seq, struct nlattr **tb, struct sk_buff *skb)
278 {
279 u8 perm_addr[MAX_ADDR_LEN];
280
281 if (!netdev->dcbnl_ops->getpermhwaddr)
282 return -EOPNOTSUPP;
283
284 memset(perm_addr, 0, sizeof(perm_addr));
285 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
286
287 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
288 }
289
/*
 * DCB_CMD_GCAP: report device DCB capabilities.
 *
 * The request's DCB_ATTR_CAP nest selects which capability attributes to
 * report (DCB_CAP_ATTR_ALL selects every one).  Capabilities the driver's
 * getcap hook rejects (non-zero return) are silently omitted from the
 * reply rather than treated as errors.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	/* DCB_CAP_ATTR_ALL is the first attribute; real caps follow it */
	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
333
/*
 * DCB_CMD_GNUMTCS: report the number of traffic classes supported for the
 * requested features (PG and/or PFC; DCB_NUMTCS_ATTR_ALL selects both).
 * Unlike dcbnl_getcap(), a driver error here aborts the whole request
 * with -EINVAL.
 */
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else
			/* NOTE(review): returns with the nest left open; the
			 * caller is presumably expected to discard the skb on
			 * error — confirm against the dispatch path. */
			return -EINVAL;
	}
	nla_nest_end(skb, nest);

	return 0;
}
379
/*
 * DCB_CMD_SNUMTCS: set the number of traffic classes for the features
 * present in the request (PG and/or PFC).  Stops at the first driver
 * failure.  The reply's DCB_ATTR_NUMTCS u8 carries the operation status:
 * 0 on success, 1 on failure.
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	/* status byte: !!ret collapses any driver errno to 1 */
	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
412
413 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
414 u32 seq, struct nlattr **tb, struct sk_buff *skb)
415 {
416 if (!netdev->dcbnl_ops->getpfcstate)
417 return -EOPNOTSUPP;
418
419 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
420 netdev->dcbnl_ops->getpfcstate(netdev));
421 }
422
423 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
424 u32 seq, struct nlattr **tb, struct sk_buff *skb)
425 {
426 u8 value;
427
428 if (!tb[DCB_ATTR_PFC_STATE])
429 return -EINVAL;
430
431 if (!netdev->dcbnl_ops->setpfcstate)
432 return -EOPNOTSUPP;
433
434 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
435
436 netdev->dcbnl_ops->setpfcstate(netdev, value);
437
438 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
439 }
440
/*
 * DCB_CMD_GAPP: look up the priority mapped to an (idtype, protocol id)
 * APP entry.
 *
 * If the driver provides a getapp hook it is authoritative; otherwise the
 * answer comes from the generic dcb_getapp() table.  The reply echoes the
 * idtype and id alongside the resolved priority.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		/* driver hook returns the priority, or a negative errno */
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		/* fall back to the generic APP table */
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}
509
/*
 * DCB_CMD_SAPP: set the priority for an (idtype, protocol id) APP entry.
 *
 * Routes through the driver's setapp hook when present, otherwise the
 * generic dcb_setapp() table.  The reply's DCB_ATTR_APP u8 carries the
 * set operation's result, and a CEE notification is broadcast.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	/* notify listeners even if the status put above failed */
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
558
/*
 * Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 *
 * Builds a DCB_ATTR_PG_CFG reply nest containing, for each requested
 * traffic class, the per-TC parameters (PG id, UP mapping, strict prio,
 * BW pct) the requester asked about, followed by the requested per-BWG
 * bandwidth percentages.  @dir selects the direction: 0 = Tx, 1 = Rx.
 *
 * NOTE(review): any failure after the outer nest is opened — including a
 * nested-parse error with its own errno — is reported as -EMSGSIZE via
 * the err_pg path; confirm that collapsing the errno is intended.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* with TC_ALL, its parameter nest applies to every TC */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data,
				       dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		/* emit only the fields the request selected */
		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	/* second pass: per-bandwidth-group percentages */
	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}
692
/* DCB_CMD_PGTX_GCFG: Tx direction (dir = 0) of the PG config getter. */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
698
/* DCB_CMD_PGRX_GCFG: Rx direction (dir = 1) of the PG config getter. */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
704
705 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
706 u32 seq, struct nlattr **tb, struct sk_buff *skb)
707 {
708 u8 value;
709
710 if (!tb[DCB_ATTR_STATE])
711 return -EINVAL;
712
713 if (!netdev->dcbnl_ops->setstate)
714 return -EOPNOTSUPP;
715
716 value = nla_get_u8(tb[DCB_ATTR_STATE]);
717
718 return nla_put_u8(skb, DCB_ATTR_STATE,
719 netdev->dcbnl_ops->setstate(netdev, value));
720 }
721
/*
 * DCB_CMD_PFC_SCFG: set the PFC configuration for each 802.1p priority
 * present in the request.  The driver hooks return nothing, so the reply
 * status byte is always 0.
 */
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		/* priority index derived from the attribute's own type field */
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
751
752 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
753 u32 seq, struct nlattr **tb, struct sk_buff *skb)
754 {
755 int ret;
756
757 if (!tb[DCB_ATTR_SET_ALL])
758 return -EINVAL;
759
760 if (!netdev->dcbnl_ops->setall)
761 return -EOPNOTSUPP;
762
763 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
764 netdev->dcbnl_ops->setall(netdev));
765 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
766
767 return ret;
768 }
769
/*
 * Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 *
 * Applies per-TC parameters (strict prio, PG id, BW pct, UP mapping) and
 * per-BWG bandwidth percentages from the request to the driver.  Fields
 * absent from a TC nest are passed as DCB_ATTR_VALUE_UNDEFINED so the
 * driver can leave them unchanged.  @dir: 0 = Tx, 1 = Rx.  The driver
 * hooks return nothing, so the reply status byte is always 0.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
859
/* DCB_CMD_PGTX_SCFG: Tx direction (dir = 0) of the PG config setter. */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
865
/* DCB_CMD_PGRX_SCFG: Rx direction (dir = 1) of the PG config setter. */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
871
/*
 * DCB_CMD_BCN_GCFG: report BCN configuration.
 *
 * Replies with the requested per-RP enable bytes (u8) and the requested
 * u32 BCN parameters, as selected by the DCB_ATTR_BCN nest in the request
 * (DCB_BCN_ATTR_ALL selects everything).
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest, NULL);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* per rate-limiter priority enable bytes */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* u32 BCN tuning parameters, keyed directly by attribute type */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
932
933 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
934 u32 seq, struct nlattr **tb, struct sk_buff *skb)
935 {
936 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
937 int i;
938 int ret;
939 u8 value_byte;
940 u32 value_int;
941
942 if (!tb[DCB_ATTR_BCN])
943 return -EINVAL;
944
945 if (!netdev->dcbnl_ops->setbcncfg ||
946 !netdev->dcbnl_ops->setbcnrp)
947 return -EOPNOTSUPP;
948
949 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
950 dcbnl_pfc_up_nest, NULL);
951 if (ret)
952 return ret;
953
954 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
955 if (data[i] == NULL)
956 continue;
957 value_byte = nla_get_u8(data[i]);
958 netdev->dcbnl_ops->setbcnrp(netdev,
959 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
960 }
961
962 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
963 if (data[i] == NULL)
964 continue;
965 value_int = nla_get_u32(data[i]);
966 netdev->dcbnl_ops->setbcncfg(netdev,
967 i, value_int);
968 }
969
970 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
971 }
972
/*
 * Append the peer's APP table to @skb as a nest of @app_nested_type.
 *
 * Queries the driver for the peer app info/table; per the comment below,
 * driver-side failures are deliberately swallowed (the function still
 * returns 0) so the caller's message is simply built without peer app
 * data.  Once message construction starts, -EMSGSIZE is the only error.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;


	/**
	 * retrieve the peer app configuration form the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		/* NOTE(review): sizeof * app_count multiplication; app_count
		 * is u16 so overflow is implausible, but kmalloc_array would
		 * make that explicit — consider switching. */
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/**
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	/* intentional: driver errors above are also collapsed to success */
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1028
1029 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
/*
 * Fill @skb with the device's complete IEEE DCB state: ETS, maxrate, QCN
 * (+stats), PFC, the locally stored APP table, peer ETS/PFC/APP info when
 * the driver exposes it, and the DCBX mode.  Every driver section is
 * optional; a driver error just omits that section.  Returns 0 or
 * -EMSGSIZE if the message does not fit.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* walk the shared APP registry for entries belonging to this device;
	 * dcb_lock also covers the getdcbx call below */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	/* negative dcbx means the driver has no getdcbx hook; omit the attr */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
1162
/* Fill one CEE priority-group nest (TX when @dir != 0, RX when @dir == 0)
 * into @skb.  Callers must already have checked that the matching
 * getpgtccfg{tx,rx} and getpgbwgcfg{tx,rx} ops are non-NULL.
 *
 * Returns 0 on success or -EMSGSIZE when the skb runs out of room; on
 * failure partially built nests are left open because the caller is
 * expected to discard the whole message.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	/* 'i' is first the outer nest attribute id, then reused as loop index */
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	/* Per-traffic-class parameters: one nested attribute per TC 0..7 */
	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		/* Defaults in case the driver fills in only some outputs */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	/* Per-bandwidth-group percentage: one u8 attribute per group 0..7 */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}
1215
/* Build a full CEE DCBX state dump for @netdev into @skb: local PG
 * (TX and RX), local PFC, the locally administered APP table, feature
 * flags, peer PG/PFC/APP info (when the driver exposes them) and the
 * current DCBX mode.
 *
 * Returns 0 on success or -EMSGSIZE on failure; on failure the caller
 * discards the whole skb, so open nests are not individually cancelled.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc: one u8 attribute per 802.1p user priority */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app: dump dcb_app_list entries for this device under
	 * dcb_lock; failures inside the lock exit via dcb_unlock */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* Read the DCBX mode while still under dcb_lock so it is
	 * consistent with the app table dumped above */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags: only report features the driver can query */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available; driver errors are silently skipped */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	/* Every failure is reported to the caller as "message too small" */
	err = -EMSGSIZE;
	return err;
}
1356
1357 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1358 u32 seq, u32 portid, int dcbx_ver)
1359 {
1360 struct net *net = dev_net(dev);
1361 struct sk_buff *skb;
1362 struct nlmsghdr *nlh;
1363 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1364 int err;
1365
1366 if (!ops)
1367 return -EOPNOTSUPP;
1368
1369 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1370 if (!skb)
1371 return -ENOBUFS;
1372
1373 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1374 err = dcbnl_ieee_fill(skb, dev);
1375 else
1376 err = dcbnl_cee_fill(skb, dev);
1377
1378 if (err < 0) {
1379 /* Report error to broadcast listeners */
1380 nlmsg_free(skb);
1381 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1382 } else {
1383 /* End nlmsg and notify broadcast listeners */
1384 nlmsg_end(skb, nlh);
1385 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1386 }
1387
1388 return err;
1389 }
1390
1391 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1392 u32 seq, u32 portid)
1393 {
1394 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1395 }
1396 EXPORT_SYMBOL(dcbnl_ieee_notify);
1397
1398 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1399 u32 seq, u32 portid)
1400 {
1401 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1402 }
1403 EXPORT_SYMBOL(dcbnl_cee_notify);
1404
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation can not be completed
 * the entire msg is aborted and error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	/* Each present sub-attribute is applied in turn; the first
	 * driver failure jumps to 'err' and aborts the rest. */
	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	/* APP entries go through the driver's ieee_setapp when provided,
	 * otherwise into the shared dcb_app_list. */
	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Reached on success (err == 0) and failure alike: the status is
	 * echoed back to the requester in DCB_ATTR_IEEE, and listeners
	 * are told the IEEE state may have changed. */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
1483
1484 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1485 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1486 {
1487 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1488
1489 if (!ops)
1490 return -EOPNOTSUPP;
1491
1492 return dcbnl_ieee_fill(skb, netdev);
1493 }
1494
/* Handle IEEE APP DEL commands: remove each APP entry listed in
 * DCB_ATTR_IEEE_APP_TABLE, through the driver's ieee_delapp op when
 * provided, otherwise from the shared dcb_app_list.  The first failure
 * aborts the remainder; the final status is echoed back to the
 * requester in DCB_ATTR_IEEE and listeners are notified. */
static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Reached on success (err == 0) and failure alike */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
1537
1538
1539 /* DCBX configuration */
1540 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1541 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1542 {
1543 if (!netdev->dcbnl_ops->getdcbx)
1544 return -EOPNOTSUPP;
1545
1546 return nla_put_u8(skb, DCB_ATTR_DCBX,
1547 netdev->dcbnl_ops->getdcbx(netdev));
1548 }
1549
1550 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1551 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1552 {
1553 u8 value;
1554
1555 if (!netdev->dcbnl_ops->setdcbx)
1556 return -EOPNOTSUPP;
1557
1558 if (!tb[DCB_ATTR_DCBX])
1559 return -EINVAL;
1560
1561 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1562
1563 return nla_put_u8(skb, DCB_ATTR_DCBX,
1564 netdev->dcbnl_ops->setdcbx(netdev, value));
1565 }
1566
/* Handle DCB_CMD_GFEATCFG: dump feature-config flags into a
 * DCB_ATTR_FEATCFG nest in the reply.  With DCB_FEATCFG_ATTR_ALL set
 * every feature is reported, otherwise only those named in the
 * request. */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
			       tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			/* Drop the half-built nest before bailing out */
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	/* On success this falls through with ret == 0 */
	return ret;
}
1611
1612 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1613 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1614 {
1615 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1616 int ret, i;
1617 u8 value;
1618
1619 if (!netdev->dcbnl_ops->setfeatcfg)
1620 return -ENOTSUPP;
1621
1622 if (!tb[DCB_ATTR_FEATCFG])
1623 return -EINVAL;
1624
1625 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
1626 tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
1627
1628 if (ret)
1629 goto err;
1630
1631 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1632 if (data[i] == NULL)
1633 continue;
1634
1635 value = nla_get_u8(data[i]);
1636
1637 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1638
1639 if (ret)
1640 goto err;
1641 }
1642 err:
1643 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1644
1645 return ret;
1646 }
1647
1648 /* Handle CEE DCBX GET commands. */
1649 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1650 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1651 {
1652 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1653
1654 if (!ops)
1655 return -EOPNOTSUPP;
1656
1657 return dcbnl_cee_fill(skb, netdev);
1658 }
1659
/* One dispatch-table slot: maps a DCB_CMD_* to the netlink reply type
 * and the handler that builds the reply message. */
struct reply_func {
	/* reply netlink message type (RTM_GETDCB or RTM_SETDCB) */
	int type;

	/* function to fill message contents */
	int (*cb)(struct net_device *, struct nlmsghdr *, u32,
		  struct nlattr **, struct sk_buff *);
};
1668
/* Dispatch table indexed by DCB command.  Commands without an entry
 * (NULL ->cb) are rejected by dcb_doit() with -EOPNOTSUPP. */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};
1698
/* rtnetlink doit handler for RTM_GETDCB and RTM_SETDCB: validates the
 * request, resolves the target device from DCB_ATTR_IFNAME, dispatches
 * to the per-command reply function and unicasts the reply back to the
 * requesting socket. */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	/* Configuration changes require CAP_NET_ADMIN */
	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy, NULL);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	/* NOTE(review): __dev_get_by_name() relies on rtnetlink holding
	 * RTNL around doit handlers registered via rtnl_register() */
	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	/* Handlers fill reply_skb and return 0/positive on success */
	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	/* rtnl_unicast() consumes reply_skb in all cases */
	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
1754
1755 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1756 int ifindex, int prio)
1757 {
1758 struct dcb_app_type *itr;
1759
1760 list_for_each_entry(itr, &dcb_app_list, list) {
1761 if (itr->app.selector == app->selector &&
1762 itr->app.protocol == app->protocol &&
1763 itr->ifindex == ifindex &&
1764 (!prio || itr->app.priority == prio))
1765 return itr;
1766 }
1767
1768 return NULL;
1769 }
1770
1771 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1772 {
1773 struct dcb_app_type *entry;
1774
1775 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1776 if (!entry)
1777 return -ENOMEM;
1778
1779 memcpy(&entry->app, app, sizeof(*app));
1780 entry->ifindex = ifindex;
1781 list_add(&entry->list, &dcb_app_list);
1782
1783 return 0;
1784 }
1785
1786 /**
1787 * dcb_getapp - retrieve the DCBX application user priority
1788 *
1789 * On success returns a non-zero 802.1p user priority bitmap
1790 * otherwise returns 0 as the invalid user priority bitmap to
1791 * indicate an error.
1792 */
1793 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1794 {
1795 struct dcb_app_type *itr;
1796 u8 prio = 0;
1797
1798 spin_lock_bh(&dcb_lock);
1799 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1800 prio = itr->app.priority;
1801 spin_unlock_bh(&dcb_lock);
1802
1803 return prio;
1804 }
1805 EXPORT_SYMBOL(dcb_getapp);
1806
1807 /**
1808 * dcb_setapp - add CEE dcb application data to app list
1809 *
1810 * Priority 0 is an invalid priority in CEE spec. This routine
1811 * removes applications from the app list if the priority is
1812 * set to zero. Priority is expected to be 8-bit 802.1p user priority bitmap
1813 */
1814 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1815 {
1816 struct dcb_app_type *itr;
1817 struct dcb_app_type event;
1818 int err = 0;
1819
1820 event.ifindex = dev->ifindex;
1821 memcpy(&event.app, new, sizeof(event.app));
1822 if (dev->dcbnl_ops->getdcbx)
1823 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1824
1825 spin_lock_bh(&dcb_lock);
1826 /* Search for existing match and replace */
1827 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
1828 if (new->priority)
1829 itr->app.priority = new->priority;
1830 else {
1831 list_del(&itr->list);
1832 kfree(itr);
1833 }
1834 goto out;
1835 }
1836 /* App type does not exist add new application type */
1837 if (new->priority)
1838 err = dcb_app_add(new, dev->ifindex);
1839 out:
1840 spin_unlock_bh(&dcb_lock);
1841 if (!err)
1842 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1843 return err;
1844 }
1845 EXPORT_SYMBOL(dcb_setapp);
1846
1847 /**
1848 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1849 *
1850 * Helper routine which on success returns a non-zero 802.1Qaz user
1851 * priority bitmap otherwise returns 0 to indicate the dcb_app was
1852 * not found in APP list.
1853 */
1854 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1855 {
1856 struct dcb_app_type *itr;
1857 u8 prio = 0;
1858
1859 spin_lock_bh(&dcb_lock);
1860 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1861 prio |= 1 << itr->app.priority;
1862 spin_unlock_bh(&dcb_lock);
1863
1864 return prio;
1865 }
1866 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
1867
1868 /**
1869 * dcb_ieee_setapp - add IEEE dcb application data to app list
1870 *
1871 * This adds Application data to the list. Multiple application
1872 * entries may exists for the same selector and protocol as long
1873 * as the priorities are different. Priority is expected to be a
1874 * 3-bit unsigned integer
1875 */
1876 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1877 {
1878 struct dcb_app_type event;
1879 int err = 0;
1880
1881 event.ifindex = dev->ifindex;
1882 memcpy(&event.app, new, sizeof(event.app));
1883 if (dev->dcbnl_ops->getdcbx)
1884 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1885
1886 spin_lock_bh(&dcb_lock);
1887 /* Search for existing match and abort if found */
1888 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1889 err = -EEXIST;
1890 goto out;
1891 }
1892
1893 err = dcb_app_add(new, dev->ifindex);
1894 out:
1895 spin_unlock_bh(&dcb_lock);
1896 if (!err)
1897 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1898 return err;
1899 }
1900 EXPORT_SYMBOL(dcb_ieee_setapp);
1901
1902 /**
1903 * dcb_ieee_delapp - delete IEEE dcb application data from list
1904 *
1905 * This removes a matching APP data from the APP list
1906 */
1907 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1908 {
1909 struct dcb_app_type *itr;
1910 struct dcb_app_type event;
1911 int err = -ENOENT;
1912
1913 event.ifindex = dev->ifindex;
1914 memcpy(&event.app, del, sizeof(event.app));
1915 if (dev->dcbnl_ops->getdcbx)
1916 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1917
1918 spin_lock_bh(&dcb_lock);
1919 /* Search for existing match and remove it. */
1920 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1921 list_del(&itr->list);
1922 kfree(itr);
1923 err = 0;
1924 }
1925
1926 spin_unlock_bh(&dcb_lock);
1927 if (!err)
1928 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1929 return err;
1930 }
1931 EXPORT_SYMBOL(dcb_ieee_delapp);
1932
/* Module init: prepare the shared APP table and hook the DCB commands
 * into rtnetlink. */
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	/* GET and SET share one entry point; dcb_doit() dispatches on
	 * the embedded DCB command */
	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
device_initcall(dcbnl_init);