/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The kernel
 * keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in the
 * rest of net/sched.
 *
 * Known bugs:
 * - sometimes messes up the IP stack
 * - any manipulations besides the few operations described in the README are
 *   untested and likely to crash the system
 * - should lock the flow while there is data in the queue (?)
 */
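
/*
 * Rough datapath: atm_tc_enqueue() classifies each skb to a flow (class) and
 * queues it on that flow's inner qdisc. Flows bound to a VC are drained by
 * the sch_atm_dequeue() tasklet, which hands packets directly to vcc->send();
 * only the "link" flow (unclassified traffic) is dequeued by the stack via
 * atm_tc_dequeue().
 *
 * Classes are created with RTM_NEWTCLASS on this qdisc. As a sketch, the
 * TCA_OPTIONS attributes handled by atm_tc_change() below are:
 *
 *	TCA_ATM_FD	fd of a connected PF_ATMPVC/PF_ATMSVC socket (required)
 *	TCA_ATM_HDR	raw header prepended to every packet
 *			(default: LLC/SNAP for IP)
 *	TCA_ATM_EXCESS	classid of the flow that takes excess traffic;
 *			if absent, excess traffic gets its CLP bit set instead
 */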

#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
	struct Qdisc		*q;	/* FIFO, TBF, etc. */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct atm_vcc		*vcc;	/* VCC; NULL if VCC is closed */
	void			(*old_pop)(struct atm_vcc *vcc,
					   struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	u32			classid;	/* x:y type ID */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;
	unsigned char		hdr[0];		/* header data; MUST BE LAST */
};

struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};

/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->classid == classid)
			return flow;
	}
	return NULL;
}

static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		 sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}

static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("atm_tc_get: flow %p\n", flow);
	return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return atm_tc_get(sch, classid);
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_block_put(flow->block);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}

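/*
 * sch_atm_pop() is installed as vcc->pop on flows we own: it chains to the
 * VCC's original pop handler for the transmitted skb and then reschedules the
 * dequeue tasklet, since the VC may now have room for more packets.
 */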
static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}

static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};

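/*
 * atm_tc_change() creates a new class: it looks up the ATM socket named by
 * TCA_ATM_FD, takes a reference on it, hooks sch_atm_pop() into the VCC,
 * allocates the flow (with the encapsulation header appended to it) and gives
 * it a default pfifo child qdisc. Existing classes cannot be modified.
 */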
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply to this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API). In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy, NULL);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_get(sch, classid);
			if (!cl)
				break;
			atm_tc_put(sch, cl);
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}

	error = tcf_block_get(&flow->block, &flow->filter_list);
	if (error) {
		kfree(flow);
		goto err_out;
	}

	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	if (excess)
		atm_tc_put(sch, (unsigned long)excess);
	sockfd_put(sock);
	return error;
}

static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}

static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? flow->block : p->link.block;
}

/* --------------------------- Qdisc operations ---------------------------- */

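/*
 * Classification: if skb->priority already names one of our classes it is
 * used directly; otherwise the per-flow filter lists are run in turn until a
 * verdict is reached. Traffic that matches nothing is queued on the "link"
 * flow and leaves through the normal dequeue path instead of an ATM VC.
 */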
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tcf_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP, LANE, or
 * non-ATM interfaces.
 */

static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			refcount_add(skb->truesize,
				     &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}

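/*
 * Only packets of the "link" flow are handed back to the stack here; anything
 * classified onto an ATM VC bypasses this path and is sent directly by the
 * sch_atm_dequeue() tasklet, which atm_tc_dequeue() merely kicks once more.
 */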
static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}

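/*
 * atm_tc_init() sets up the built-in "link" flow that carries unclassified
 * traffic (with a default pfifo child and a classid equal to the qdisc
 * handle) and initializes the tasklet that feeds the ATM VCs.
 */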
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);

	err = tcf_block_get(&p->link.block, &p->link.filter_list);
	if (err)
		return err;

	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.classid = sch->handle;
	p->link.ref = 1;
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	sch->q.qlen = 0;
}

static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		tcf_block_put(flow->block);
		flow->block = NULL;
	}

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}

static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}

static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.get		= atm_tc_get,
	.put		= atm_tc_put,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_block	= atm_tc_tcf_block,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};

static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");