/*
 * net/sched/em_meta.c	Metadata ematch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 * The metadata ematch compares two meta objects where each object
 * represents either a meta value stored in the kernel or a static
 * value provided by userspace. The objects are not provided by
 * userspace itself but rather by a definition providing the information
 * to build them. Every object is of a certain type, which must match
 * the type of the object it is being compared to.
 *
 * The definition of an object consists of the type (meta type), an
 * identifier (meta id) and additional type specific information.
 * The meta id is either TCF_META_ID_VALUE for values provided by
 * userspace or an index into the meta operations table consisting of
 * function pointers to type specific collectors returning the
 * requested meta value.
 *
 *            lvalue                                   rvalue
 *          +-----------+                           +-----------+
 *          | type: INT |                           | type: INT |
 *      def | id: DEV   |                           | id: VALUE |
 *          | data:     |                           | data: 3   |
 *          +-----------+                           +-----------+
 *                |                                       |
 *                ---> meta_ops[INT][DEV](...)            |
 *                              |                         |
 *                ---------------                         |
 *                V                                       V
 *          +-----------+                           +-----------+
 *          | type: INT |                           | type: INT |
 *      obj | id: DEV   |                           | id: VALUE |
 *          | data: 2   |<--data got filled out     | data: 3   |
 *          +-----------+                           +-----------+
 *                      |                                 |
 *                      ----------> 2 equals 3 <----------
 *
 * This is a simplified schema; the complexity varies depending
 * on the meta type. Obviously, the length of the data must also
 * be provided for non-numeric types.
 *
 * Additionally, type dependent modifiers such as shift operators
 * or masks may be applied to extend the functionality. As of now,
 * the variable length type supports shifting the byte string to
 * the right, eating up any number of octets and thus supporting
 * wildcard interface name comparisons such as "ppp%" matching
 * ppp0..9.
 *
 * NOTE: Certain meta values depend on other subsystems and are
 *       only available if that subsystem is enabled in the kernel.
 */
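
/*
 * For illustration only: a typical way this ematch is exercised from
 * userspace is through the iproute2 "basic" classifier, roughly along
 * these lines (device name and classids below are placeholders):
 *
 *	tc filter add dev eth0 parent 1: basic \
 *		match 'meta(priority eq 6)' classid 1:6
 *
 * iproute2 encodes this as the TCA_EM_META_HDR/LVALUE/RVALUE attributes
 * parsed by em_meta_change() below; the lvalue then selects the
 * INT/PRIORITY collector and the rvalue is a userspace-provided
 * TCF_META_ID_VALUE of 6.
 */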

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
        unsigned long           value;
        unsigned int            len;
};

struct meta_value {
        struct tcf_meta_val     hdr;
        unsigned long           val;
        unsigned int            len;
};

struct meta_match {
        struct meta_value       lvalue;
        struct meta_value       rvalue;
};

static inline int meta_id(struct meta_value *v)
{
        return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
        return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
        struct tcf_pkt_info *info, struct meta_value *v, \
        struct meta_obj *dst, int *err)
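
/*
 * META_COLLECTOR(int_mark), for instance, expands to the definition header
 *
 *	static void meta_int_mark(struct sk_buff *skb,
 *				  struct tcf_pkt_info *info,
 *				  struct meta_value *v,
 *				  struct meta_obj *dst, int *err)
 *
 * so each collector body below only has to fill in dst (and set *err on
 * failure).
 */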

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
        get_random_bytes(&dst->value, sizeof(dst->value));
}

static inline unsigned long fixed_loadavg(int load)
{
        int rnd_load = load + (FIXED_1/200);
        int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

        return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}
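
/*
 * A brief reading of the arithmetic above: avenrun[] holds the load
 * averages as fixed point numbers with FSHIFT fractional bits
 * (FIXED_1 == 1 << FSHIFT).  fixed_loadavg() therefore returns the load
 * average scaled by 100 and rounded, e.g. a load of 0.75 is reported as
 * roughly 75, so userspace can compare against a plain integer.
 */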

META_COLLECTOR(int_loadavg_0)
{
        dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
        dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
        dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = dev->ifindex;
        return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = (unsigned long) dev->name;
        dst->len = strlen(dev->name);
        return 0;
}

META_COLLECTOR(int_dev)
{
        *err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
        *err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

META_COLLECTOR(int_vlan_tag)
{
        unsigned short tag;

        if (skb_vlan_tag_present(skb))
                dst->value = skb_vlan_tag_get(skb);
        else if (!__vlan_get_tag(skb, &tag))
                dst->value = tag;
        else
                *err = -1;
}


/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
        dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
        /* Let userspace take care of the byte ordering */
        dst->value = tc_skb_protocol(skb);
}

META_COLLECTOR(int_pkttype)
{
        dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
        dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
        dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
        dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
        dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
        dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
        dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
        if (unlikely(skb_dst(skb) == NULL))
                *err = -1;
        else
#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->value = skb_dst(skb)->tclassid;
#else
                dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
        if (unlikely(skb_rtable(skb) == NULL))
                *err = -1;
        else
                dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

#define skip_nonlocal(skb) \
        (unlikely(skb->sk == NULL))

META_COLLECTOR(int_sk_family)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        /* No error if bound_dev_if is 0, legal userspace check */
        dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }

        if (skb->sk->sk_bound_dev_if == 0) {
                dst->value = (unsigned long) "any";
                dst->len = 3;
        } else {
                struct net_device *dev;

                rcu_read_lock();
                dev = dev_get_by_index_rcu(sock_net(skb->sk),
                                           skb->sk->sk_bound_dev_if);
                *err = var_dev(dev, dst);
                rcu_read_unlock();
        }
}

META_COLLECTOR(int_sk_refcnt)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = refcount_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_wmem_queued;
}

META_COLLECTOR(int_sk_fwd_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_forward_alloc;
}

META_COLLECTOR(int_sk_sndbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_ack_backlog;
}

META_COLLECTOR(int_sk_max_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_max_ack_backlog;
}

META_COLLECTOR(int_sk_prio)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvlowat;
}

META_COLLECTOR(int_sk_rcvtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
        void            (*get)(struct sk_buff *, struct tcf_pkt_info *,
                               struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                [META_ID(DEV)]                  = META_FUNC(var_dev),
                [META_ID(SK_BOUND_IF)]          = META_FUNC(var_sk_bound_if),
        },
        [TCF_META_TYPE_INT] = {
                [META_ID(RANDOM)]               = META_FUNC(int_random),
                [META_ID(LOADAVG_0)]            = META_FUNC(int_loadavg_0),
                [META_ID(LOADAVG_1)]            = META_FUNC(int_loadavg_1),
                [META_ID(LOADAVG_2)]            = META_FUNC(int_loadavg_2),
                [META_ID(DEV)]                  = META_FUNC(int_dev),
                [META_ID(PRIORITY)]             = META_FUNC(int_priority),
                [META_ID(PROTOCOL)]             = META_FUNC(int_protocol),
                [META_ID(PKTTYPE)]              = META_FUNC(int_pkttype),
                [META_ID(PKTLEN)]               = META_FUNC(int_pktlen),
                [META_ID(DATALEN)]              = META_FUNC(int_datalen),
                [META_ID(MACLEN)]               = META_FUNC(int_maclen),
                [META_ID(NFMARK)]               = META_FUNC(int_mark),
                [META_ID(TCINDEX)]              = META_FUNC(int_tcindex),
                [META_ID(RTCLASSID)]            = META_FUNC(int_rtclassid),
                [META_ID(RTIIF)]                = META_FUNC(int_rtiif),
                [META_ID(SK_FAMILY)]            = META_FUNC(int_sk_family),
                [META_ID(SK_STATE)]             = META_FUNC(int_sk_state),
                [META_ID(SK_REUSE)]             = META_FUNC(int_sk_reuse),
                [META_ID(SK_BOUND_IF)]          = META_FUNC(int_sk_bound_if),
                [META_ID(SK_REFCNT)]            = META_FUNC(int_sk_refcnt),
                [META_ID(SK_RCVBUF)]            = META_FUNC(int_sk_rcvbuf),
                [META_ID(SK_SNDBUF)]            = META_FUNC(int_sk_sndbuf),
                [META_ID(SK_SHUTDOWN)]          = META_FUNC(int_sk_shutdown),
                [META_ID(SK_PROTO)]             = META_FUNC(int_sk_proto),
                [META_ID(SK_TYPE)]              = META_FUNC(int_sk_type),
                [META_ID(SK_RMEM_ALLOC)]        = META_FUNC(int_sk_rmem_alloc),
                [META_ID(SK_WMEM_ALLOC)]        = META_FUNC(int_sk_wmem_alloc),
                [META_ID(SK_OMEM_ALLOC)]        = META_FUNC(int_sk_omem_alloc),
                [META_ID(SK_WMEM_QUEUED)]       = META_FUNC(int_sk_wmem_queued),
                [META_ID(SK_RCV_QLEN)]          = META_FUNC(int_sk_rcv_qlen),
                [META_ID(SK_SND_QLEN)]          = META_FUNC(int_sk_snd_qlen),
                [META_ID(SK_ERR_QLEN)]          = META_FUNC(int_sk_err_qlen),
                [META_ID(SK_FORWARD_ALLOCS)]    = META_FUNC(int_sk_fwd_alloc),
                [META_ID(SK_ALLOCS)]            = META_FUNC(int_sk_alloc),
                [META_ID(SK_HASH)]              = META_FUNC(int_sk_hash),
                [META_ID(SK_LINGERTIME)]        = META_FUNC(int_sk_lingertime),
                [META_ID(SK_ACK_BACKLOG)]       = META_FUNC(int_sk_ack_bl),
                [META_ID(SK_MAX_ACK_BACKLOG)]   = META_FUNC(int_sk_max_ack_bl),
                [META_ID(SK_PRIO)]              = META_FUNC(int_sk_prio),
                [META_ID(SK_RCVLOWAT)]          = META_FUNC(int_sk_rcvlowat),
                [META_ID(SK_RCVTIMEO)]          = META_FUNC(int_sk_rcvtimeo),
                [META_ID(SK_SNDTIMEO)]          = META_FUNC(int_sk_sndtimeo),
                [META_ID(SK_SENDMSG_OFF)]       = META_FUNC(int_sk_sendmsg_off),
                [META_ID(SK_WRITE_PENDING)]     = META_FUNC(int_sk_write_pend),
                [META_ID(VLAN_TAG)]             = META_FUNC(int_vlan_tag),
                [META_ID(RXHASH)]               = META_FUNC(int_rxhash),
        }
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
        return &__meta_ops[meta_type(val)][meta_id(val)];
}
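
/*
 * For example, a meta_value whose hdr.kind encodes TCF_META_TYPE_INT and
 * TCF_META_ID_NFMARK resolves via meta_ops() to
 * __meta_ops[TCF_META_TYPE_INT][TCF_META_ID_NFMARK], i.e. the slot holding
 * meta_int_mark().  Type/id combinations without a collector resolve to a
 * slot with a NULL ->get, which meta_is_supported() below rejects.
 */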

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
        int r = a->len - b->len;

        if (r == 0)
                r = memcmp((void *) a->value, (void *) b->value, a->len);

        return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
        int len = nla_len(nla);

        dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
        if (dst->val == 0UL)
                return -ENOMEM;
        dst->len = len;
        return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
        kfree((void *) v->val);
}

static void meta_var_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        int shift = v->hdr.shift;

        if (shift && shift < dst->len)
                dst->len -= shift;
}
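
/*
 * Note on the "shift" extra for variable length values: the collected
 * string itself is left untouched, only the compared length is reduced,
 * so the trailing <shift> octets are ignored by meta_var_compare().
 * Together with an rvalue of the matching (shortened) length this is what
 * implements the "ppp%"-style wildcard mentioned at the top of the file.
 */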

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->val && v->len &&
            nla_put(skb, tlv, v->len, (void *) v->val))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
        /* Let gcc optimize it; the unlikely() is not based on measured
         * numbers, but jump free code for mismatches seems the more
         * logical choice. */
        if (unlikely(a->value == b->value))
                return 0;
        else if (a->value < b->value)
                return -1;
        else
                return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
        if (nla_len(nla) >= sizeof(unsigned long)) {
                dst->val = *(unsigned long *) nla_data(nla);
                dst->len = sizeof(unsigned long);
        } else if (nla_len(nla) == sizeof(u32)) {
                dst->val = nla_get_u32(nla);
                dst->len = sizeof(u32);
        } else
                return -EINVAL;

        return 0;
}

static void meta_int_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        if (v->hdr.shift)
                dst->value >>= v->hdr.shift;

        if (v->val)
                dst->value &= v->val;
}
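
/*
 * In other words, for the integer type the "shift" extra is a plain right
 * shift of the collected value, and a non-zero literal stored in v->val
 * acts as a bitmask, so e.g. the upper bits of skb->mark can be masked off
 * before the comparison.  (The exact userspace syntax for requesting a
 * shift or mask lives in iproute2, not in this file.)
 */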

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->len == sizeof(unsigned long)) {
                if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
                        goto nla_put_failure;
        } else if (v->len == sizeof(u32)) {
                if (nla_put_u32(skb, tlv, v->val))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
        void    (*destroy)(struct meta_value *);
        int     (*compare)(struct meta_obj *, struct meta_obj *);
        int     (*change)(struct meta_value *, struct nlattr *);
        void    (*apply_extras)(struct meta_value *, struct meta_obj *);
        int     (*dump)(struct sk_buff *, struct meta_value *, int);
};

static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
                .change = meta_var_change,
                .apply_extras = meta_var_apply_extras,
                .dump = meta_var_dump
        },
        [TCF_META_TYPE_INT] = {
                .compare = meta_int_compare,
                .change = meta_int_change,
                .apply_extras = meta_int_apply_extras,
                .dump = meta_int_dump
        }
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
        return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
                    struct meta_value *v, struct meta_obj *dst)
{
        int err = 0;

        if (meta_id(v) == TCF_META_ID_VALUE) {
                dst->value = v->val;
                dst->len = v->len;
                return 0;
        }

        meta_ops(v)->get(skb, info, v, dst, &err);
        if (err < 0)
                return err;

        if (meta_type_ops(v)->apply_extras)
                meta_type_ops(v)->apply_extras(v, dst);

        return 0;
}
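
/*
 * meta_get() is the step the diagram at the top of this file labels
 * "data got filled out": static TCF_META_ID_VALUE objects are copied
 * as-is, anything else is fetched by its collector and then run through
 * the type's apply_extras() (shift/mask for INT, length shift for VAR).
 */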

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
                         struct tcf_pkt_info *info)
{
        int r;
        struct meta_match *meta = (struct meta_match *) m->data;
        struct meta_obj l_value, r_value;

        if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
            meta_get(skb, info, &meta->rvalue, &r_value) < 0)
                return 0;

        r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

        switch (meta->lvalue.hdr.op) {
        case TCF_EM_OPND_EQ:
                return !r;
        case TCF_EM_OPND_LT:
                return r < 0;
        case TCF_EM_OPND_GT:
                return r > 0;
        }

        return 0;
}

static void meta_delete(struct meta_match *meta)
{
        if (meta) {
                const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

                if (ops && ops->destroy) {
                        ops->destroy(&meta->lvalue);
                        ops->destroy(&meta->rvalue);
                }
        }

        kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
        if (nla) {
                if (nla_len(nla) == 0)
                        return -EINVAL;

                return meta_type_ops(dst)->change(dst, nla);
        }

        return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
        return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
        [TCA_EM_META_HDR]       = { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct net *net, void *data, int len,
                          struct tcf_ematch *m)
{
        int err;
        struct nlattr *tb[TCA_EM_META_MAX + 1];
        struct tcf_meta_hdr *hdr;
        struct meta_match *meta = NULL;

        err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy, NULL);
        if (err < 0)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_EM_META_HDR] == NULL)
                goto errout;
        hdr = nla_data(tb[TCA_EM_META_HDR]);

        if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
            TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
            TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
            TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
                goto errout;

        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
        if (meta == NULL) {
                err = -ENOMEM;
                goto errout;
        }

        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

        if (!meta_is_supported(&meta->lvalue) ||
            !meta_is_supported(&meta->rvalue)) {
                err = -EOPNOTSUPP;
                goto errout;
        }

        if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
            meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
                goto errout;

        m->datalen = sizeof(*meta);
        m->data = (unsigned long) meta;

        err = 0;
errout:
        if (err && meta)
                meta_delete(meta);
        return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
        if (m)
                meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
        struct meta_match *meta = (struct meta_match *) em->data;
        struct tcf_meta_hdr hdr;
        const struct meta_type_ops *ops;

        memset(&hdr, 0, sizeof(hdr));
        memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
        memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

        if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
                goto nla_put_failure;

        ops = meta_type_ops(&meta->lvalue);
        if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
            ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
        .kind           = TCF_EM_META,
        .change         = em_meta_change,
        .match          = em_meta_match,
        .destroy        = em_meta_destroy,
        .dump           = em_meta_dump,
        .owner          = THIS_MODULE,
        .link           = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
        return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
        tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);