/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */
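/*
 * Illustrative usage (iproute2 "flow" filter syntax; a sketch for
 * orientation, not part of the original file): hash flows over the
 * src/dst addresses into 1024 classes under qdisc 1:, re-keying the
 * hash every 10 seconds:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 1 \
 *		flow hash keys src,dst divisor 1024 perturb 10
 */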
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_head		rcu;
};
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
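/*
 * Example: on a 64-bit kernel, addr_fold((void *)0xffff888012345678)
 * returns 0x12345678 ^ 0xffff8880; the upper half of the pointer is
 * XOR-folded into the lower half so the whole value contributes to the
 * 32-bit key.
 */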
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}
static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}
static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}
static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}
static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}
static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}
static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}
static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}
static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}
static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}
static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}
static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}
static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
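/*
 * Example (illustrative): "keys src,dst" gives keymask =
 * (1 << FLOW_KEY_SRC) | (1 << FLOW_KEY_DST) == 0x3. Both bits fall
 * inside FLOW_KEYS_NEEDED, so flow_classify() below dissects the skb
 * once before extracting the individual key values.
 */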
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
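/*
 * Worked example of the FLOW_MODE_MAP arithmetic above (illustrative
 * values): with a single key "dst", mask 0xff and no xor/rshift/addend,
 * a packet to 10.0.0.42 yields keys[0] = 0x0a00002a, so
 * classid = (((0x0a00002a & 0xff) ^ 0x0) >> 0) + 0 = 42; with baseclass
 * 1:1 the packet is mapped to class 1:43 (TC_H_MIN(baseclass) + classid).
 */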
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	kfree(f);
}
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* keymask is a bitmask, so test the key *bits* rather
		 * than the raw enum values.
		 */
		if ((keymask & ((1 << FLOW_KEY_SKUID) | (1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	err = -ENOBUFS;
	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		goto err2;

	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);

	fold = (struct flow_filter *)*arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	fnew->perturb_timer.function = flow_perturbation;
	fnew->perturb_timer.data = (unsigned long)fnew;
	init_timer_deferrable(&fnew->perturb_timer);

	tcf_exts_change(tp, &fnew->exts, &e);
	tcf_em_tree_change(tp, &fnew->ematches, &t);

	netif_keep_dst(qdisc_dev(tp->q));

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = (unsigned long)fnew;

	if (fold)
		call_rcu(&fold->rcu, flow_destroy_filter);
	return 0;

err2:
	tcf_em_tree_destroy(&t);
	kfree(fnew);
err1:
	tcf_exts_destroy(&e);
	return err;
}
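/*
 * Note on the update path above: an existing filter is never modified in
 * place. A fully-constructed replacement is spliced in with
 * list_replace_rcu() and the old filter is only freed via call_rcu(),
 * after a grace period, so flow_classify() readers running under RCU
 * never observe a half-updated filter.
 */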
static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	list_del_rcu(&f->list);
	call_rcu(&f->rcu, flow_destroy_filter);
	return 0;
}
static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}
static bool flow_destroy(struct tcf_proto *tp, bool force)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, flow_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}
static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");