/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
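/*
 * Example setup from userspace with iproute2 (an illustrative sketch; the
 * device name and match expression are hypothetical):
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip dport 5000 0xffff \
 *           action csum ip4h and udp
 *
 * This recomputes the IPv4 header and UDP checksums of matching packets,
 * e.g. after another action has rewritten header fields.
 */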
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
#define CSUM_TAB_MASK 15
static struct tcf_hashinfo csum_hash_info;
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
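/*
 * Configuration arrives in a single TCA_CSUM_PARMS attribute: a struct
 * tc_csum holding the generic action parameters plus update_flags, a
 * bitmask of TCA_CSUM_UPDATE_FLAG_* values selecting which checksums
 * this action instance should recompute.
 */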
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                         struct tc_action *a, int ovr, int bind)
{
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
        struct tcf_common *pc;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
        if (!pc) {
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &csum_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (bind)/* don't override defaults */
                        return 0;
                tcf_hash_release(pc, bind, &csum_hash_info);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(pc);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(pc, &csum_hash_info);

        return ret;
}
static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
        struct tcf_csum *p = a->priv;

        return tcf_hash_release(&p->common, bind, &csum_hash_info);
}
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            (skb_cloned(skb) &&
             !skb_clone_writable(skb, hl + ntkoff) &&
             pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}
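/*
 * The per-protocol helpers below all follow the same pattern: obtain a
 * writable header via tcf_csum_skb_nextlayer(), zero the checksum field,
 * sum the payload with csum_partial(), and fold in the pseudo-header for
 * the protocols that require one.
 */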
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}
static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}
static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}
static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}
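/*
 * For UDP over IPv4 a checksum of zero means "not computed", so a result
 * that happens to fold to zero is transmitted as CSUM_MANGLED_0 (0xffff)
 * instead. UDPLITE checksums are mandatory and cover the first udph->len
 * bytes of the datagram (or the whole datagram when that field is zero).
 */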
static int tcf_csum_ipv4_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the real length without any protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use iph->tot_len, or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {
                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}
static int tcf_csum_ipv6_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the real length without any protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);
                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);
                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}
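/*
 * tcf_csum_ipv4() dispatches on the transport protocol. Non-first
 * fragments (nonzero fragment offset) carry no transport header, so for
 * them only the IPv4 header checksum can be refreshed.
 */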
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_cloned(skb) &&
                    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}
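/*
 * tcf_csum_ipv6_hopopts() scans a hop-by-hop options header for the
 * RFC 2675 Jumbo Payload option, whose 32-bit length supersedes the
 * 16-bit payload_len of the fixed IPv6 header.
 */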
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
                                 unsigned int ixhl, unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}
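/*
 * tcf_csum_ipv6() walks the extension header chain, letting
 * tcf_csum_ipv6_hopopts() adjust the payload length for jumbograms,
 * until it reaches a transport header it knows how to checksum.
 */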
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}
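/*
 * tcf_csum() is the packet-path entry point: it snapshots the action
 * state and updates the byte/packet counters under the lock, then
 * rewrites the checksums selected by update_flags.
 */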
static int tcf_csum(struct sk_buff *skb,
                    const struct tc_action *a, struct tcf_result *res)
{
        struct tcf_csum *p = a->priv;
        int action;
        u32 update_flags;

        spin_lock(&p->tcf_lock);
        p->tcf_tm.lastuse = jiffies;
        bstats_update(&p->tcf_bstats, skb);
        action = p->tcf_action;
        update_flags = p->update_flags;
        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}
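/*
 * tcf_csum_dump() reports the configuration back to userspace; as is
 * conventional for tc actions, the dumped refcnt/bindcnt exclude the
 * references held by the caller.
 */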
static int tcf_csum_dump(struct sk_buff *skb,
                         struct tc_action *a, int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = a->priv;
        struct tc_csum opt = {
                .update_flags = p->update_flags,
                .index   = p->tcf_index,
                .action  = p->tcf_action,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
        if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .hinfo          = &csum_hash_info,
        .type           = TCA_ACT_CSUM,
        .capab          = TCA_CAP_NONE,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .cleanup        = tcf_csum_cleanup,
        .init           = tcf_csum_init,
};
MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");
static int __init csum_init_module(void)
{
        int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);

        if (err)
                return err;

        return tcf_register_action(&act_csum_ops);
}
static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops);
}
module_init(csum_init_module);
module_exit(csum_cleanup_module);