net/dccp/ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/xfrm.h>
19
20 #include <net/addrconf.h>
21 #include <net/inet_common.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet_sock.h>
24 #include <net/inet6_connection_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/ip6_route.h>
27 #include <net/ipv6.h>
28 #include <net/protocol.h>
29 #include <net/transp_v6.h>
30 #include <net/ip6_checksum.h>
31 #include <net/xfrm.h>
32 #include <net/secure_seq.h>
33
34 #include "dccp.h"
35 #include "ipv6.h"
36 #include "feat.h"
37
38 /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
39
40 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
41 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
42
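/*
 * Hash the socket into the DCCP lookup tables. IPv4-mapped sockets
 * (icsk_af_ops == dccp_ipv6_mapped) go through inet_hash(); native
 * IPv6 sockets use __inet6_hash() with bottom halves disabled.
 */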
43 static void dccp_v6_hash(struct sock *sk)
44 {
45 if (sk->sk_state != DCCP_CLOSED) {
46 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
47 inet_hash(sk);
48 return;
49 }
50 local_bh_disable();
51 __inet6_hash(sk, NULL);
52 local_bh_enable();
53 }
54 }
55
56 /* add pseudo-header to DCCP checksum stored in skb->csum */
57 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
58 const struct in6_addr *saddr,
59 const struct in6_addr *daddr)
60 {
61 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
62 }
63
64 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
65 {
66 struct ipv6_pinfo *np = inet6_sk(sk);
67 struct dccp_hdr *dh = dccp_hdr(skb);
68
69 dccp_csum_outgoing(skb);
70 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
71 }
72
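/*
 * Derive an initial sequence number from the packet's address/port
 * 4-tuple via a keyed hash (secure_dccpv6_sequence_number()),
 * analogous to TCP's ISN selection.
 */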
73 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
74 {
75 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
76 ipv6_hdr(skb)->saddr.s6_addr32,
77 dccp_hdr(skb)->dccph_dport,
78 dccp_hdr(skb)->dccph_sport );
79
80 }
81
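/*
 * ICMPv6 error handler (registered as dccp_v6_protocol.err_handler).
 * Looks up the socket the offending packet belonged to, handles
 * Packet Too Big by refreshing the path MTU, and reports other errors
 * to the socket owner.
 */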
82 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
83 u8 type, u8 code, int offset, __be32 info)
84 {
85 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
86 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
87 struct dccp_sock *dp;
88 struct ipv6_pinfo *np;
89 struct sock *sk;
90 int err;
91 __u64 seq;
92 struct net *net = dev_net(skb->dev);
93
94 if (skb->len < offset + sizeof(*dh) ||
95 skb->len < offset + __dccp_basic_hdr_len(dh)) {
96 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
97 ICMP6_MIB_INERRORS);
98 return;
99 }
100
101 sk = inet6_lookup(net, &dccp_hashinfo,
102 &hdr->daddr, dh->dccph_dport,
103 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
104
105 if (sk == NULL) {
106 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
107 ICMP6_MIB_INERRORS);
108 return;
109 }
110
111 if (sk->sk_state == DCCP_TIME_WAIT) {
112 inet_twsk_put(inet_twsk(sk));
113 return;
114 }
115
116 bh_lock_sock(sk);
117 if (sock_owned_by_user(sk))
118 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
119
120 if (sk->sk_state == DCCP_CLOSED)
121 goto out;
122
123 dp = dccp_sk(sk);
124 seq = dccp_hdr_seq(dh);
125 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
126 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
127 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
128 goto out;
129 }
130
131 np = inet6_sk(sk);
132
133 if (type == ICMPV6_PKT_TOOBIG) {
134 struct dst_entry *dst = NULL;
135
136 if (sock_owned_by_user(sk))
137 goto out;
138 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
139 goto out;
140
141 /* icmp should have updated the destination cache entry */
142 dst = __sk_dst_check(sk, np->dst_cookie);
143 if (dst == NULL) {
144 struct inet_sock *inet = inet_sk(sk);
145 struct flowi6 fl6;
146
147 /* BUGGG_FUTURE: Again, it is not clear how
148 to handle the rthdr case. Ignore this complexity
149 for now.
150 */
151 memset(&fl6, 0, sizeof(fl6));
152 fl6.flowi6_proto = IPPROTO_DCCP;
153 fl6.daddr = np->daddr;
154 fl6.saddr = np->saddr;
155 fl6.flowi6_oif = sk->sk_bound_dev_if;
156 fl6.fl6_dport = inet->inet_dport;
157 fl6.fl6_sport = inet->inet_sport;
158 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
159
160 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
161 if (IS_ERR(dst)) {
162 sk->sk_err_soft = -PTR_ERR(dst);
163 goto out;
164 }
165 } else
166 dst_hold(dst);
167
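	/* for ICMPV6_PKT_TOOBIG, 'info' carries the MTU reported by the router */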
168 dst->ops->update_pmtu(dst, ntohl(info));
169
170 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
171 dccp_sync_mss(sk, dst_mtu(dst));
172 } /* else let the usual retransmit timer handle it */
173 dst_release(dst);
174 goto out;
175 }
176
177 icmpv6_err_convert(type, code, &err);
178
179 /* Might be for a request_sock */
180 switch (sk->sk_state) {
181 struct request_sock *req, **prev;
182 case DCCP_LISTEN:
183 if (sock_owned_by_user(sk))
184 goto out;
185
186 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
187 &hdr->daddr, &hdr->saddr,
188 inet6_iif(skb));
189 if (req == NULL)
190 goto out;
191
192 /*
193 * ICMPs are not backlogged, hence we cannot get an established
194 * socket here.
195 */
196 WARN_ON(req->sk != NULL);
197
198 if (!between48(seq, dccp_rsk(req)->dreq_iss,
199 dccp_rsk(req)->dreq_gss)) {
200 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
201 goto out;
202 }
203
204 inet_csk_reqsk_queue_drop(sk, req, prev);
205 goto out;
206
207 case DCCP_REQUESTING:
208 case DCCP_RESPOND: /* Cannot happen.
209 It can, if SYNs are crossed. --ANK */
210 if (!sock_owned_by_user(sk)) {
211 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
212 sk->sk_err = err;
213 /*
214 * Wake people up to see the error
215 * (see connect in sock.c)
216 */
217 sk->sk_error_report(sk);
218 dccp_done(sk);
219 } else
220 sk->sk_err_soft = err;
221 goto out;
222 }
223
224 if (!sock_owned_by_user(sk) && np->recverr) {
225 sk->sk_err = err;
226 sk->sk_error_report(sk);
227 } else
228 sk->sk_err_soft = err;
229
230 out:
231 bh_unlock_sock(sk);
232 sock_put(sk);
233 }
234
235
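/*
 * Build and transmit the Response packet for a pending request_sock:
 * route via a flow keyed on the request's addresses and ports, finish
 * the checksum with the IPv6 pseudo-header and hand the skb to
 * ip6_xmit().
 */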
236 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
237 struct request_values *rv_unused)
238 {
239 struct inet6_request_sock *ireq6 = inet6_rsk(req);
240 struct ipv6_pinfo *np = inet6_sk(sk);
241 struct sk_buff *skb;
242 struct ipv6_txoptions *opt = NULL;
243 struct in6_addr *final_p, final;
244 struct flowi6 fl6;
245 int err = -1;
246 struct dst_entry *dst;
247
248 memset(&fl6, 0, sizeof(fl6));
249 fl6.flowi6_proto = IPPROTO_DCCP;
250 fl6.daddr = ireq6->rmt_addr;
251 fl6.saddr = ireq6->loc_addr;
252 fl6.flowlabel = 0;
253 fl6.flowi6_oif = ireq6->iif;
254 fl6.fl6_dport = inet_rsk(req)->rmt_port;
255 fl6.fl6_sport = inet_rsk(req)->loc_port;
256 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
257
258 opt = np->opt;
259
260 final_p = fl6_update_dst(&fl6, opt, &final);
261
262 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
263 if (IS_ERR(dst)) {
264 err = PTR_ERR(dst);
265 dst = NULL;
266 goto done;
267 }
268
269 skb = dccp_make_response(sk, dst, req);
270 if (skb != NULL) {
271 struct dccp_hdr *dh = dccp_hdr(skb);
272
273 dh->dccph_checksum = dccp_v6_csum_finish(skb,
274 &ireq6->loc_addr,
275 &ireq6->rmt_addr);
276 fl6.daddr = ireq6->rmt_addr;
277 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
278 err = net_xmit_eval(err);
279 }
280
281 done:
282 if (opt != NULL && opt != np->opt)
283 sock_kfree_s(sk, opt, opt->tot_len);
284 dst_release(dst);
285 return err;
286 }
287
288 static void dccp_v6_reqsk_destructor(struct request_sock *req)
289 {
290 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
291 if (inet6_rsk(req)->pktopts != NULL)
292 kfree_skb(inet6_rsk(req)->pktopts);
293 }
294
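/*
 * Send a Reset in reply to rxskb using the per-net control socket.
 * Never answer a Reset with a Reset, and only reply to unicast
 * destinations.
 */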
295 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
296 {
297 const struct ipv6hdr *rxip6h;
298 struct sk_buff *skb;
299 struct flowi6 fl6;
300 struct net *net = dev_net(skb_dst(rxskb)->dev);
301 struct sock *ctl_sk = net->dccp.v6_ctl_sk;
302 struct dst_entry *dst;
303
304 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
305 return;
306
307 if (!ipv6_unicast_destination(rxskb))
308 return;
309
310 skb = dccp_ctl_make_reset(ctl_sk, rxskb);
311 if (skb == NULL)
312 return;
313
314 rxip6h = ipv6_hdr(rxskb);
315 dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
316 &rxip6h->daddr);
317
318 memset(&fl6, 0, sizeof(fl6));
319 fl6.daddr = rxip6h->saddr;
320 fl6.saddr = rxip6h->daddr;
321
322 fl6.flowi6_proto = IPPROTO_DCCP;
323 fl6.flowi6_oif = inet6_iif(rxskb);
324 fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
325 fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
326 security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
327
328 /* sk = NULL, but it is safe for now. RST socket required. */
329 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
330 if (!IS_ERR(dst)) {
331 skb_dst_set(skb, dst);
332 ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
333 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
334 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
335 return;
336 }
337
338 kfree_skb(skb);
339 }
340
341 static struct request_sock_ops dccp6_request_sock_ops = {
342 .family = AF_INET6,
343 .obj_size = sizeof(struct dccp6_request_sock),
344 .rtx_syn_ack = dccp_v6_send_response,
345 .send_ack = dccp_reqsk_send_ack,
346 .destructor = dccp_v6_reqsk_destructor,
347 .send_reset = dccp_v6_ctl_send_reset,
348 .syn_ack_timeout = dccp_syn_ack_timeout,
349 };
350
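/*
 * Match an incoming packet on a listening socket against a pending
 * connection request or an already established (or TIME_WAIT) socket;
 * fall back to the listening socket itself when neither exists.
 */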
351 static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
352 {
353 const struct dccp_hdr *dh = dccp_hdr(skb);
354 const struct ipv6hdr *iph = ipv6_hdr(skb);
355 struct sock *nsk;
356 struct request_sock **prev;
357 /* Find possible connection requests. */
358 struct request_sock *req = inet6_csk_search_req(sk, &prev,
359 dh->dccph_sport,
360 &iph->saddr,
361 &iph->daddr,
362 inet6_iif(skb));
363 if (req != NULL)
364 return dccp_check_req(sk, skb, req, prev);
365
366 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
367 &iph->saddr, dh->dccph_sport,
368 &iph->daddr, ntohs(dh->dccph_dport),
369 inet6_iif(skb));
370 if (nsk != NULL) {
371 if (nsk->sk_state != DCCP_TIME_WAIT) {
372 bh_lock_sock(nsk);
373 return nsk;
374 }
375 inet_twsk_put(inet_twsk(nsk));
376 return NULL;
377 }
378
379 return sk;
380 }
381
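/*
 * Handle a Request packet arriving on a listening socket: allocate a
 * request_sock, parse the options, record the peer addresses, choose
 * the initial sequence numbers, send a Response and queue the request
 * with a DCCP_TIMEOUT_INIT timeout.
 */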
382 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
383 {
384 struct request_sock *req;
385 struct dccp_request_sock *dreq;
386 struct inet6_request_sock *ireq6;
387 struct ipv6_pinfo *np = inet6_sk(sk);
388 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
389 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
390
391 if (skb->protocol == htons(ETH_P_IP))
392 return dccp_v4_conn_request(sk, skb);
393
394 if (!ipv6_unicast_destination(skb))
395 return 0; /* discard, don't send a reset here */
396
397 if (dccp_bad_service_code(sk, service)) {
398 dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
399 goto drop;
400 }
401 /*
402 * There are no SYN attacks on IPv6, yet...
403 */
404 dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
405 if (inet_csk_reqsk_queue_is_full(sk))
406 goto drop;
407
408 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
409 goto drop;
410
411 req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
412 if (req == NULL)
413 goto drop;
414
415 if (dccp_reqsk_init(req, dccp_sk(sk), skb))
416 goto drop_and_free;
417
418 dreq = dccp_rsk(req);
419 if (dccp_parse_options(sk, dreq, skb))
420 goto drop_and_free;
421
422 if (security_inet_conn_request(sk, skb, req))
423 goto drop_and_free;
424
425 ireq6 = inet6_rsk(req);
426 ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
427 ireq6->loc_addr = ipv6_hdr(skb)->daddr;
428
429 if (ipv6_opt_accepted(sk, skb) ||
430 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
431 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
432 atomic_inc(&skb->users);
433 ireq6->pktopts = skb;
434 }
435 ireq6->iif = sk->sk_bound_dev_if;
436
437 /* So that link-local addresses have meaning */
438 if (!sk->sk_bound_dev_if &&
439 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
440 ireq6->iif = inet6_iif(skb);
441
442 /*
443 * Step 3: Process LISTEN state
444 *
445 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
446 *
447 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
448 */
449 dreq->dreq_isr = dcb->dccpd_seq;
450 dreq->dreq_gsr = dreq->dreq_isr;
451 dreq->dreq_iss = dccp_v6_init_sequence(skb);
452 dreq->dreq_gss = dreq->dreq_iss;
453 dreq->dreq_service = service;
454
455 if (dccp_v6_send_response(sk, req, NULL))
456 goto drop_and_free;
457
458 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
459 return 0;
460
461 drop_and_free:
462 reqsk_free(req);
463 drop:
464 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
465 return -1;
466 }
467
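/*
 * Create the child socket once the handshake for a queued request
 * completes. The IPv4-mapped case is delegated to
 * dccp_v4_request_recv_sock() and the IPv6 socket state is patched up
 * afterwards; the native case routes, clones the listener state and
 * duplicates any IPv6 options.
 */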
468 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
469 struct sk_buff *skb,
470 struct request_sock *req,
471 struct dst_entry *dst)
472 {
473 struct inet6_request_sock *ireq6 = inet6_rsk(req);
474 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
475 struct inet_sock *newinet;
476 struct dccp6_sock *newdp6;
477 struct sock *newsk;
478 struct ipv6_txoptions *opt;
479
480 if (skb->protocol == htons(ETH_P_IP)) {
481 /*
482 * v6 mapped
483 */
484 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
485 if (newsk == NULL)
486 return NULL;
487
488 newdp6 = (struct dccp6_sock *)newsk;
489 newinet = inet_sk(newsk);
490 newinet->pinet6 = &newdp6->inet6;
491 newnp = inet6_sk(newsk);
492
493 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
494
495 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
496
497 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
498
499 newnp->rcv_saddr = newnp->saddr;
500
501 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
502 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
503 newnp->pktoptions = NULL;
504 newnp->opt = NULL;
505 newnp->mcast_oif = inet6_iif(skb);
506 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
507
508 /*
509 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
510 * here, dccp_create_openreq_child now does this for us, see the comment in
511 * that function for the gory details. -acme
512 */
513
514 /* This is a tricky place. Until this moment the IPv4 code
515 worked with the IPv6 icsk.icsk_af_ops.
516 Sync it now.
517 */
518 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
519
520 return newsk;
521 }
522
523 opt = np->opt;
524
525 if (sk_acceptq_is_full(sk))
526 goto out_overflow;
527
528 if (dst == NULL) {
529 struct in6_addr *final_p, final;
530 struct flowi6 fl6;
531
532 memset(&fl6, 0, sizeof(fl6));
533 fl6.flowi6_proto = IPPROTO_DCCP;
534 fl6.daddr = ireq6->rmt_addr;
535 final_p = fl6_update_dst(&fl6, opt, &final);
536 fl6.saddr = ireq6->loc_addr;
537 fl6.flowi6_oif = sk->sk_bound_dev_if;
538 fl6.fl6_dport = inet_rsk(req)->rmt_port;
539 fl6.fl6_sport = inet_rsk(req)->loc_port;
540 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
541
542 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
543 if (IS_ERR(dst))
544 goto out;
545 }
546
547 newsk = dccp_create_openreq_child(sk, req, skb);
548 if (newsk == NULL)
549 goto out_nonewsk;
550
551 /*
552 * No need to charge this sock to the relevant IPv6 refcnt debug socks
553 * count here, dccp_create_openreq_child now does this for us, see the
554 * comment in that function for the gory details. -acme
555 */
556
557 __ip6_dst_store(newsk, dst, NULL, NULL);
558 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
559 NETIF_F_TSO);
560 newdp6 = (struct dccp6_sock *)newsk;
561 newinet = inet_sk(newsk);
562 newinet->pinet6 = &newdp6->inet6;
563 newnp = inet6_sk(newsk);
564
565 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
566
567 newnp->daddr = ireq6->rmt_addr;
568 newnp->saddr = ireq6->loc_addr;
569 newnp->rcv_saddr = ireq6->loc_addr;
570 newsk->sk_bound_dev_if = ireq6->iif;
571
572 /* Now IPv6 options...
573
574 First: no IPv4 options.
575 */
576 newinet->inet_opt = NULL;
577
578 /* Clone RX bits */
579 newnp->rxopt.all = np->rxopt.all;
580
581 /* Clone pktoptions received with SYN */
582 newnp->pktoptions = NULL;
583 if (ireq6->pktopts != NULL) {
584 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
585 consume_skb(ireq6->pktopts);
586 ireq6->pktopts = NULL;
587 if (newnp->pktoptions)
588 skb_set_owner_r(newnp->pktoptions, newsk);
589 }
590 newnp->opt = NULL;
591 newnp->mcast_oif = inet6_iif(skb);
592 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
593
594 /*
595 * Clone native IPv6 options from listening socket (if any)
596 *
597 * Yes, keeping a reference count would be much more clever, but we do
598 * one more thing here: reattach optmem to newsk.
599 */
600 if (opt != NULL) {
601 newnp->opt = ipv6_dup_options(newsk, opt);
602 if (opt != np->opt)
603 sock_kfree_s(sk, opt, opt->tot_len);
604 }
605
606 inet_csk(newsk)->icsk_ext_hdr_len = 0;
607 if (newnp->opt != NULL)
608 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
609 newnp->opt->opt_flen);
610
611 dccp_sync_mss(newsk, dst_mtu(dst));
612
613 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
614 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
615
616 if (__inet_inherit_port(sk, newsk) < 0) {
617 sock_put(newsk);
618 goto out;
619 }
620 __inet6_hash(newsk, NULL);
621
622 return newsk;
623
624 out_overflow:
625 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
626 out_nonewsk:
627 dst_release(dst);
628 out:
629 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
630 if (opt != NULL && opt != np->opt)
631 sock_kfree_s(sk, opt, opt->tot_len);
632 return NULL;
633 }
634
635 /* The socket must have its spinlock held when we get
636 * here.
637 *
638 * We have a potential double-lock case here, so even when
639 * doing backlog processing we use the BH locking scheme.
640 * This is because we cannot sleep with the original spinlock
641 * held.
642 */
643 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
644 {
645 struct ipv6_pinfo *np = inet6_sk(sk);
646 struct sk_buff *opt_skb = NULL;
647
648 /* Imagine: socket is IPv6. IPv4 packet arrives,
649 goes to the IPv4 receive handler and is backlogged.
650 From backlog it always goes here. Kerboom...
651 Fortunately, dccp_rcv_established and rcv_established
652 handle them correctly, but that is not the case with
653 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
654 */
655
656 if (skb->protocol == htons(ETH_P_IP))
657 return dccp_v4_do_rcv(sk, skb);
658
659 if (sk_filter(sk, skb))
660 goto discard;
661
662 /*
663 * socket locking is here for SMP purposes as backlog rcv is currently
664 * called with bh processing disabled.
665 */
666
667 /* Do Stevens' IPV6_PKTOPTIONS.
668
669 Yes, guys, this is the only place in our code where we
670 can do it without affecting IPv4.
671 The rest of the code is protocol independent,
672 and I do not like the idea of uglifying IPv4.
673 
674 Actually, the whole idea behind IPV6_PKTOPTIONS
675 does not look very well thought out. For now we latch
676 the options received in the last packet enqueued
677 by tcp. Feel free to propose a better solution.
678 --ANK (980728)
679 */
680 if (np->rxopt.all)
681 /*
682 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
683 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
684 */
685 opt_skb = skb_clone(skb, GFP_ATOMIC);
686
687 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
688 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
689 goto reset;
690 if (opt_skb) {
691 /* XXX This is where we would goto ipv6_pktoptions. */
692 __kfree_skb(opt_skb);
693 }
694 return 0;
695 }
696
697 /*
698 * Step 3: Process LISTEN state
699 * If S.state == LISTEN,
700 * If P.type == Request or P contains a valid Init Cookie option,
701 * (* Must scan the packet's options to check for Init
702 * Cookies. Only Init Cookies are processed here,
703 * however; other options are processed in Step 8. This
704 * scan need only be performed if the endpoint uses Init
705 * Cookies *)
706 * (* Generate a new socket and switch to that socket *)
707 * Set S := new socket for this port pair
708 * S.state = RESPOND
709 * Choose S.ISS (initial seqno) or set from Init Cookies
710 * Initialize S.GAR := S.ISS
711 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
712 * Continue with S.state == RESPOND
713 * (* A Response packet will be generated in Step 11 *)
714 * Otherwise,
715 * Generate Reset(No Connection) unless P.type == Reset
716 * Drop packet and return
717 *
718 * NOTE: the check for the packet types is done in
719 * dccp_rcv_state_process
720 */
721 if (sk->sk_state == DCCP_LISTEN) {
722 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
723
724 if (nsk == NULL)
725 goto discard;
726 /*
727 * Queue it on the new socket if the new socket is active,
728 * otherwise we just shortcircuit this and continue with
729 * the new socket..
730 */
731 if (nsk != sk) {
732 if (dccp_child_process(sk, nsk, skb))
733 goto reset;
734 if (opt_skb != NULL)
735 __kfree_skb(opt_skb);
736 return 0;
737 }
738 }
739
740 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
741 goto reset;
742 if (opt_skb) {
743 /* XXX This is where we would goto ipv6_pktoptions. */
744 __kfree_skb(opt_skb);
745 }
746 return 0;
747
748 reset:
749 dccp_v6_ctl_send_reset(sk, skb);
750 discard:
751 if (opt_skb != NULL)
752 __kfree_skb(opt_skb);
753 kfree_skb(skb);
754 return 0;
755 }
756
757 static int dccp_v6_rcv(struct sk_buff *skb)
758 {
759 const struct dccp_hdr *dh;
760 struct sock *sk;
761 int min_cov;
762
763 /* Step 1: Check header basics */
764
765 if (dccp_invalid_packet(skb))
766 goto discard_it;
767
768 /* Step 1: If header checksum is incorrect, drop packet and return. */
769 if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
770 &ipv6_hdr(skb)->daddr)) {
771 DCCP_WARN("dropped packet with invalid checksum\n");
772 goto discard_it;
773 }
774
775 dh = dccp_hdr(skb);
776
777 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
778 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
779
780 if (dccp_packet_without_ack(skb))
781 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
782 else
783 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
784
785 /* Step 2:
786 * Look up flow ID in table and get corresponding socket */
787 sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
788 dh->dccph_sport, dh->dccph_dport);
789 /*
790 * Step 2:
791 * If no socket ...
792 */
793 if (sk == NULL) {
794 dccp_pr_debug("failed to look up flow ID in table and "
795 "get corresponding socket\n");
796 goto no_dccp_socket;
797 }
798
799 /*
800 * Step 2:
801 * ... or S.state == TIMEWAIT,
802 * Generate Reset(No Connection) unless P.type == Reset
803 * Drop packet and return
804 */
805 if (sk->sk_state == DCCP_TIME_WAIT) {
806 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
807 inet_twsk_put(inet_twsk(sk));
808 goto no_dccp_socket;
809 }
810
811 /*
812 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
813 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
814 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
815 */
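	/*
	 * Example: with MinCsCov = 4, packets with CsCov 0 (full coverage)
	 * or CsCov >= 4 pass this check, while CsCov 1..3 are dropped.
	 */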
816 min_cov = dccp_sk(sk)->dccps_pcrlen;
817 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
818 dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
819 dh->dccph_cscov, min_cov);
820 /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
821 goto discard_and_relse;
822 }
823
824 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
825 goto discard_and_relse;
826
827 return sk_receive_skb(sk, skb, 1) ? -1 : 0;
828
829 no_dccp_socket:
830 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
831 goto discard_it;
832 /*
833 * Step 2:
834 * If no socket ...
835 * Generate Reset(No Connection) unless P.type == Reset
836 * Drop packet and return
837 */
838 if (dh->dccph_type != DCCP_PKT_RESET) {
839 DCCP_SKB_CB(skb)->dccpd_reset_code =
840 DCCP_RESET_CODE_NO_CONNECTION;
841 dccp_v6_ctl_send_reset(sk, skb);
842 }
843
844 discard_it:
845 kfree_skb(skb);
846 return 0;
847
848 discard_and_relse:
849 sock_put(sk);
850 goto discard_it;
851 }
852
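/*
 * Active connect for an AF_INET6 DCCP socket: resolve flow labels and
 * scope ids, divert v4-mapped destinations to dccp_v4_connect(), look up
 * an IPv6 route, bind a local port via inet6_hash_connect(), pick the
 * initial sequence number and finally call dccp_connect().
 */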
853 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
854 int addr_len)
855 {
856 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
857 struct inet_connection_sock *icsk = inet_csk(sk);
858 struct inet_sock *inet = inet_sk(sk);
859 struct ipv6_pinfo *np = inet6_sk(sk);
860 struct dccp_sock *dp = dccp_sk(sk);
861 struct in6_addr *saddr = NULL, *final_p, final;
862 struct flowi6 fl6;
863 struct dst_entry *dst;
864 int addr_type;
865 int err;
866
867 dp->dccps_role = DCCP_ROLE_CLIENT;
868
869 if (addr_len < SIN6_LEN_RFC2133)
870 return -EINVAL;
871
872 if (usin->sin6_family != AF_INET6)
873 return -EAFNOSUPPORT;
874
875 memset(&fl6, 0, sizeof(fl6));
876
877 if (np->sndflow) {
878 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
879 IP6_ECN_flow_init(fl6.flowlabel);
880 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
881 struct ip6_flowlabel *flowlabel;
882 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
883 if (flowlabel == NULL)
884 return -EINVAL;
885 usin->sin6_addr = flowlabel->dst;
886 fl6_sock_release(flowlabel);
887 }
888 }
889 /*
890 * connect() to INADDR_ANY means loopback (BSD'ism).
891 */
892 if (ipv6_addr_any(&usin->sin6_addr))
893 usin->sin6_addr.s6_addr[15] = 1;
894
895 addr_type = ipv6_addr_type(&usin->sin6_addr);
896
897 if (addr_type & IPV6_ADDR_MULTICAST)
898 return -ENETUNREACH;
899
900 if (addr_type & IPV6_ADDR_LINKLOCAL) {
901 if (addr_len >= sizeof(struct sockaddr_in6) &&
902 usin->sin6_scope_id) {
903 /* If interface is set while binding, indices
904 * must coincide.
905 */
906 if (sk->sk_bound_dev_if &&
907 sk->sk_bound_dev_if != usin->sin6_scope_id)
908 return -EINVAL;
909
910 sk->sk_bound_dev_if = usin->sin6_scope_id;
911 }
912
913 /* Connect to link-local address requires an interface */
914 if (!sk->sk_bound_dev_if)
915 return -EINVAL;
916 }
917
918 np->daddr = usin->sin6_addr;
919 np->flow_label = fl6.flowlabel;
920
921 /*
922 * DCCP over IPv4
923 */
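	/*
	 * For a v4-mapped destination the connection is handed over to the
	 * IPv4 code: switch icsk_af_ops and the backlog handler to the
	 * mapped variants and call dccp_v4_connect().
	 */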
924 if (addr_type == IPV6_ADDR_MAPPED) {
925 u32 exthdrlen = icsk->icsk_ext_hdr_len;
926 struct sockaddr_in sin;
927
928 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
929
930 if (__ipv6_only_sock(sk))
931 return -ENETUNREACH;
932
933 sin.sin_family = AF_INET;
934 sin.sin_port = usin->sin6_port;
935 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
936
937 icsk->icsk_af_ops = &dccp_ipv6_mapped;
938 sk->sk_backlog_rcv = dccp_v4_do_rcv;
939
940 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
941 if (err) {
942 icsk->icsk_ext_hdr_len = exthdrlen;
943 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
944 sk->sk_backlog_rcv = dccp_v6_do_rcv;
945 goto failure;
946 }
947 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
948 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
949
950 return err;
951 }
952
953 if (!ipv6_addr_any(&np->rcv_saddr))
954 saddr = &np->rcv_saddr;
955
956 fl6.flowi6_proto = IPPROTO_DCCP;
957 fl6.daddr = np->daddr;
958 fl6.saddr = saddr ? *saddr : np->saddr;
959 fl6.flowi6_oif = sk->sk_bound_dev_if;
960 fl6.fl6_dport = usin->sin6_port;
961 fl6.fl6_sport = inet->inet_sport;
962 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
963
964 final_p = fl6_update_dst(&fl6, np->opt, &final);
965
966 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
967 if (IS_ERR(dst)) {
968 err = PTR_ERR(dst);
969 goto failure;
970 }
971
972 if (saddr == NULL) {
973 saddr = &fl6.saddr;
974 np->rcv_saddr = *saddr;
975 }
976
977 /* set the source address */
978 np->saddr = *saddr;
979 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
980
981 __ip6_dst_store(sk, dst, NULL, NULL);
982
983 icsk->icsk_ext_hdr_len = 0;
984 if (np->opt != NULL)
985 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
986 np->opt->opt_nflen);
987
988 inet->inet_dport = usin->sin6_port;
989
990 dccp_set_state(sk, DCCP_REQUESTING);
991 err = inet6_hash_connect(&dccp_death_row, sk);
992 if (err)
993 goto late_failure;
994
995 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
996 np->daddr.s6_addr32,
997 inet->inet_sport,
998 inet->inet_dport);
999 err = dccp_connect(sk);
1000 if (err)
1001 goto late_failure;
1002
1003 return 0;
1004
1005 late_failure:
1006 dccp_set_state(sk, DCCP_CLOSED);
1007 __sk_dst_reset(sk);
1008 failure:
1009 inet->inet_dport = 0;
1010 sk->sk_route_caps = 0;
1011 return err;
1012 }
1013
1014 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1015 .queue_xmit = inet6_csk_xmit,
1016 .send_check = dccp_v6_send_check,
1017 .rebuild_header = inet6_sk_rebuild_header,
1018 .conn_request = dccp_v6_conn_request,
1019 .syn_recv_sock = dccp_v6_request_recv_sock,
1020 .net_header_len = sizeof(struct ipv6hdr),
1021 .setsockopt = ipv6_setsockopt,
1022 .getsockopt = ipv6_getsockopt,
1023 .addr2sockaddr = inet6_csk_addr2sockaddr,
1024 .sockaddr_len = sizeof(struct sockaddr_in6),
1025 .bind_conflict = inet6_csk_bind_conflict,
1026 #ifdef CONFIG_COMPAT
1027 .compat_setsockopt = compat_ipv6_setsockopt,
1028 .compat_getsockopt = compat_ipv6_getsockopt,
1029 #endif
1030 };
1031
1032 /*
1033 * DCCP over IPv4 via INET6 API
1034 */
1035 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1036 .queue_xmit = ip_queue_xmit,
1037 .send_check = dccp_v4_send_check,
1038 .rebuild_header = inet_sk_rebuild_header,
1039 .conn_request = dccp_v6_conn_request,
1040 .syn_recv_sock = dccp_v6_request_recv_sock,
1041 .net_header_len = sizeof(struct iphdr),
1042 .setsockopt = ipv6_setsockopt,
1043 .getsockopt = ipv6_getsockopt,
1044 .addr2sockaddr = inet6_csk_addr2sockaddr,
1045 .sockaddr_len = sizeof(struct sockaddr_in6),
1046 #ifdef CONFIG_COMPAT
1047 .compat_setsockopt = compat_ipv6_setsockopt,
1048 .compat_getsockopt = compat_ipv6_getsockopt,
1049 #endif
1050 };
1051
1052 /* NOTE: A lot of things are set to zero explicitly by the call to
1053 * sk_alloc(), so they need not be done here.
1054 */
1055 static int dccp_v6_init_sock(struct sock *sk)
1056 {
1057 static __u8 dccp_v6_ctl_sock_initialized;
1058 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1059
1060 if (err == 0) {
1061 if (unlikely(!dccp_v6_ctl_sock_initialized))
1062 dccp_v6_ctl_sock_initialized = 1;
1063 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1064 }
1065
1066 return err;
1067 }
1068
1069 static void dccp_v6_destroy_sock(struct sock *sk)
1070 {
1071 dccp_destroy_sock(sk);
1072 inet6_destroy_sock(sk);
1073 }
1074
1075 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1076 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1077 };
1078
1079 static struct proto dccp_v6_prot = {
1080 .name = "DCCPv6",
1081 .owner = THIS_MODULE,
1082 .close = dccp_close,
1083 .connect = dccp_v6_connect,
1084 .disconnect = dccp_disconnect,
1085 .ioctl = dccp_ioctl,
1086 .init = dccp_v6_init_sock,
1087 .setsockopt = dccp_setsockopt,
1088 .getsockopt = dccp_getsockopt,
1089 .sendmsg = dccp_sendmsg,
1090 .recvmsg = dccp_recvmsg,
1091 .backlog_rcv = dccp_v6_do_rcv,
1092 .hash = dccp_v6_hash,
1093 .unhash = inet_unhash,
1094 .accept = inet_csk_accept,
1095 .get_port = inet_csk_get_port,
1096 .shutdown = dccp_shutdown,
1097 .destroy = dccp_v6_destroy_sock,
1098 .orphan_count = &dccp_orphan_count,
1099 .max_header = MAX_DCCP_HEADER,
1100 .obj_size = sizeof(struct dccp6_sock),
1101 .slab_flags = SLAB_DESTROY_BY_RCU,
1102 .rsk_prot = &dccp6_request_sock_ops,
1103 .twsk_prot = &dccp6_timewait_sock_ops,
1104 .h.hashinfo = &dccp_hashinfo,
1105 #ifdef CONFIG_COMPAT
1106 .compat_setsockopt = compat_dccp_setsockopt,
1107 .compat_getsockopt = compat_dccp_getsockopt,
1108 #endif
1109 };
1110
1111 static const struct inet6_protocol dccp_v6_protocol = {
1112 .handler = dccp_v6_rcv,
1113 .err_handler = dccp_v6_err,
1114 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1115 };
1116
1117 static const struct proto_ops inet6_dccp_ops = {
1118 .family = PF_INET6,
1119 .owner = THIS_MODULE,
1120 .release = inet6_release,
1121 .bind = inet6_bind,
1122 .connect = inet_stream_connect,
1123 .socketpair = sock_no_socketpair,
1124 .accept = inet_accept,
1125 .getname = inet6_getname,
1126 .poll = dccp_poll,
1127 .ioctl = inet6_ioctl,
1128 .listen = inet_dccp_listen,
1129 .shutdown = inet_shutdown,
1130 .setsockopt = sock_common_setsockopt,
1131 .getsockopt = sock_common_getsockopt,
1132 .sendmsg = inet_sendmsg,
1133 .recvmsg = sock_common_recvmsg,
1134 .mmap = sock_no_mmap,
1135 .sendpage = sock_no_sendpage,
1136 #ifdef CONFIG_COMPAT
1137 .compat_setsockopt = compat_sock_common_setsockopt,
1138 .compat_getsockopt = compat_sock_common_getsockopt,
1139 #endif
1140 };
1141
1142 static struct inet_protosw dccp_v6_protosw = {
1143 .type = SOCK_DCCP,
1144 .protocol = IPPROTO_DCCP,
1145 .prot = &dccp_v6_prot,
1146 .ops = &inet6_dccp_ops,
1147 .flags = INET_PROTOSW_ICSK,
1148 };
1149
1150 static int __net_init dccp_v6_init_net(struct net *net)
1151 {
1152 if (dccp_hashinfo.bhash == NULL)
1153 return -ESOCKTNOSUPPORT;
1154
1155 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1156 SOCK_DCCP, IPPROTO_DCCP, net);
1157 }
1158
1159 static void __net_exit dccp_v6_exit_net(struct net *net)
1160 {
1161 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1162 }
1163
1164 static struct pernet_operations dccp_v6_ops = {
1165 .init = dccp_v6_init_net,
1166 .exit = dccp_v6_exit_net,
1167 };
1168
1169 static int __init dccp_v6_init(void)
1170 {
1171 int err = proto_register(&dccp_v6_prot, 1);
1172
1173 if (err != 0)
1174 goto out;
1175
1176 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1177 if (err != 0)
1178 goto out_unregister_proto;
1179
1180 inet6_register_protosw(&dccp_v6_protosw);
1181
1182 err = register_pernet_subsys(&dccp_v6_ops);
1183 if (err != 0)
1184 goto out_destroy_ctl_sock;
1185 out:
1186 return err;
1187
1188 out_destroy_ctl_sock:
1189 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1190 inet6_unregister_protosw(&dccp_v6_protosw);
1191 out_unregister_proto:
1192 proto_unregister(&dccp_v6_prot);
1193 goto out;
1194 }
1195
1196 static void __exit dccp_v6_exit(void)
1197 {
1198 unregister_pernet_subsys(&dccp_v6_ops);
1199 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1200 inet6_unregister_protosw(&dccp_v6_protosw);
1201 proto_unregister(&dccp_v6_prot);
1202 }
1203
1204 module_init(dccp_v6_init);
1205 module_exit(dccp_v6_exit);
1206
1207 /*
1208 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1209 * values directly. Also cover the case where the protocol is not specified,
1210 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1211 */
1212 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1213 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
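
/*
 * Illustrative sketch (not part of the kernel build): the module aliases
 * above are what let a plain userspace socket() call autoload this code,
 * e.g.
 *
 *	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(5001) };
 *	inet_pton(AF_INET6, "2001:db8::1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * The address and port are placeholders; a real client normally also sets
 * its service code with setsockopt(SOL_DCCP, DCCP_SOCKOPT_SERVICE, ...)
 * before connecting.
 */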
1214 MODULE_LICENSE("GPL");
1215 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1216 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");