]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/dccp/ipv6.c
Pull kmalloc into release branch
[mirror_ubuntu-zesty-kernel.git] / net / dccp / ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/xfrm.h>
18
19 #include <net/addrconf.h>
20 #include <net/inet_common.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/inet6_connection_sock.h>
24 #include <net/inet6_hashtables.h>
25 #include <net/ip6_route.h>
26 #include <net/ipv6.h>
27 #include <net/protocol.h>
28 #include <net/transp_v6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/xfrm.h>
31
32 #include "dccp.h"
33 #include "ipv6.h"
34
35 /* Socket used for sending RSTs and ACKs */
36 static struct socket *dccp_v6_ctl_socket;
37
38 static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
39 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
40 struct request_sock *req);
41 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
42
43 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
44
45 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
46 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
47
/* Bind @sk to local port @snum, using the IPv6-aware bind-conflict check. */
static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}
53
54 static void dccp_v6_hash(struct sock *sk)
55 {
56 if (sk->sk_state != DCCP_CLOSED) {
57 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
58 dccp_hash(sk);
59 return;
60 }
61 local_bh_disable();
62 __inet6_hash(&dccp_hashinfo, sk);
63 local_bh_enable();
64 }
65 }
66
/*
 * Fold the IPv6 pseudo-header (@saddr, @daddr, @len, IPPROTO_DCCP) into
 * the partial checksum @base that already covers the DCCP header and
 * payload.  @dh is unused here; it is kept for symmetry with the IPv4
 * checksum helper.
 */
static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
				struct in6_addr *saddr,
				struct in6_addr *daddr,
				unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
}
74
/*
 * Pick the initial sequence number for a new connection, keyed on the
 * packet's address/port 4-tuple.  For IPv6 packets this reuses TCP's
 * secure ISN generator -- there is no DCCPv6-specific generator in this
 * tree (see the #if 0 block in dccp_v6_connect).
 */
static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	/* v4-mapped sockets can land here with a plain IPv4 packet. */
	if (skb->protocol == htons(ETH_P_IPV6))
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    dh->dccph_dport,
						    dh->dccph_sport);

	return secure_dccp_sequence_number(skb->nh.iph->daddr,
					   skb->nh.iph->saddr,
					   dh->dccph_dport,
					   dh->dccph_sport);
}
90
/*
 * Connect a DCCPv6 socket to the peer given in @uaddr.
 *
 * Handles flow labels, link-local scope ids and v4-mapped destinations
 * (for which the work is delegated to dccp_v4_connect() and the socket
 * switched to the mapped ops).  Returns 0 on success or a negative
 * errno; on failure the socket is left in DCCP_CLOSED with dport 0.
 */
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		/* Caller supplied a flow label: validate it and, if a label
		 * object exists, take the destination address from it. */
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		/* Save the ext-header length so it can be restored if the
		 * IPv4 connect fails and we revert to the IPv6 ops. */
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		} else {
			/* Mirror the IPv4 endpoints as v4-mapped IPv6. */
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt != NULL && np->opt->srcrt != NULL) {
		/* Route via the routing header's first hop and restore the
		 * real destination (final) after the dst lookup. */
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = xfrm_lookup(&dst, &fl, sk, 0);
	if (err < 0)
		goto failure;

	if (saddr == NULL) {
		/* No bound source address: use the one the route picked. */
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;
	/* FIXME */
#if 0
	dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
						       np->daddr.s6_addr32,
						       inet->sport,
						       inet->dport);
#endif
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
266
/*
 * ICMPv6 error handler for DCCP.
 *
 * Looks up the socket the offending packet belonged to and reacts:
 * PMTU discovery for PKT_TOOBIG, dropping a pending request on a
 * listener, failing a connect in progress, or recording the error on
 * an established socket (reported to userspace only if np->recverr).
 */
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;

	sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		/* lookup took a timewait reference; just drop it. */
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* Only count the event when the socket is owned by the user;
	 * processing continues below regardless. */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			err = ip6_dst_lookup(sk, &dst, &fl);
			if (err) {
				sk->sk_err_soft = -err;
				goto out;
			}

			err = xfrm_lookup(&dst, &fl, sk, 0);
			if (err < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		/* Shrink the cached MSS if the new path MTU is smaller. */
		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* NOTE(review): dccpd_seq is read from the ICMP skb's control
	 * buffer here -- presumably set up by the caller; verify. */
	seq = DCCP_SKB_CB(skb)->dccpd_seq;
	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		/* Ignore ICMP errors not matching our pending request. */
		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND: /* Cannot happen.
			      It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
403
404
/*
 * Build and transmit the Response packet answering a Request held in
 * @req.  When @dst is NULL the route is looked up here (honouring any
 * source-routing header latched with the request).  Returns 0 on
 * success, a negative errno or -1 on failure.
 */
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = ireq6->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    ireq6->pktopts) {
			/* Derive a reply routing header by inverting the one
			 * received with the Request. */
			struct sk_buff *pktopts = ireq6->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);

			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk,
					(struct ipv6_rt_hdr *)(pktopts->nh.raw +
							       rxopt->srcrt));
		}

		if (opt != NULL && opt->srcrt != NULL) {
			/* Route via the routing header's first hop; the real
			 * destination is restored after the dst lookup. */
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		err = xfrm_lookup(&dst, &fl, sk, 0);
		if (err < 0)
			goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		/* Checksum the whole response plus the v6 pseudo-header. */
		dh->dccph_checksum = dccp_v6_check(dh, skb->len,
						   &ireq6->loc_addr,
						   &ireq6->rmt_addr,
						   csum_partial((char *)dh,
								skb->len,
								skb->csum));
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	/* Free a routing header we built locally (not the socket's own). */
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
481
482 static void dccp_v6_reqsk_destructor(struct request_sock *req)
483 {
484 if (inet6_rsk(req)->pktopts != NULL)
485 kfree_skb(inet6_rsk(req)->pktopts);
486 }
487
/* Request-socket operations used for pending DCCPv6 connections. */
static struct request_sock_ops dccp6_request_sock_ops = {
	.family = AF_INET6,
	.obj_size = sizeof(struct dccp6_request_sock),
	.rtx_syn_ack = dccp_v6_send_response,
	.send_ack = dccp_v6_reqsk_send_ack,
	.destructor = dccp_v6_reqsk_destructor,
	.send_reset = dccp_v6_ctl_send_reset,
};
496
/* Size of the timewait bucket allocated for DCCPv6 sockets. */
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
500
/*
 * Fill in the DCCP checksum of an outgoing packet on a connected
 * socket: checksum over the DCCP header (dccph_doff words), folded
 * with the already-accumulated payload csum (skb->csum) and the IPv6
 * pseudo-header of total length @len.
 */
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
					     len, IPPROTO_DCCP,
					     csum_partial((char *)dh,
							  dh->dccph_doff << 2,
							  skb->csum));
}
512
/*
 * Send a Reset in reply to @rxskb when no connection exists or the
 * connection is being aborted (see "8.3.1. Abnormal Termination" in the
 * DCCP spec).  Nothing is sent in reply to a Reset packet, nor to a
 * non-unicast destination.  Transmission uses the shared control
 * socket since no real socket is available.
 */
static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct flowi fl;
	u64 seqno;

	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_reset_len);

	/* Swap the send and the receive. */
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;
	dccp_hdr_reset(skb)->dccph_reset_code =
		DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
	seqno = 0;
	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
	/* NOTE(review): the pseudo-header length used here is sizeof(*dh)
	 * rather than the full dccp_hdr_reset_len, and skb->csum has not
	 * been accumulated over the header we just built -- this looks
	 * suspect; verify against the checksum rules of the spec. */
	dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
					     sizeof(*dh), IPPROTO_DCCP,
					     skb->csum);
	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
			return;
		}
	}

	/* Route or policy lookup failed; drop the reply. */
	kfree_skb(skb);
}
581
/*
 * Send a bare Ack in reply to @rxskb on behalf of request sock @req
 * (e.g. answering a retransmitted Request) using the shared control
 * socket, since the child socket does not exist yet.
 */
static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
				   struct request_sock *req)
{
	struct flowi fl;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
	struct sk_buff *skb;

	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_ack_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type = DCCP_PKT_ACK;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_ack_len / 4;
	dh->dccph_x = 1;

	/* Echo the peer's ack number as our sequence number. */
	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

	/* FIXME: calculate checksum, IPv4 also should... */

	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			return;
		}
	}

	/* Route or policy lookup failed; drop the reply. */
	kfree_skb(skb);
}
635
/*
 * Find the socket that should handle a packet arriving on listening
 * socket @sk: a pending request (handshake completion goes through
 * dccp_check_req), an already-established child (returned locked), or
 * @sk itself.  Returns NULL when the packet should be discarded
 * (established lookup hit a timewait socket).
 */
static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = skb->nh.ipv6h;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			/* Hand back the established child, locked. */
			bh_lock_sock(nsk);
			return nsk;
		}
		/* Drop the timewait reference taken by the lookup. */
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

	return sk;
}
666
/*
 * Handle a Request packet on listening socket @sk: allocate and queue
 * a request sock, latch incoming pktoptions if the user asked for
 * them, pick initial sequence numbers and send the Response.  Returns
 * 0 on success or -1 with dccpd_reset_code set, in which case the
 * caller sends a Reset.
 */
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	/* NOTE(review): on-stack dccp_sock passed uninitialized into
	 * dccp_openreq_init() -- presumably only written there; verify. */
	struct dccp_sock dp;
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

	/* v4-mapped listeners can receive plain IPv4 Requests. */
	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (dccp_bad_service_code(sk, service)) {
		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
	if (req == NULL)
		goto drop;

	/* FIXME: process options */

	dccp_openreq_init(req, &dp, skb);

	ireq6 = inet6_rsk(req);
	ireq = inet_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
	req->rcv_wnd = 100; /* Fake, option parsing will get the
			       right value */
	ireq6->pktopts = NULL;

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		/* Keep the Request skb around so its IPv6 options can be
		 * copied to the child socket later. */
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr = dcb->dccpd_seq;
	dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	dcb->dccpd_reset_code = reset_code;
	return -1;
}
753
/*
 * Create the child socket for an accepted connection.
 *
 * For a v4-mapped request this delegates to the IPv4 path and grafts
 * IPv6 state onto the child.  For native IPv6 it routes (when no @dst
 * was passed in), clones the listener's options and latched
 * pktoptions, and hashes the child.  Returns the new socket or NULL
 * on failure (accept queue full, route failure, allocation failure).
 */
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newdp = dccp_sk(newsk);
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Expose the IPv4 endpoints as v4-mapped IPv6 addresses. */
		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
		/* Build a reply routing header by inverting the received one. */
		const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);

		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk,
				(struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
						       rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		if (opt != NULL && opt->srcrt != NULL) {
			/* Route via the routing header's first hop. */
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL);
	/* Hardware checksum/TSO offload is not used on this IPv6 path. */
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newdp = dccp_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we make
	 * one more one thing there: reattach optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&dccp_hashinfo, newsk);
	inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	/* Free a routing header we built locally (not the socket's own). */
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
939
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	/* NOTE(review): opt_skb is cloned here but never attached to the
	 * socket, and the successful paths below return 0 without freeing
	 * it -- looks like the clone is leaked; verify. */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		return 0;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	return 0;

reset:
	dccp_v6_ctl_send_reset(skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}
1025
/*
 * Main receive entry point for DCCPv6 packets (Steps 1-2 of the packet
 * sequence described in the spec): validate the header, look up the
 * owning socket and hand the packet over, or answer with Reset(No
 * Connection) when no connection exists.
 */
static int dccp_v6_rcv(struct sk_buff **pskb)
{
	const struct dccp_hdr *dh;
	struct sk_buff *skb = *pskb;
	struct sock *sk;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	dh = dccp_hdr(skb);

	/* Stash sequence/ack numbers in the control buffer for later steps. */
	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
			    dh->dccph_sport,
			    &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
			    inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk == NULL)
		goto no_dccp_socket;

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	/* Deliver (directly or via backlog); consumes the skb and sk ref. */
	return sk_receive_skb(sk, skb) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(skb);
	}
discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	/* Drop the timewait reference and fall back to the no-socket path. */
	inet_twsk_put((struct inet_timewait_sock *)sk);
	goto no_dccp_socket;
}
1106
/* Address-family operations used by native DCCPv6 sockets. */
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit = inet6_csk_xmit,
	.send_check = dccp_v6_send_check,
	.rebuild_header = inet6_sk_rebuild_header,
	.conn_request = dccp_v6_conn_request,
	.syn_recv_sock = dccp_v6_request_recv_sock,
	.net_header_len = sizeof(struct ipv6hdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1123
/*
 *	DCCP over IPv4 via INET6 API
 *
 * Operations for AF_INET6 sockets that connected to a v4-mapped
 * destination: the wire protocol is IPv4, the socket API is IPv6.
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit = ip_queue_xmit,
	.send_check = dccp_v4_send_check,
	.rebuild_header = inet_sk_rebuild_header,
	.conn_request = dccp_v6_conn_request,
	.syn_recv_sock = dccp_v6_request_recv_sock,
	.net_header_len = sizeof(struct iphdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1143
1144 /* NOTE: A lot of things set to zero explicitly by call to
1145 * sk_alloc() so need not be done here.
1146 */
1147 static int dccp_v6_init_sock(struct sock *sk)
1148 {
1149 static __u8 dccp_v6_ctl_sock_initialized;
1150 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1151
1152 if (err == 0) {
1153 if (unlikely(!dccp_v6_ctl_sock_initialized))
1154 dccp_v6_ctl_sock_initialized = 1;
1155 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1156 }
1157
1158 return err;
1159 }
1160
static int dccp_v6_destroy_sock(struct sock *sk)
{
	int err;

	/* Tear down DCCP state first, then release IPv6 socket state. */
	dccp_destroy_sock(sk);
	err = inet6_destroy_sock(sk);

	return err;
}
1166
/* Transport-protocol operations for AF_INET6 SOCK_DCCP sockets. */
static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = dccp_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = dccp_v6_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
1196
/* IPv6 layer handler registered for IPPROTO_DCCP packets. */
static struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
1202
/* Socket-level (BSD API) operations for PF_INET6 DCCP sockets. */
static struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
1227
/* Ties SOCK_DCCP/IPPROTO_DCCP to the proto and proto_ops tables above. */
static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.capability	= -1,
	.flags		= INET_PROTOSW_ICSK,
};
1236
1237 static int __init dccp_v6_init(void)
1238 {
1239 int err = proto_register(&dccp_v6_prot, 1);
1240
1241 if (err != 0)
1242 goto out;
1243
1244 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1245 if (err != 0)
1246 goto out_unregister_proto;
1247
1248 inet6_register_protosw(&dccp_v6_protosw);
1249
1250 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
1251 SOCK_DCCP, IPPROTO_DCCP);
1252 if (err != 0)
1253 goto out_unregister_protosw;
1254 out:
1255 return err;
1256 out_unregister_protosw:
1257 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1258 inet6_unregister_protosw(&dccp_v6_protosw);
1259 out_unregister_proto:
1260 proto_unregister(&dccp_v6_prot);
1261 goto out;
1262 }
1263
/*
 * Module exit: undo dccp_v6_init() in reverse registration order —
 * drop the IPv6 protocol hook, remove the protosw entry, then
 * unregister the proto itself.
 */
static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}
1270
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly.  Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");