net/dccp/ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/random.h>
18 #include <linux/xfrm.h>
19
20 #include <net/addrconf.h>
21 #include <net/inet_common.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet_sock.h>
24 #include <net/inet6_connection_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/ip6_route.h>
27 #include <net/ipv6.h>
28 #include <net/protocol.h>
29 #include <net/transp_v6.h>
30 #include <net/ip6_checksum.h>
31 #include <net/xfrm.h>
32
33 #include "dccp.h"
34 #include "ipv6.h"
35
36 /* Socket used for sending RSTs and ACKs */
37 static struct socket *dccp_v6_ctl_socket;
38
39 static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
40 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
41 struct request_sock *req);
42 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
43
44 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
45
46 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
47 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
48
49 static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
50 {
51 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
52 inet6_csk_bind_conflict);
53 }
54
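/*
 * Add the socket to the DCCP hash tables. Sockets using the v6-mapped
 * IPv4 operations take the IPv4 hashing path via dccp_hash(); native
 * IPv6 sockets are hashed with __inet6_hash() under local_bh_disable().
 */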
55 static void dccp_v6_hash(struct sock *sk)
56 {
57 if (sk->sk_state != DCCP_CLOSED) {
58 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
59 dccp_hash(sk);
60 return;
61 }
62 local_bh_disable();
63 __inet6_hash(&dccp_hashinfo, sk);
64 local_bh_enable();
65 }
66 }
67
68 static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
69 struct in6_addr *saddr,
70 struct in6_addr *daddr,
71 unsigned long base)
72 {
73 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
74 }
75
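/*
 * Generate the initial sequence number for an incoming connection from
 * the address/port 4-tuple: IPv6 packets reuse the TCPv6 ISN generator,
 * while v4-mapped packets use secure_dccp_sequence_number().
 */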
76 static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
77 {
78 const struct dccp_hdr *dh = dccp_hdr(skb);
79
80 if (skb->protocol == htons(ETH_P_IPV6))
81 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
82 skb->nh.ipv6h->saddr.s6_addr32,
83 dh->dccph_dport,
84 dh->dccph_sport);
85
86 return secure_dccp_sequence_number(skb->nh.iph->daddr,
87 skb->nh.iph->saddr,
88 dh->dccph_dport,
89 dh->dccph_sport);
90 }
91
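/*
 * Active (client) connection setup: validate the destination address,
 * hand v4-mapped destinations off to dccp_v4_connect(), otherwise do the
 * routing and xfrm lookups, pick a local port via inet6_hash_connect()
 * and send the DCCP-Request through dccp_connect().
 */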
92 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
93 int addr_len)
94 {
95 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
96 struct inet_connection_sock *icsk = inet_csk(sk);
97 struct inet_sock *inet = inet_sk(sk);
98 struct ipv6_pinfo *np = inet6_sk(sk);
99 struct dccp_sock *dp = dccp_sk(sk);
100 struct in6_addr *saddr = NULL, *final_p = NULL, final;
101 struct flowi fl;
102 struct dst_entry *dst;
103 int addr_type;
104 int err;
105
106 dp->dccps_role = DCCP_ROLE_CLIENT;
107
108 if (addr_len < SIN6_LEN_RFC2133)
109 return -EINVAL;
110
111 if (usin->sin6_family != AF_INET6)
112 return -EAFNOSUPPORT;
113
114 memset(&fl, 0, sizeof(fl));
115
116 if (np->sndflow) {
117 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
118 IP6_ECN_flow_init(fl.fl6_flowlabel);
119 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
120 struct ip6_flowlabel *flowlabel;
121 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
122 if (flowlabel == NULL)
123 return -EINVAL;
124 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
125 fl6_sock_release(flowlabel);
126 }
127 }
128 /*
129 * connect() to INADDR_ANY means loopback (BSD'ism).
130 */
131 if (ipv6_addr_any(&usin->sin6_addr))
132 usin->sin6_addr.s6_addr[15] = 1;
133
134 addr_type = ipv6_addr_type(&usin->sin6_addr);
135
136 if (addr_type & IPV6_ADDR_MULTICAST)
137 return -ENETUNREACH;
138
139 if (addr_type & IPV6_ADDR_LINKLOCAL) {
140 if (addr_len >= sizeof(struct sockaddr_in6) &&
141 usin->sin6_scope_id) {
142 /* If interface is set while binding, indices
143 * must coincide.
144 */
145 if (sk->sk_bound_dev_if &&
146 sk->sk_bound_dev_if != usin->sin6_scope_id)
147 return -EINVAL;
148
149 sk->sk_bound_dev_if = usin->sin6_scope_id;
150 }
151
152 /* Connect to link-local address requires an interface */
153 if (!sk->sk_bound_dev_if)
154 return -EINVAL;
155 }
156
157 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
158 np->flow_label = fl.fl6_flowlabel;
159
160 /*
161 * DCCP over IPv4
162 */
163 if (addr_type == IPV6_ADDR_MAPPED) {
164 u32 exthdrlen = icsk->icsk_ext_hdr_len;
165 struct sockaddr_in sin;
166
167 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
168
169 if (__ipv6_only_sock(sk))
170 return -ENETUNREACH;
171
172 sin.sin_family = AF_INET;
173 sin.sin_port = usin->sin6_port;
174 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
175
176 icsk->icsk_af_ops = &dccp_ipv6_mapped;
177 sk->sk_backlog_rcv = dccp_v4_do_rcv;
178
179 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
180 if (err) {
181 icsk->icsk_ext_hdr_len = exthdrlen;
182 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
183 sk->sk_backlog_rcv = dccp_v6_do_rcv;
184 goto failure;
185 } else {
186 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
187 inet->saddr);
188 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
189 inet->rcv_saddr);
190 }
191
192 return err;
193 }
194
195 if (!ipv6_addr_any(&np->rcv_saddr))
196 saddr = &np->rcv_saddr;
197
198 fl.proto = IPPROTO_DCCP;
199 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
200 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
201 fl.oif = sk->sk_bound_dev_if;
202 fl.fl_ip_dport = usin->sin6_port;
203 fl.fl_ip_sport = inet->sport;
204
205 if (np->opt != NULL && np->opt->srcrt != NULL) {
206 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
207
208 ipv6_addr_copy(&final, &fl.fl6_dst);
209 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
210 final_p = &final;
211 }
212
213 err = ip6_dst_lookup(sk, &dst, &fl);
214 if (err)
215 goto failure;
216
217 if (final_p)
218 ipv6_addr_copy(&fl.fl6_dst, final_p);
219
220 err = xfrm_lookup(&dst, &fl, sk, 0);
221 if (err < 0)
222 goto failure;
223
224 if (saddr == NULL) {
225 saddr = &fl.fl6_src;
226 ipv6_addr_copy(&np->rcv_saddr, saddr);
227 }
228
229 /* set the source address */
230 ipv6_addr_copy(&np->saddr, saddr);
231 inet->rcv_saddr = LOOPBACK4_IPV6;
232
233 ip6_dst_store(sk, dst, NULL);
234
235 icsk->icsk_ext_hdr_len = 0;
236 if (np->opt != NULL)
237 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
238 np->opt->opt_nflen);
239
240 inet->dport = usin->sin6_port;
241
242 dccp_set_state(sk, DCCP_REQUESTING);
243 err = inet6_hash_connect(&dccp_death_row, sk);
244 if (err)
245 goto late_failure;
246 /* FIXME */
247 #if 0
248 dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
249 np->daddr.s6_addr32,
250 inet->sport,
251 inet->dport);
252 #endif
253 err = dccp_connect(sk);
254 if (err)
255 goto late_failure;
256
257 return 0;
258
259 late_failure:
260 dccp_set_state(sk, DCCP_CLOSED);
261 __sk_dst_reset(sk);
262 failure:
263 inet->dport = 0;
264 sk->sk_route_caps = 0;
265 return err;
266 }
267
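/*
 * ICMPv6 error handler: find the socket the error refers to, handle
 * ICMPV6_PKT_TOOBIG by re-syncing the MSS against the new path MTU,
 * drop the request_sock for errors during the handshake, and report
 * everything else to the socket owner.
 */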
268 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
269 int type, int code, int offset, __be32 info)
270 {
271 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
272 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
273 struct ipv6_pinfo *np;
274 struct sock *sk;
275 int err;
276 __u64 seq;
277
278 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
279 &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);
280
281 if (sk == NULL) {
282 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
283 return;
284 }
285
286 if (sk->sk_state == DCCP_TIME_WAIT) {
287 inet_twsk_put((struct inet_timewait_sock *)sk);
288 return;
289 }
290
291 bh_lock_sock(sk);
292 if (sock_owned_by_user(sk))
293 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
294
295 if (sk->sk_state == DCCP_CLOSED)
296 goto out;
297
298 np = inet6_sk(sk);
299
300 if (type == ICMPV6_PKT_TOOBIG) {
301 struct dst_entry *dst = NULL;
302
303 if (sock_owned_by_user(sk))
304 goto out;
305 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
306 goto out;
307
308 /* icmp should have updated the destination cache entry */
309 dst = __sk_dst_check(sk, np->dst_cookie);
310 if (dst == NULL) {
311 struct inet_sock *inet = inet_sk(sk);
312 struct flowi fl;
313
314 /* BUGGG_FUTURE: Again, it is not clear how
315 to handle rthdr case. Ignore this complexity
316 for now.
317 */
318 memset(&fl, 0, sizeof(fl));
319 fl.proto = IPPROTO_DCCP;
320 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
321 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
322 fl.oif = sk->sk_bound_dev_if;
323 fl.fl_ip_dport = inet->dport;
324 fl.fl_ip_sport = inet->sport;
325
326 err = ip6_dst_lookup(sk, &dst, &fl);
327 if (err) {
328 sk->sk_err_soft = -err;
329 goto out;
330 }
331
332 err = xfrm_lookup(&dst, &fl, sk, 0);
333 if (err < 0) {
334 sk->sk_err_soft = -err;
335 goto out;
336 }
337 } else
338 dst_hold(dst);
339
340 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
341 dccp_sync_mss(sk, dst_mtu(dst));
342 } /* else let the usual retransmit timer handle it */
343 dst_release(dst);
344 goto out;
345 }
346
347 icmpv6_err_convert(type, code, &err);
348
349 seq = DCCP_SKB_CB(skb)->dccpd_seq;
350 /* Might be for a request_sock */
351 switch (sk->sk_state) {
352 struct request_sock *req, **prev;
353 case DCCP_LISTEN:
354 if (sock_owned_by_user(sk))
355 goto out;
356
357 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
358 &hdr->daddr, &hdr->saddr,
359 inet6_iif(skb));
360 if (req == NULL)
361 goto out;
362
363 /*
364 * ICMPs are not backlogged, hence we cannot get an established
365 * socket here.
366 */
367 BUG_TRAP(req->sk == NULL);
368
369 if (seq != dccp_rsk(req)->dreq_iss) {
370 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
371 goto out;
372 }
373
374 inet_csk_reqsk_queue_drop(sk, req, prev);
375 goto out;
376
377 case DCCP_REQUESTING:
378 case DCCP_RESPOND: /* Cannot happen.
379 It can, if SYNs are crossed. --ANK */
380 if (!sock_owned_by_user(sk)) {
381 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
382 sk->sk_err = err;
383 /*
384 * Wake people up to see the error
385 * (see connect in sock.c)
386 */
387 sk->sk_error_report(sk);
388 dccp_done(sk);
389 } else
390 sk->sk_err_soft = err;
391 goto out;
392 }
393
394 if (!sock_owned_by_user(sk) && np->recverr) {
395 sk->sk_err = err;
396 sk->sk_error_report(sk);
397 } else
398 sk->sk_err_soft = err;
399
400 out:
401 bh_unlock_sock(sk);
402 sock_put(sk);
403 }
404
405
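/*
 * Send a DCCP-Response for a pending request_sock: build the flow
 * (honouring any IPv6 routing header), do the route/xfrm lookup if no
 * dst was supplied, checksum the packet and transmit it with ip6_xmit().
 */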
406 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
407 struct dst_entry *dst)
408 {
409 struct inet6_request_sock *ireq6 = inet6_rsk(req);
410 struct ipv6_pinfo *np = inet6_sk(sk);
411 struct sk_buff *skb;
412 struct ipv6_txoptions *opt = NULL;
413 struct in6_addr *final_p = NULL, final;
414 struct flowi fl;
415 int err = -1;
416
417 memset(&fl, 0, sizeof(fl));
418 fl.proto = IPPROTO_DCCP;
419 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
420 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
421 fl.fl6_flowlabel = 0;
422 fl.oif = ireq6->iif;
423 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
424 fl.fl_ip_sport = inet_sk(sk)->sport;
425
426 if (dst == NULL) {
427 opt = np->opt;
428 if (opt == NULL &&
429 np->rxopt.bits.osrcrt == 2 &&
430 ireq6->pktopts) {
431 struct sk_buff *pktopts = ireq6->pktopts;
432 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
433
434 if (rxopt->srcrt)
435 opt = ipv6_invert_rthdr(sk,
436 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
437 rxopt->srcrt));
438 }
439
440 if (opt != NULL && opt->srcrt != NULL) {
441 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
442
443 ipv6_addr_copy(&final, &fl.fl6_dst);
444 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
445 final_p = &final;
446 }
447
448 err = ip6_dst_lookup(sk, &dst, &fl);
449 if (err)
450 goto done;
451
452 if (final_p)
453 ipv6_addr_copy(&fl.fl6_dst, final_p);
454
455 err = xfrm_lookup(&dst, &fl, sk, 0);
456 if (err < 0)
457 goto done;
458 }
459
460 skb = dccp_make_response(sk, dst, req);
461 if (skb != NULL) {
462 struct dccp_hdr *dh = dccp_hdr(skb);
463
464 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
465 &ireq6->loc_addr,
466 &ireq6->rmt_addr,
467 csum_partial((char *)dh,
468 skb->len,
469 skb->csum));
470 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
471 err = ip6_xmit(sk, skb, &fl, opt, 0);
472 if (err == NET_XMIT_CN)
473 err = 0;
474 }
475
476 done:
477 if (opt != NULL && opt != np->opt)
478 sock_kfree_s(sk, opt, opt->tot_len);
479 dst_release(dst);
480 return err;
481 }
482
483 static void dccp_v6_reqsk_destructor(struct request_sock *req)
484 {
485 if (inet6_rsk(req)->pktopts != NULL)
486 kfree_skb(inet6_rsk(req)->pktopts);
487 }
488
489 static struct request_sock_ops dccp6_request_sock_ops = {
490 .family = AF_INET6,
491 .obj_size = sizeof(struct dccp6_request_sock),
492 .rtx_syn_ack = dccp_v6_send_response,
493 .send_ack = dccp_v6_reqsk_send_ack,
494 .destructor = dccp_v6_reqsk_destructor,
495 .send_reset = dccp_v6_ctl_send_reset,
496 };
497
498 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
499 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
500 };
501
502 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
503 {
504 struct ipv6_pinfo *np = inet6_sk(sk);
505 struct dccp_hdr *dh = dccp_hdr(skb);
506
507 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
508 len, IPPROTO_DCCP,
509 csum_partial((char *)dh,
510 dh->dccph_doff << 2,
511 skb->csum));
512 }
513
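/*
 * Send a DCCP-Reset in reply to rxskb using the control socket. A Reset
 * is never answered with another Reset and only unicast destinations are
 * replied to; the sequence number is derived from the acknowledgement
 * number of the incoming packet, per the abnormal termination rules.
 */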
514 static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
515 {
516 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
517 const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
518 sizeof(struct dccp_hdr_ext) +
519 sizeof(struct dccp_hdr_reset);
520 struct sk_buff *skb;
521 struct flowi fl;
522 u64 seqno;
523
524 if (rxdh->dccph_type == DCCP_PKT_RESET)
525 return;
526
527 if (!ipv6_unicast_destination(rxskb))
528 return;
529
530 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
531 GFP_ATOMIC);
532 if (skb == NULL)
533 return;
534
535 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
536
537 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
538 dh = dccp_hdr(skb);
539 memset(dh, 0, dccp_hdr_reset_len);
540
541 /* Swap the send and the receive. */
542 dh->dccph_type = DCCP_PKT_RESET;
543 dh->dccph_sport = rxdh->dccph_dport;
544 dh->dccph_dport = rxdh->dccph_sport;
545 dh->dccph_doff = dccp_hdr_reset_len / 4;
546 dh->dccph_x = 1;
547 dccp_hdr_reset(skb)->dccph_reset_code =
548 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
549
550 /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
551 seqno = 0;
552 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
553 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
554
555 dccp_hdr_set_seq(dh, seqno);
556 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
557 DCCP_SKB_CB(rxskb)->dccpd_seq);
558
559 memset(&fl, 0, sizeof(fl));
560 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
561 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
562 dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
563 sizeof(*dh), IPPROTO_DCCP,
564 skb->csum);
565 fl.proto = IPPROTO_DCCP;
566 fl.oif = inet6_iif(rxskb);
567 fl.fl_ip_dport = dh->dccph_dport;
568 fl.fl_ip_sport = dh->dccph_sport;
569
570 /* sk = NULL, but it is safe for now. RST socket required. */
571 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
572 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
573 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
574 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
575 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
576 return;
577 }
578 }
579
580 kfree_skb(skb);
581 }
582
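/*
 * Build and transmit a bare DCCP-Ack on behalf of a request_sock,
 * mirroring the ports and addresses of the received packet.
 */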
583 static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
584 struct request_sock *req)
585 {
586 struct flowi fl;
587 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
588 const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
589 sizeof(struct dccp_hdr_ext) +
590 sizeof(struct dccp_hdr_ack_bits);
591 struct sk_buff *skb;
592
593 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
594 GFP_ATOMIC);
595 if (skb == NULL)
596 return;
597
598 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
599
600 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
601 dh = dccp_hdr(skb);
602 memset(dh, 0, dccp_hdr_ack_len);
603
604 /* Build DCCP header and checksum it. */
605 dh->dccph_type = DCCP_PKT_ACK;
606 dh->dccph_sport = rxdh->dccph_dport;
607 dh->dccph_dport = rxdh->dccph_sport;
608 dh->dccph_doff = dccp_hdr_ack_len / 4;
609 dh->dccph_x = 1;
610
611 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
612 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
613 DCCP_SKB_CB(rxskb)->dccpd_seq);
614
615 memset(&fl, 0, sizeof(fl));
616 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
617 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
618
619 /* FIXME: calculate checksum, IPv4 also should... */
620
621 fl.proto = IPPROTO_DCCP;
622 fl.oif = inet6_iif(rxskb);
623 fl.fl_ip_dport = dh->dccph_dport;
624 fl.fl_ip_sport = dh->dccph_sport;
625
626 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
627 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
628 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
629 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
630 return;
631 }
632 }
633
634 kfree_skb(skb);
635 }
636
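/*
 * Map a packet arriving on a listening socket to a pending request_sock,
 * an already established socket, or the listener itself.
 */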
637 static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
638 {
639 const struct dccp_hdr *dh = dccp_hdr(skb);
640 const struct ipv6hdr *iph = skb->nh.ipv6h;
641 struct sock *nsk;
642 struct request_sock **prev;
643 /* Find possible connection requests. */
644 struct request_sock *req = inet6_csk_search_req(sk, &prev,
645 dh->dccph_sport,
646 &iph->saddr,
647 &iph->daddr,
648 inet6_iif(skb));
649 if (req != NULL)
650 return dccp_check_req(sk, skb, req, prev);
651
652 nsk = __inet6_lookup_established(&dccp_hashinfo,
653 &iph->saddr, dh->dccph_sport,
654 &iph->daddr, ntohs(dh->dccph_dport),
655 inet6_iif(skb));
656 if (nsk != NULL) {
657 if (nsk->sk_state != DCCP_TIME_WAIT) {
658 bh_lock_sock(nsk);
659 return nsk;
660 }
661 inet_twsk_put((struct inet_timewait_sock *)nsk);
662 return NULL;
663 }
664
665 return sk;
666 }
667
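/*
 * Handle a DCCP-Request on a listening socket: delegate v4-mapped
 * requests to dccp_v4_conn_request(), validate the service code,
 * allocate and initialise a request_sock, choose the initial sequence
 * numbers and answer with a DCCP-Response.
 */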
668 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
669 {
670 struct inet_request_sock *ireq;
671 struct dccp_sock dp;
672 struct request_sock *req;
673 struct dccp_request_sock *dreq;
674 struct inet6_request_sock *ireq6;
675 struct ipv6_pinfo *np = inet6_sk(sk);
676 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
677 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
678 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
679
680 if (skb->protocol == htons(ETH_P_IP))
681 return dccp_v4_conn_request(sk, skb);
682
683 if (!ipv6_unicast_destination(skb))
684 goto drop;
685
686 if (dccp_bad_service_code(sk, service)) {
687 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
688 goto drop;
689 }
690 /*
691 * There are no SYN attacks on IPv6, yet...
692 */
693 if (inet_csk_reqsk_queue_is_full(sk))
694 goto drop;
695
696 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
697 goto drop;
698
699 req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
700 if (req == NULL)
701 goto drop;
702
703 /* FIXME: process options */
704
705 dccp_openreq_init(req, &dp, skb);
706
707 ireq6 = inet6_rsk(req);
708 ireq = inet_rsk(req);
709 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
710 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
711 req->rcv_wnd = 100; /* Fake, option parsing will get the
712 right value */
713 ireq6->pktopts = NULL;
714
715 if (ipv6_opt_accepted(sk, skb) ||
716 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
717 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
718 atomic_inc(&skb->users);
719 ireq6->pktopts = skb;
720 }
721 ireq6->iif = sk->sk_bound_dev_if;
722
723 /* So that link locals have meaning */
724 if (!sk->sk_bound_dev_if &&
725 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
726 ireq6->iif = inet6_iif(skb);
727
728 /*
729 * Step 3: Process LISTEN state
730 *
731 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
732 *
733 * In fact we defer setting S.GSR, S.SWL, S.SWH to
734 * dccp_create_openreq_child.
735 */
736 dreq = dccp_rsk(req);
737 dreq->dreq_isr = dcb->dccpd_seq;
738 dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
739 dreq->dreq_service = service;
740
741 if (dccp_v6_send_response(sk, req, NULL))
742 goto drop_and_free;
743
744 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
745 return 0;
746
747 drop_and_free:
748 reqsk_free(req);
749 drop:
750 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
751 dcb->dccpd_reset_code = reset_code;
752 return -1;
753 }
754
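/*
 * Create the child socket once the handshake completes, covering both
 * the v6-mapped IPv4 case and native IPv6, and carry over the IPv6
 * options and any packet options saved from the Request.
 */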
755 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
756 struct sk_buff *skb,
757 struct request_sock *req,
758 struct dst_entry *dst)
759 {
760 struct inet6_request_sock *ireq6 = inet6_rsk(req);
761 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
762 struct inet_sock *newinet;
763 struct dccp_sock *newdp;
764 struct dccp6_sock *newdp6;
765 struct sock *newsk;
766 struct ipv6_txoptions *opt;
767
768 if (skb->protocol == htons(ETH_P_IP)) {
769 /*
770 * v6 mapped
771 */
772 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
773 if (newsk == NULL)
774 return NULL;
775
776 newdp6 = (struct dccp6_sock *)newsk;
777 newdp = dccp_sk(newsk);
778 newinet = inet_sk(newsk);
779 newinet->pinet6 = &newdp6->inet6;
780 newnp = inet6_sk(newsk);
781
782 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
783
784 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
785 newinet->daddr);
786
787 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
788 newinet->saddr);
789
790 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
791
792 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
793 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
794 newnp->pktoptions = NULL;
795 newnp->opt = NULL;
796 newnp->mcast_oif = inet6_iif(skb);
797 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
798
799 /*
800 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
801 * here, dccp_create_openreq_child now does this for us, see the comment in
802 * that function for the gory details. -acme
803 */
804
805 /* This is a tricky place. Until this moment the IPv4 code
806 worked with the IPv6 icsk.icsk_af_ops.
807 Sync it now.
808 */
809 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
810
811 return newsk;
812 }
813
814 opt = np->opt;
815
816 if (sk_acceptq_is_full(sk))
817 goto out_overflow;
818
819 if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
820 const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
821
822 if (rxopt->srcrt)
823 opt = ipv6_invert_rthdr(sk,
824 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
825 rxopt->srcrt));
826 }
827
828 if (dst == NULL) {
829 struct in6_addr *final_p = NULL, final;
830 struct flowi fl;
831
832 memset(&fl, 0, sizeof(fl));
833 fl.proto = IPPROTO_DCCP;
834 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
835 if (opt != NULL && opt->srcrt != NULL) {
836 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
837
838 ipv6_addr_copy(&final, &fl.fl6_dst);
839 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
840 final_p = &final;
841 }
842 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
843 fl.oif = sk->sk_bound_dev_if;
844 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
845 fl.fl_ip_sport = inet_sk(sk)->sport;
846
847 if (ip6_dst_lookup(sk, &dst, &fl))
848 goto out;
849
850 if (final_p)
851 ipv6_addr_copy(&fl.fl6_dst, final_p);
852
853 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
854 goto out;
855 }
856
857 newsk = dccp_create_openreq_child(sk, req, skb);
858 if (newsk == NULL)
859 goto out;
860
861 /*
862 * No need to charge this sock to the relevant IPv6 refcnt debug socks
863 * count here, dccp_create_openreq_child now does this for us, see the
864 * comment in that function for the gory details. -acme
865 */
866
867 ip6_dst_store(newsk, dst, NULL);
868 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
869 NETIF_F_TSO);
870 newdp6 = (struct dccp6_sock *)newsk;
871 newinet = inet_sk(newsk);
872 newinet->pinet6 = &newdp6->inet6;
873 newdp = dccp_sk(newsk);
874 newnp = inet6_sk(newsk);
875
876 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
877
878 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
879 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
880 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
881 newsk->sk_bound_dev_if = ireq6->iif;
882
883 /* Now IPv6 options...
884
885 First: no IPv4 options.
886 */
887 newinet->opt = NULL;
888
889 /* Clone RX bits */
890 newnp->rxopt.all = np->rxopt.all;
891
892 /* Clone pktoptions received with SYN */
893 newnp->pktoptions = NULL;
894 if (ireq6->pktopts != NULL) {
895 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
896 kfree_skb(ireq6->pktopts);
897 ireq6->pktopts = NULL;
898 if (newnp->pktoptions)
899 skb_set_owner_r(newnp->pktoptions, newsk);
900 }
901 newnp->opt = NULL;
902 newnp->mcast_oif = inet6_iif(skb);
903 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
904
905 /*
906 * Clone native IPv6 options from listening socket (if any)
907 *
908 * Yes, keeping a reference count would be much more clever, but we do
909 * one more thing here: reattach optmem to newsk.
910 */
911 if (opt != NULL) {
912 newnp->opt = ipv6_dup_options(newsk, opt);
913 if (opt != np->opt)
914 sock_kfree_s(sk, opt, opt->tot_len);
915 }
916
917 inet_csk(newsk)->icsk_ext_hdr_len = 0;
918 if (newnp->opt != NULL)
919 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
920 newnp->opt->opt_flen);
921
922 dccp_sync_mss(newsk, dst_mtu(dst));
923
924 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
925
926 __inet6_hash(&dccp_hashinfo, newsk);
927 inet_inherit_port(&dccp_hashinfo, sk, newsk);
928
929 return newsk;
930
931 out_overflow:
932 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
933 out:
934 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
935 if (opt != NULL && opt != np->opt)
936 sock_kfree_s(sk, opt, opt->tot_len);
937 dst_release(dst);
938 return NULL;
939 }
940
941 /* The socket must have its spinlock held when we get
942 * here.
943 *
944 * We have a potential double-lock case here, so even when
945 * doing backlog processing we use the BH locking scheme.
946 * This is because we cannot sleep with the original spinlock
947 * held.
948 */
949 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
950 {
951 struct ipv6_pinfo *np = inet6_sk(sk);
952 struct sk_buff *opt_skb = NULL;
953
954 /* Imagine: socket is IPv6. IPv4 packet arrives,
955 goes to the IPv4 receive handler and is backlogged.
956 From backlog it always goes here. Kerboom...
957 Fortunately, dccp_rcv_established and rcv_established
958 handle them correctly, but it is not the case with
959 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
960 */
961
962 if (skb->protocol == htons(ETH_P_IP))
963 return dccp_v4_do_rcv(sk, skb);
964
965 if (sk_filter(sk, skb, 0))
966 goto discard;
967
968 /*
969 * socket locking is here for SMP purposes as backlog rcv is currently
970 * called with bh processing disabled.
971 */
972
973 /* Do Stevens' IPV6_PKTOPTIONS.
974
975 Yes, guys, it is the only place in our code where we
976 may do this without affecting IPv4.
977 The rest of the code is protocol independent,
978 and I do not like the idea of uglifying IPv4.
979 
980 Actually, the whole idea behind IPV6_PKTOPTIONS
981 does not look very well thought out. For now we latch
982 the options received in the last packet, enqueued
983 by tcp. Feel free to propose a better solution.
984 --ANK (980728)
985 */
986 if (np->rxopt.all)
987 opt_skb = skb_clone(skb, GFP_ATOMIC);
988
989 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
990 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
991 goto reset;
992 return 0;
993 }
994
995 if (sk->sk_state == DCCP_LISTEN) {
996 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
997
998 if (nsk == NULL)
999 goto discard;
1000 /*
1001 * Queue it on the new socket if the new socket is active,
1002 * otherwise we just short-circuit this and continue with
1003 * the new socket.
1004 */
1005 if (nsk != sk) {
1006 if (dccp_child_process(sk, nsk, skb))
1007 goto reset;
1008 if (opt_skb != NULL)
1009 __kfree_skb(opt_skb);
1010 return 0;
1011 }
1012 }
1013
1014 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
1015 goto reset;
1016 return 0;
1017
1018 reset:
1019 dccp_v6_ctl_send_reset(skb);
1020 discard:
1021 if (opt_skb != NULL)
1022 __kfree_skb(opt_skb);
1023 kfree_skb(skb);
1024 return 0;
1025 }
1026
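/*
 * Main receive routine: Step 1 (header validation) and Step 2 (flow
 * lookup) of the packet processing pseudo-code. Packets without a
 * matching connection are answered with Reset(No Connection) unless
 * they are themselves Resets.
 */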
1027 static int dccp_v6_rcv(struct sk_buff **pskb)
1028 {
1029 const struct dccp_hdr *dh;
1030 struct sk_buff *skb = *pskb;
1031 struct sock *sk;
1032
1033 /* Step 1: Check header basics: */
1034
1035 if (dccp_invalid_packet(skb))
1036 goto discard_it;
1037
1038 dh = dccp_hdr(skb);
1039
1040 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1041 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1042
1043 if (dccp_packet_without_ack(skb))
1044 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1045 else
1046 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1047
1048 /* Step 2:
1049 * Look up flow ID in table and get corresponding socket */
1050 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
1051 dh->dccph_sport,
1052 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1053 inet6_iif(skb));
1054 /*
1055 * Step 2:
1056 * If no socket ...
1057 * Generate Reset(No Connection) unless P.type == Reset
1058 * Drop packet and return
1059 */
1060 if (sk == NULL)
1061 goto no_dccp_socket;
1062
1063 /*
1064 * Step 2:
1065 * ... or S.state == TIMEWAIT,
1066 * Generate Reset(No Connection) unless P.type == Reset
1067 * Drop packet and return
1068 */
1069 if (sk->sk_state == DCCP_TIME_WAIT)
1070 goto do_time_wait;
1071
1072 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1073 goto discard_and_relse;
1074
1075 return sk_receive_skb(sk, skb) ? -1 : 0;
1076
1077 no_dccp_socket:
1078 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1079 goto discard_it;
1080 /*
1081 * Step 2:
1082 * Generate Reset(No Connection) unless P.type == Reset
1083 * Drop packet and return
1084 */
1085 if (dh->dccph_type != DCCP_PKT_RESET) {
1086 DCCP_SKB_CB(skb)->dccpd_reset_code =
1087 DCCP_RESET_CODE_NO_CONNECTION;
1088 dccp_v6_ctl_send_reset(skb);
1089 }
1090 discard_it:
1091
1092 /*
1093 * Discard frame
1094 */
1095
1096 kfree_skb(skb);
1097 return 0;
1098
1099 discard_and_relse:
1100 sock_put(sk);
1101 goto discard_it;
1102
1103 do_time_wait:
1104 inet_twsk_put((struct inet_timewait_sock *)sk);
1105 goto no_dccp_socket;
1106 }
1107
1108 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1109 .queue_xmit = inet6_csk_xmit,
1110 .send_check = dccp_v6_send_check,
1111 .rebuild_header = inet6_sk_rebuild_header,
1112 .conn_request = dccp_v6_conn_request,
1113 .syn_recv_sock = dccp_v6_request_recv_sock,
1114 .net_header_len = sizeof(struct ipv6hdr),
1115 .setsockopt = ipv6_setsockopt,
1116 .getsockopt = ipv6_getsockopt,
1117 #ifdef CONFIG_COMPAT
1118 .compat_setsockopt = compat_ipv6_setsockopt,
1119 .compat_getsockopt = compat_ipv6_getsockopt,
1120 #endif
1121 .addr2sockaddr = inet6_csk_addr2sockaddr,
1122 .sockaddr_len = sizeof(struct sockaddr_in6)
1123 };
1124
1125 /*
1126 * DCCP over IPv4 via INET6 API
1127 */
1128 static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1129 .queue_xmit = ip_queue_xmit,
1130 .send_check = dccp_v4_send_check,
1131 .rebuild_header = inet_sk_rebuild_header,
1132 .conn_request = dccp_v6_conn_request,
1133 .syn_recv_sock = dccp_v6_request_recv_sock,
1134 .net_header_len = sizeof(struct iphdr),
1135 .setsockopt = ipv6_setsockopt,
1136 .getsockopt = ipv6_getsockopt,
1137 #ifdef CONFIG_COMPAT
1138 .compat_setsockopt = compat_ipv6_setsockopt,
1139 .compat_getsockopt = compat_ipv6_getsockopt,
1140 #endif
1141 .addr2sockaddr = inet6_csk_addr2sockaddr,
1142 .sockaddr_len = sizeof(struct sockaddr_in6)
1143 };
1144
1145 /* NOTE: A lot of things are set to zero explicitly by the call to
1146 * sk_alloc(), so they need not be done here.
1147 */
1148 static int dccp_v6_init_sock(struct sock *sk)
1149 {
1150 static __u8 dccp_v6_ctl_sock_initialized;
1151 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1152
1153 if (err == 0) {
1154 if (unlikely(!dccp_v6_ctl_sock_initialized))
1155 dccp_v6_ctl_sock_initialized = 1;
1156 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1157 }
1158
1159 return err;
1160 }
1161
1162 static int dccp_v6_destroy_sock(struct sock *sk)
1163 {
1164 dccp_destroy_sock(sk);
1165 return inet6_destroy_sock(sk);
1166 }
1167
1168 static struct proto dccp_v6_prot = {
1169 .name = "DCCPv6",
1170 .owner = THIS_MODULE,
1171 .close = dccp_close,
1172 .connect = dccp_v6_connect,
1173 .disconnect = dccp_disconnect,
1174 .ioctl = dccp_ioctl,
1175 .init = dccp_v6_init_sock,
1176 .setsockopt = dccp_setsockopt,
1177 .getsockopt = dccp_getsockopt,
1178 #ifdef CONFIG_COMPAT
1179 .compat_setsockopt = compat_dccp_setsockopt,
1180 .compat_getsockopt = compat_dccp_getsockopt,
1181 #endif
1182 .sendmsg = dccp_sendmsg,
1183 .recvmsg = dccp_recvmsg,
1184 .backlog_rcv = dccp_v6_do_rcv,
1185 .hash = dccp_v6_hash,
1186 .unhash = dccp_unhash,
1187 .accept = inet_csk_accept,
1188 .get_port = dccp_v6_get_port,
1189 .shutdown = dccp_shutdown,
1190 .destroy = dccp_v6_destroy_sock,
1191 .orphan_count = &dccp_orphan_count,
1192 .max_header = MAX_DCCP_HEADER,
1193 .obj_size = sizeof(struct dccp6_sock),
1194 .rsk_prot = &dccp6_request_sock_ops,
1195 .twsk_prot = &dccp6_timewait_sock_ops,
1196 };
1197
1198 static struct inet6_protocol dccp_v6_protocol = {
1199 .handler = dccp_v6_rcv,
1200 .err_handler = dccp_v6_err,
1201 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1202 };
1203
1204 static struct proto_ops inet6_dccp_ops = {
1205 .family = PF_INET6,
1206 .owner = THIS_MODULE,
1207 .release = inet6_release,
1208 .bind = inet6_bind,
1209 .connect = inet_stream_connect,
1210 .socketpair = sock_no_socketpair,
1211 .accept = inet_accept,
1212 .getname = inet6_getname,
1213 .poll = dccp_poll,
1214 .ioctl = inet6_ioctl,
1215 .listen = inet_dccp_listen,
1216 .shutdown = inet_shutdown,
1217 .setsockopt = sock_common_setsockopt,
1218 .getsockopt = sock_common_getsockopt,
1219 #ifdef CONFIG_COMPAT
1220 .compat_setsockopt = compat_sock_common_setsockopt,
1221 .compat_getsockopt = compat_sock_common_getsockopt,
1222 #endif
1223 .sendmsg = inet_sendmsg,
1224 .recvmsg = sock_common_recvmsg,
1225 .mmap = sock_no_mmap,
1226 .sendpage = sock_no_sendpage,
1227 };
1228
1229 static struct inet_protosw dccp_v6_protosw = {
1230 .type = SOCK_DCCP,
1231 .protocol = IPPROTO_DCCP,
1232 .prot = &dccp_v6_prot,
1233 .ops = &inet6_dccp_ops,
1234 .capability = -1,
1235 .flags = INET_PROTOSW_ICSK,
1236 };
1237
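/*
 * Module init: register the DCCPv6 proto, the IPPROTO_DCCP handler and
 * the protosw entry, then create the control socket used for sending
 * Resets and Acks.
 */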
1238 static int __init dccp_v6_init(void)
1239 {
1240 int err = proto_register(&dccp_v6_prot, 1);
1241
1242 if (err != 0)
1243 goto out;
1244
1245 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1246 if (err != 0)
1247 goto out_unregister_proto;
1248
1249 inet6_register_protosw(&dccp_v6_protosw);
1250
1251 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
1252 SOCK_DCCP, IPPROTO_DCCP);
1253 if (err != 0)
1254 goto out_unregister_protosw;
1255 out:
1256 return err;
1257 out_unregister_protosw:
1258 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1259 inet6_unregister_protosw(&dccp_v6_protosw);
1260 out_unregister_proto:
1261 proto_unregister(&dccp_v6_prot);
1262 goto out;
1263 }
1264
1265 static void __exit dccp_v6_exit(void)
1266 {
1267 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1268 inet6_unregister_protosw(&dccp_v6_protosw);
1269 proto_unregister(&dccp_v6_prot);
1270 }
1271
1272 module_init(dccp_v6_init);
1273 module_exit(dccp_v6_exit);
1274
1275 /*
1276 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1277 * values directly. Also cover the case where the protocol is not specified,
1278 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1279 */
1280 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1281 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1282 MODULE_LICENSE("GPL");
1283 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1284 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Control Protocol");