net/dccp/ipv6.c (mirror_ubuntu-artful-kernel.git, blob at commit "[DCCP]: Combine allocating & zeroing header space on skb")
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/xfrm.h>
18
19 #include <net/addrconf.h>
20 #include <net/inet_common.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/inet6_connection_sock.h>
24 #include <net/inet6_hashtables.h>
25 #include <net/ip6_route.h>
26 #include <net/ipv6.h>
27 #include <net/protocol.h>
28 #include <net/transp_v6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/xfrm.h>
31
32 #include "dccp.h"
33 #include "ipv6.h"
34 #include "feat.h"
35
36 /* Socket used for sending RSTs and ACKs */
37 static struct socket *dccp_v6_ctl_socket;
38
39 static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
40 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
41 struct request_sock *req);
42 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
43
44 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
45
46 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
47 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
48
49 static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
50 {
51 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
52 inet6_csk_bind_conflict);
53 }
54
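/*
 * Hash the socket into the DCCP lookup tables; v4-mapped sockets go
 * through the IPv4 path (dccp_hash), native IPv6 ones through
 * __inet6_hash under local_bh protection.
 */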
55 static void dccp_v6_hash(struct sock *sk)
56 {
57 if (sk->sk_state != DCCP_CLOSED) {
58 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
59 dccp_hash(sk);
60 return;
61 }
62 local_bh_disable();
63 __inet6_hash(&dccp_hashinfo, sk);
64 local_bh_enable();
65 }
66 }
67
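/*
 * Compute the DCCP checksum over the IPv6 pseudo-header; 'base' is the
 * partial checksum already accumulated over the DCCP header and payload.
 */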
68 static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
69 struct in6_addr *saddr,
70 struct in6_addr *daddr,
71 unsigned long base)
72 {
73 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
74 }
75
76 static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
77 {
78 const struct dccp_hdr *dh = dccp_hdr(skb);
79
80 if (skb->protocol == htons(ETH_P_IPV6))
81 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
82 skb->nh.ipv6h->saddr.s6_addr32,
83 dh->dccph_dport,
84 dh->dccph_sport);
85
86 return secure_dccp_sequence_number(skb->nh.iph->daddr,
87 skb->nh.iph->saddr,
88 dh->dccph_dport,
89 dh->dccph_sport);
90 }
91
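/*
 * Active open: resolve the destination (flow labels and v4-mapped
 * addresses included, the latter delegated to dccp_v4_connect()), do the
 * route and xfrm lookups, and start the handshake via dccp_connect().
 */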
92 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
93 int addr_len)
94 {
95 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
96 struct inet_connection_sock *icsk = inet_csk(sk);
97 struct inet_sock *inet = inet_sk(sk);
98 struct ipv6_pinfo *np = inet6_sk(sk);
99 struct dccp_sock *dp = dccp_sk(sk);
100 struct in6_addr *saddr = NULL, *final_p = NULL, final;
101 struct flowi fl;
102 struct dst_entry *dst;
103 int addr_type;
104 int err;
105
106 dp->dccps_role = DCCP_ROLE_CLIENT;
107
108 if (addr_len < SIN6_LEN_RFC2133)
109 return -EINVAL;
110
111 if (usin->sin6_family != AF_INET6)
112 return -EAFNOSUPPORT;
113
114 memset(&fl, 0, sizeof(fl));
115
116 if (np->sndflow) {
117 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
118 IP6_ECN_flow_init(fl.fl6_flowlabel);
119 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
120 struct ip6_flowlabel *flowlabel;
121 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
122 if (flowlabel == NULL)
123 return -EINVAL;
124 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
125 fl6_sock_release(flowlabel);
126 }
127 }
128 /*
129 * connect() to INADDR_ANY means loopback (BSD'ism).
130 */
131 if (ipv6_addr_any(&usin->sin6_addr))
132 usin->sin6_addr.s6_addr[15] = 1;
133
134 addr_type = ipv6_addr_type(&usin->sin6_addr);
135
136 if (addr_type & IPV6_ADDR_MULTICAST)
137 return -ENETUNREACH;
138
139 if (addr_type & IPV6_ADDR_LINKLOCAL) {
140 if (addr_len >= sizeof(struct sockaddr_in6) &&
141 usin->sin6_scope_id) {
142 /* If an interface was set at bind time, the indices
143 * must coincide.
144 */
145 if (sk->sk_bound_dev_if &&
146 sk->sk_bound_dev_if != usin->sin6_scope_id)
147 return -EINVAL;
148
149 sk->sk_bound_dev_if = usin->sin6_scope_id;
150 }
151
152 /* Connect to link-local address requires an interface */
153 if (!sk->sk_bound_dev_if)
154 return -EINVAL;
155 }
156
157 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
158 np->flow_label = fl.fl6_flowlabel;
159
160 /*
161 * DCCP over IPv4
162 */
163 if (addr_type == IPV6_ADDR_MAPPED) {
164 u32 exthdrlen = icsk->icsk_ext_hdr_len;
165 struct sockaddr_in sin;
166
167 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
168
169 if (__ipv6_only_sock(sk))
170 return -ENETUNREACH;
171
172 sin.sin_family = AF_INET;
173 sin.sin_port = usin->sin6_port;
174 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
175
176 icsk->icsk_af_ops = &dccp_ipv6_mapped;
177 sk->sk_backlog_rcv = dccp_v4_do_rcv;
178
179 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
180 if (err) {
181 icsk->icsk_ext_hdr_len = exthdrlen;
182 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
183 sk->sk_backlog_rcv = dccp_v6_do_rcv;
184 goto failure;
185 } else {
186 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
187 inet->saddr);
188 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
189 inet->rcv_saddr);
190 }
191
192 return err;
193 }
194
195 if (!ipv6_addr_any(&np->rcv_saddr))
196 saddr = &np->rcv_saddr;
197
198 fl.proto = IPPROTO_DCCP;
199 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
200 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
201 fl.oif = sk->sk_bound_dev_if;
202 fl.fl_ip_dport = usin->sin6_port;
203 fl.fl_ip_sport = inet->sport;
204 security_sk_classify_flow(sk, &fl);
205
206 if (np->opt != NULL && np->opt->srcrt != NULL) {
207 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
208
209 ipv6_addr_copy(&final, &fl.fl6_dst);
210 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
211 final_p = &final;
212 }
213
214 err = ip6_dst_lookup(sk, &dst, &fl);
215 if (err)
216 goto failure;
217
218 if (final_p)
219 ipv6_addr_copy(&fl.fl6_dst, final_p);
220
221 err = xfrm_lookup(&dst, &fl, sk, 0);
222 if (err < 0)
223 goto failure;
224
225 if (saddr == NULL) {
226 saddr = &fl.fl6_src;
227 ipv6_addr_copy(&np->rcv_saddr, saddr);
228 }
229
230 /* set the source address */
231 ipv6_addr_copy(&np->saddr, saddr);
232 inet->rcv_saddr = LOOPBACK4_IPV6;
233
234 __ip6_dst_store(sk, dst, NULL, NULL);
235
236 icsk->icsk_ext_hdr_len = 0;
237 if (np->opt != NULL)
238 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
239 np->opt->opt_nflen);
240
241 inet->dport = usin->sin6_port;
242
243 dccp_set_state(sk, DCCP_REQUESTING);
244 err = inet6_hash_connect(&dccp_death_row, sk);
245 if (err)
246 goto late_failure;
247 /* FIXME */
248 #if 0
249 dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
250 np->daddr.s6_addr32,
251 inet->sport,
252 inet->dport);
253 #endif
254 err = dccp_connect(sk);
255 if (err)
256 goto late_failure;
257
258 return 0;
259
260 late_failure:
261 dccp_set_state(sk, DCCP_CLOSED);
262 __sk_dst_reset(sk);
263 failure:
264 inet->dport = 0;
265 sk->sk_route_caps = 0;
266 return err;
267 }
268
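/*
 * ICMPv6 error handler: look up the socket the offending packet belongs
 * to, resync the MSS on ICMPV6_PKT_TOOBIG, drop a matching request_sock
 * while listening, and otherwise report the error to the socket.
 */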
269 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
270 int type, int code, int offset, __be32 info)
271 {
272 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
273 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
274 struct ipv6_pinfo *np;
275 struct sock *sk;
276 int err;
277 __u64 seq;
278
279 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
280 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
281
282 if (sk == NULL) {
283 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
284 return;
285 }
286
287 if (sk->sk_state == DCCP_TIME_WAIT) {
288 inet_twsk_put(inet_twsk(sk));
289 return;
290 }
291
292 bh_lock_sock(sk);
293 if (sock_owned_by_user(sk))
294 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
295
296 if (sk->sk_state == DCCP_CLOSED)
297 goto out;
298
299 np = inet6_sk(sk);
300
301 if (type == ICMPV6_PKT_TOOBIG) {
302 struct dst_entry *dst = NULL;
303
304 if (sock_owned_by_user(sk))
305 goto out;
306 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
307 goto out;
308
309 /* icmp should have updated the destination cache entry */
310 dst = __sk_dst_check(sk, np->dst_cookie);
311 if (dst == NULL) {
312 struct inet_sock *inet = inet_sk(sk);
313 struct flowi fl;
314
315 /* BUGGG_FUTURE: Again, it is not clear how
316 to handle the rthdr case. Ignore this complexity
317 for now.
318 */
319 memset(&fl, 0, sizeof(fl));
320 fl.proto = IPPROTO_DCCP;
321 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
322 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
323 fl.oif = sk->sk_bound_dev_if;
324 fl.fl_ip_dport = inet->dport;
325 fl.fl_ip_sport = inet->sport;
326 security_sk_classify_flow(sk, &fl);
327
328 err = ip6_dst_lookup(sk, &dst, &fl);
329 if (err) {
330 sk->sk_err_soft = -err;
331 goto out;
332 }
333
334 err = xfrm_lookup(&dst, &fl, sk, 0);
335 if (err < 0) {
336 sk->sk_err_soft = -err;
337 goto out;
338 }
339 } else
340 dst_hold(dst);
341
342 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
343 dccp_sync_mss(sk, dst_mtu(dst));
344 } /* else let the usual retransmit timer handle it */
345 dst_release(dst);
346 goto out;
347 }
348
349 icmpv6_err_convert(type, code, &err);
350
351 seq = DCCP_SKB_CB(skb)->dccpd_seq;
352 /* Might be for a request_sock */
353 switch (sk->sk_state) {
354 struct request_sock *req, **prev;
355 case DCCP_LISTEN:
356 if (sock_owned_by_user(sk))
357 goto out;
358
359 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
360 &hdr->daddr, &hdr->saddr,
361 inet6_iif(skb));
362 if (req == NULL)
363 goto out;
364
365 /*
366 * ICMPs are not backlogged, hence we cannot get an established
367 * socket here.
368 */
369 BUG_TRAP(req->sk == NULL);
370
371 if (seq != dccp_rsk(req)->dreq_iss) {
372 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
373 goto out;
374 }
375
376 inet_csk_reqsk_queue_drop(sk, req, prev);
377 goto out;
378
379 case DCCP_REQUESTING:
380 case DCCP_RESPOND: /* Cannot happen.
381 It can, if SYNs are crossed. --ANK */
382 if (!sock_owned_by_user(sk)) {
383 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
384 sk->sk_err = err;
385 /*
386 * Wake people up to see the error
387 * (see connect in sock.c)
388 */
389 sk->sk_error_report(sk);
390 dccp_done(sk);
391 } else
392 sk->sk_err_soft = err;
393 goto out;
394 }
395
396 if (!sock_owned_by_user(sk) && np->recverr) {
397 sk->sk_err = err;
398 sk->sk_error_report(sk);
399 } else
400 sk->sk_err_soft = err;
401
402 out:
403 bh_unlock_sock(sk);
404 sock_put(sk);
405 }
406
407
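/*
 * Send a RESPONSE packet answering the REQUEST described by 'req',
 * routed via the addresses stored in the request_sock and any source
 * routing header taken from the listener's IPv6 options.
 */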
408 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
409 struct dst_entry *dst)
410 {
411 struct inet6_request_sock *ireq6 = inet6_rsk(req);
412 struct ipv6_pinfo *np = inet6_sk(sk);
413 struct sk_buff *skb;
414 struct ipv6_txoptions *opt = NULL;
415 struct in6_addr *final_p = NULL, final;
416 struct flowi fl;
417 int err = -1;
418
419 memset(&fl, 0, sizeof(fl));
420 fl.proto = IPPROTO_DCCP;
421 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
422 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
423 fl.fl6_flowlabel = 0;
424 fl.oif = ireq6->iif;
425 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
426 fl.fl_ip_sport = inet_sk(sk)->sport;
427 security_req_classify_flow(req, &fl);
428
429 if (dst == NULL) {
430 opt = np->opt;
431 if (opt == NULL &&
432 np->rxopt.bits.osrcrt == 2 &&
433 ireq6->pktopts) {
434 struct sk_buff *pktopts = ireq6->pktopts;
435 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
436
437 if (rxopt->srcrt)
438 opt = ipv6_invert_rthdr(sk,
439 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
440 rxopt->srcrt));
441 }
442
443 if (opt != NULL && opt->srcrt != NULL) {
444 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
445
446 ipv6_addr_copy(&final, &fl.fl6_dst);
447 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
448 final_p = &final;
449 }
450
451 err = ip6_dst_lookup(sk, &dst, &fl);
452 if (err)
453 goto done;
454
455 if (final_p)
456 ipv6_addr_copy(&fl.fl6_dst, final_p);
457
458 err = xfrm_lookup(&dst, &fl, sk, 0);
459 if (err < 0)
460 goto done;
461 }
462
463 skb = dccp_make_response(sk, dst, req);
464 if (skb != NULL) {
465 struct dccp_hdr *dh = dccp_hdr(skb);
466
467 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
468 &ireq6->loc_addr,
469 &ireq6->rmt_addr,
470 csum_partial((char *)dh,
471 skb->len,
472 skb->csum));
473 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
474 err = ip6_xmit(sk, skb, &fl, opt, 0);
475 if (err == NET_XMIT_CN)
476 err = 0;
477 }
478
479 done:
480 if (opt != NULL && opt != np->opt)
481 sock_kfree_s(sk, opt, opt->tot_len);
482 dst_release(dst);
483 return err;
484 }
485
486 static void dccp_v6_reqsk_destructor(struct request_sock *req)
487 {
488 if (inet6_rsk(req)->pktopts != NULL)
489 kfree_skb(inet6_rsk(req)->pktopts);
490 }
491
492 static struct request_sock_ops dccp6_request_sock_ops = {
493 .family = AF_INET6,
494 .obj_size = sizeof(struct dccp6_request_sock),
495 .rtx_syn_ack = dccp_v6_send_response,
496 .send_ack = dccp_v6_reqsk_send_ack,
497 .destructor = dccp_v6_reqsk_destructor,
498 .send_reset = dccp_v6_ctl_send_reset,
499 };
500
501 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
502 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
503 };
504
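/*
 * Fill in the checksum of an outgoing packet: the partial sum over the
 * DCCP header (plus skb->csum) is folded with the IPv6 pseudo-header by
 * csum_ipv6_magic().
 */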
505 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
506 {
507 struct ipv6_pinfo *np = inet6_sk(sk);
508 struct dccp_hdr *dh = dccp_hdr(skb);
509
510 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
511 len, IPPROTO_DCCP,
512 csum_partial((char *)dh,
513 dh->dccph_doff << 2,
514 skb->csum));
515 }
516
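/*
 * Send a RESET in reply to 'rxskb' on the control socket. A RESET is
 * never answered with another RESET, and only unicast destinations get
 * a reply; see "8.3.1. Abnormal Termination" in RFC 4340.
 */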
517 static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
518 {
519 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
520 const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
521 sizeof(struct dccp_hdr_ext) +
522 sizeof(struct dccp_hdr_reset);
523 struct sk_buff *skb;
524 struct flowi fl;
525 u64 seqno;
526
527 if (rxdh->dccph_type == DCCP_PKT_RESET)
528 return;
529
530 if (!ipv6_unicast_destination(rxskb))
531 return;
532
533 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
534 GFP_ATOMIC);
535 if (skb == NULL)
536 return;
537
538 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
539
540 dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
541
542 /* Build the RESET header, swapping the received packet's ports. */
543 dh->dccph_type = DCCP_PKT_RESET;
544 dh->dccph_sport = rxdh->dccph_dport;
545 dh->dccph_dport = rxdh->dccph_sport;
546 dh->dccph_doff = dccp_hdr_reset_len / 4;
547 dh->dccph_x = 1;
548 dccp_hdr_reset(skb)->dccph_reset_code =
549 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
550
551 /* See "8.3.1. Abnormal Termination" in RFC 4340 */
552 seqno = 0;
553 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
554 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
555
556 dccp_hdr_set_seq(dh, seqno);
557 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
558 DCCP_SKB_CB(rxskb)->dccpd_seq);
559
560 memset(&fl, 0, sizeof(fl));
561 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
562 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
563 dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
564 sizeof(*dh), IPPROTO_DCCP,
565 skb->csum);
566 fl.proto = IPPROTO_DCCP;
567 fl.oif = inet6_iif(rxskb);
568 fl.fl_ip_dport = dh->dccph_dport;
569 fl.fl_ip_sport = dh->dccph_sport;
570 security_skb_classify_flow(rxskb, &fl);
571
572 /* sk = NULL, but it is safe for now. RST socket required. */
573 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
574 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
575 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
576 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
577 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
578 return;
579 }
580 }
581
582 kfree_skb(skb);
583 }
584
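/*
 * Send a bare ACK for 'req' on the control socket, echoing the received
 * packet's sequence numbers with the ports swapped.
 */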
585 static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
586 struct request_sock *req)
587 {
588 struct flowi fl;
589 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
590 const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
591 sizeof(struct dccp_hdr_ext) +
592 sizeof(struct dccp_hdr_ack_bits);
593 struct sk_buff *skb;
594
595 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
596 GFP_ATOMIC);
597 if (skb == NULL)
598 return;
599
600 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
601
602 dh = dccp_zeroed_hdr(skb, dccp_hdr_ack_len);
603
604 /* Build the DCCP ACK header (checksum still missing, see FIXME below). */
605 dh->dccph_type = DCCP_PKT_ACK;
606 dh->dccph_sport = rxdh->dccph_dport;
607 dh->dccph_dport = rxdh->dccph_sport;
608 dh->dccph_doff = dccp_hdr_ack_len / 4;
609 dh->dccph_x = 1;
610
611 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
612 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
613 DCCP_SKB_CB(rxskb)->dccpd_seq);
614
615 memset(&fl, 0, sizeof(fl));
616 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
617 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
618
619 /* FIXME: calculate checksum, IPv4 also should... */
620
621 fl.proto = IPPROTO_DCCP;
622 fl.oif = inet6_iif(rxskb);
623 fl.fl_ip_dport = dh->dccph_dport;
624 fl.fl_ip_sport = dh->dccph_sport;
625 security_req_classify_flow(req, &fl);
626
627 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
628 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
629 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
630 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
631 return;
632 }
633 }
634
635 kfree_skb(skb);
636 }
637
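/*
 * Map a packet arriving on a listening socket to either a pending
 * request_sock (finishing the handshake via dccp_check_req()), an
 * already established socket, or the listening socket itself.
 */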
638 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
639 {
640 const struct dccp_hdr *dh = dccp_hdr(skb);
641 const struct ipv6hdr *iph = skb->nh.ipv6h;
642 struct sock *nsk;
643 struct request_sock **prev;
644 /* Find possible connection requests. */
645 struct request_sock *req = inet6_csk_search_req(sk, &prev,
646 dh->dccph_sport,
647 &iph->saddr,
648 &iph->daddr,
649 inet6_iif(skb));
650 if (req != NULL)
651 return dccp_check_req(sk, skb, req, prev);
652
653 nsk = __inet6_lookup_established(&dccp_hashinfo,
654 &iph->saddr, dh->dccph_sport,
655 &iph->daddr, ntohs(dh->dccph_dport),
656 inet6_iif(skb));
657 if (nsk != NULL) {
658 if (nsk->sk_state != DCCP_TIME_WAIT) {
659 bh_lock_sock(nsk);
660 return nsk;
661 }
662 inet_twsk_put(inet_twsk(nsk));
663 return NULL;
664 }
665
666 return sk;
667 }
668
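/*
 * Process a REQUEST received in the LISTEN state: validate the service
 * code, allocate and initialise a request_sock, and answer with a
 * RESPONSE via dccp_v6_send_response().
 */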
669 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
670 {
671 struct dccp_sock dp;
672 struct request_sock *req;
673 struct dccp_request_sock *dreq;
674 struct inet6_request_sock *ireq6;
675 struct ipv6_pinfo *np = inet6_sk(sk);
676 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
677 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
678 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
679
680 if (skb->protocol == htons(ETH_P_IP))
681 return dccp_v4_conn_request(sk, skb);
682
683 if (!ipv6_unicast_destination(skb))
684 goto drop;
685
686 if (dccp_bad_service_code(sk, service)) {
687 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
688 goto drop;
689 }
690 /*
691 * There are no SYN attacks on IPv6, yet...
692 */
693 if (inet_csk_reqsk_queue_is_full(sk))
694 goto drop;
695
696 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
697 goto drop;
698
699 req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
700 if (req == NULL)
701 goto drop;
702
703 /* FIXME: process options */
704
705 dccp_openreq_init(req, &dp, skb);
706
707 if (security_inet_conn_request(sk, skb, req))
708 goto drop_and_free;
709
710 ireq6 = inet6_rsk(req);
711 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
712 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
713 req->rcv_wnd = dccp_feat_default_sequence_window;
714 ireq6->pktopts = NULL;
715
716 if (ipv6_opt_accepted(sk, skb) ||
717 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
718 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
719 atomic_inc(&skb->users);
720 ireq6->pktopts = skb;
721 }
722 ireq6->iif = sk->sk_bound_dev_if;
723
724 /* So that link locals have meaning */
725 if (!sk->sk_bound_dev_if &&
726 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
727 ireq6->iif = inet6_iif(skb);
728
729 /*
730 * Step 3: Process LISTEN state
731 *
732 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
733 *
734 * In fact we defer setting S.GSR, S.SWL, S.SWH to
735 * dccp_create_openreq_child.
736 */
737 dreq = dccp_rsk(req);
738 dreq->dreq_isr = dcb->dccpd_seq;
739 dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
740 dreq->dreq_service = service;
741
742 if (dccp_v6_send_response(sk, req, NULL))
743 goto drop_and_free;
744
745 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
746 return 0;
747
748 drop_and_free:
749 reqsk_free(req);
750 drop:
751 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
752 dcb->dccpd_reset_code = reset_code;
753 return -1;
754 }
755
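/*
 * Create the child socket once the handshake completes. The v4-mapped
 * case reuses dccp_v4_request_recv_sock() and then attaches the IPv6
 * state; the native case routes, clones the listener's options and
 * hashes the new socket.
 */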
756 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
757 struct sk_buff *skb,
758 struct request_sock *req,
759 struct dst_entry *dst)
760 {
761 struct inet6_request_sock *ireq6 = inet6_rsk(req);
762 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
763 struct inet_sock *newinet;
764 struct dccp_sock *newdp;
765 struct dccp6_sock *newdp6;
766 struct sock *newsk;
767 struct ipv6_txoptions *opt;
768
769 if (skb->protocol == htons(ETH_P_IP)) {
770 /*
771 * v6 mapped
772 */
773 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
774 if (newsk == NULL)
775 return NULL;
776
777 newdp6 = (struct dccp6_sock *)newsk;
778 newdp = dccp_sk(newsk);
779 newinet = inet_sk(newsk);
780 newinet->pinet6 = &newdp6->inet6;
781 newnp = inet6_sk(newsk);
782
783 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
784
785 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
786 newinet->daddr);
787
788 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
789 newinet->saddr);
790
791 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
792
793 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
794 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
795 newnp->pktoptions = NULL;
796 newnp->opt = NULL;
797 newnp->mcast_oif = inet6_iif(skb);
798 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
799
800 /*
801 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
802 * here, dccp_create_openreq_child now does this for us, see the comment in
803 * that function for the gory details. -acme
804 */
805
806 /* This is a tricky place. Until this moment the v4-mapped socket
807 was using the IPv6 icsk.icsk_af_ops.
808 Sync the MSS now.
809 */
810 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
811
812 return newsk;
813 }
814
815 opt = np->opt;
816
817 if (sk_acceptq_is_full(sk))
818 goto out_overflow;
819
820 if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
821 const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
822
823 if (rxopt->srcrt)
824 opt = ipv6_invert_rthdr(sk,
825 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
826 rxopt->srcrt));
827 }
828
829 if (dst == NULL) {
830 struct in6_addr *final_p = NULL, final;
831 struct flowi fl;
832
833 memset(&fl, 0, sizeof(fl));
834 fl.proto = IPPROTO_DCCP;
835 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
836 if (opt != NULL && opt->srcrt != NULL) {
837 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
838
839 ipv6_addr_copy(&final, &fl.fl6_dst);
840 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
841 final_p = &final;
842 }
843 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
844 fl.oif = sk->sk_bound_dev_if;
845 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
846 fl.fl_ip_sport = inet_sk(sk)->sport;
847 security_sk_classify_flow(sk, &fl);
848
849 if (ip6_dst_lookup(sk, &dst, &fl))
850 goto out;
851
852 if (final_p)
853 ipv6_addr_copy(&fl.fl6_dst, final_p);
854
855 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
856 goto out;
857 }
858
859 newsk = dccp_create_openreq_child(sk, req, skb);
860 if (newsk == NULL)
861 goto out;
862
863 /*
864 * No need to charge this sock to the relevant IPv6 refcnt debug socks
865 * count here, dccp_create_openreq_child now does this for us, see the
866 * comment in that function for the gory details. -acme
867 */
868
869 __ip6_dst_store(newsk, dst, NULL, NULL);
870 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
871 NETIF_F_TSO);
872 newdp6 = (struct dccp6_sock *)newsk;
873 newinet = inet_sk(newsk);
874 newinet->pinet6 = &newdp6->inet6;
875 newdp = dccp_sk(newsk);
876 newnp = inet6_sk(newsk);
877
878 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
879
880 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
881 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
882 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
883 newsk->sk_bound_dev_if = ireq6->iif;
884
885 /* Now IPv6 options...
886
887 First: no IPv4 options.
888 */
889 newinet->opt = NULL;
890
891 /* Clone RX bits */
892 newnp->rxopt.all = np->rxopt.all;
893
894 /* Clone pktoptions received with the REQUEST */
895 newnp->pktoptions = NULL;
896 if (ireq6->pktopts != NULL) {
897 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
898 kfree_skb(ireq6->pktopts);
899 ireq6->pktopts = NULL;
900 if (newnp->pktoptions)
901 skb_set_owner_r(newnp->pktoptions, newsk);
902 }
903 newnp->opt = NULL;
904 newnp->mcast_oif = inet6_iif(skb);
905 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
906
907 /*
908 * Clone native IPv6 options from listening socket (if any)
909 *
910 * Yes, keeping a reference count would be much cleverer, but it would
911 * require one more thing here: reattaching the optmem to newsk.
912 */
913 if (opt != NULL) {
914 newnp->opt = ipv6_dup_options(newsk, opt);
915 if (opt != np->opt)
916 sock_kfree_s(sk, opt, opt->tot_len);
917 }
918
919 inet_csk(newsk)->icsk_ext_hdr_len = 0;
920 if (newnp->opt != NULL)
921 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
922 newnp->opt->opt_flen);
923
924 dccp_sync_mss(newsk, dst_mtu(dst));
925
926 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
927
928 __inet6_hash(&dccp_hashinfo, newsk);
929 inet_inherit_port(&dccp_hashinfo, sk, newsk);
930
931 return newsk;
932
933 out_overflow:
934 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
935 out:
936 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
937 if (opt != NULL && opt != np->opt)
938 sock_kfree_s(sk, opt, opt->tot_len);
939 dst_release(dst);
940 return NULL;
941 }
942
943 /* The socket must have its spinlock held when we get
944 * here.
945 *
946 * We have a potential double-lock case here, so even when
947 * doing backlog processing we use the BH locking scheme.
948 * This is because we cannot sleep with the original spinlock
949 * held.
950 */
951 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
952 {
953 struct ipv6_pinfo *np = inet6_sk(sk);
954 struct sk_buff *opt_skb = NULL;
955
956 /* Imagine: the socket is IPv6, but an IPv4 packet arrives,
957 goes to the IPv4 receive handler and is backlogged.
958 From the backlog it always ends up here. Kerboom...
959 Fortunately, dccp_rcv_established and rcv_established
960 handle it correctly, but that is not the case with
961 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
962 */
963
964 if (skb->protocol == htons(ETH_P_IP))
965 return dccp_v4_do_rcv(sk, skb);
966
967 if (sk_filter(sk, skb))
968 goto discard;
969
970 /*
971 * socket locking is here for SMP purposes as backlog rcv is currently
972 * called with bh processing disabled.
973 */
974
975 /* Do Stevens' IPV6_PKTOPTIONS.
976
977 Yes, guys, this is the only place in our code where we
978 can do this without affecting IPv4.
979 The rest of the code is protocol independent,
980 and I do not like the idea of uglifying IPv4.
981
982 Actually, the whole idea behind IPV6_PKTOPTIONS
983 does not look very well thought out. For now we latch the
984 options received in the last packet enqueued
985 by tcp. Feel free to propose a better solution.
986 --ANK (980728)
987 */
988 if (np->rxopt.all)
989 /*
990 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
991 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
992 */
993 opt_skb = skb_clone(skb, GFP_ATOMIC);
994
995 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
996 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
997 goto reset;
998 if (opt_skb) {
999 /* XXX This is where we would goto ipv6_pktoptions. */
1000 __kfree_skb(opt_skb);
1001 }
1002 return 0;
1003 }
1004
1005 if (sk->sk_state == DCCP_LISTEN) {
1006 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
1007
1008 if (nsk == NULL)
1009 goto discard;
1010 /*
1011 * Queue it on the new socket if the new socket is active,
1012 * otherwise we just short-circuit this and continue with
1013 * the new socket.
1014 */
1015 if (nsk != sk) {
1016 if (dccp_child_process(sk, nsk, skb))
1017 goto reset;
1018 if (opt_skb != NULL)
1019 __kfree_skb(opt_skb);
1020 return 0;
1021 }
1022 }
1023
1024 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
1025 goto reset;
1026 if (opt_skb) {
1027 /* XXX This is where we would goto ipv6_pktoptions. */
1028 __kfree_skb(opt_skb);
1029 }
1030 return 0;
1031
1032 reset:
1033 dccp_v6_ctl_send_reset(skb);
1034 discard:
1035 if (opt_skb != NULL)
1036 __kfree_skb(opt_skb);
1037 kfree_skb(skb);
1038 return 0;
1039 }
1040
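/*
 * Protocol receive entry point (registered via inet6_add_protocol):
 * sanity-check the header, look the flow up in the hash tables and hand
 * the packet to the owning socket, or generate a Reset(No Connection)
 * when none exists.
 */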
1041 static int dccp_v6_rcv(struct sk_buff **pskb)
1042 {
1043 const struct dccp_hdr *dh;
1044 struct sk_buff *skb = *pskb;
1045 struct sock *sk;
1046
1047 /* Step 1: Check header basics: */
1048
1049 if (dccp_invalid_packet(skb))
1050 goto discard_it;
1051
1052 dh = dccp_hdr(skb);
1053
1054 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1055 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1056
1057 if (dccp_packet_without_ack(skb))
1058 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1059 else
1060 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1061
1062 /* Step 2:
1063 * Look up flow ID in table and get corresponding socket */
1064 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
1065 dh->dccph_sport,
1066 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1067 inet6_iif(skb));
1068 /*
1069 * Step 2:
1070 * If no socket ...
1071 * Generate Reset(No Connection) unless P.type == Reset
1072 * Drop packet and return
1073 */
1074 if (sk == NULL)
1075 goto no_dccp_socket;
1076
1077 /*
1078 * Step 2:
1079 * ... or S.state == TIMEWAIT,
1080 * Generate Reset(No Connection) unless P.type == Reset
1081 * Drop packet and return
1082 */
1083 if (sk->sk_state == DCCP_TIME_WAIT)
1084 goto do_time_wait;
1085
1086 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1087 goto discard_and_relse;
1088
1089 return sk_receive_skb(sk, skb) ? -1 : 0;
1090
1091 no_dccp_socket:
1092 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1093 goto discard_it;
1094 /*
1095 * Step 2:
1096 * Generate Reset(No Connection) unless P.type == Reset
1097 * Drop packet and return
1098 */
1099 if (dh->dccph_type != DCCP_PKT_RESET) {
1100 DCCP_SKB_CB(skb)->dccpd_reset_code =
1101 DCCP_RESET_CODE_NO_CONNECTION;
1102 dccp_v6_ctl_send_reset(skb);
1103 }
1104 discard_it:
1105
1106 /*
1107 * Discard frame
1108 */
1109
1110 kfree_skb(skb);
1111 return 0;
1112
1113 discard_and_relse:
1114 sock_put(sk);
1115 goto discard_it;
1116
1117 do_time_wait:
1118 inet_twsk_put(inet_twsk(sk));
1119 goto no_dccp_socket;
1120 }
1121
1122 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1123 .queue_xmit = inet6_csk_xmit,
1124 .send_check = dccp_v6_send_check,
1125 .rebuild_header = inet6_sk_rebuild_header,
1126 .conn_request = dccp_v6_conn_request,
1127 .syn_recv_sock = dccp_v6_request_recv_sock,
1128 .net_header_len = sizeof(struct ipv6hdr),
1129 .setsockopt = ipv6_setsockopt,
1130 .getsockopt = ipv6_getsockopt,
1131 .addr2sockaddr = inet6_csk_addr2sockaddr,
1132 .sockaddr_len = sizeof(struct sockaddr_in6),
1133 #ifdef CONFIG_COMPAT
1134 .compat_setsockopt = compat_ipv6_setsockopt,
1135 .compat_getsockopt = compat_ipv6_getsockopt,
1136 #endif
1137 };
1138
1139 /*
1140 * DCCP over IPv4 via INET6 API
1141 */
1142 static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1143 .queue_xmit = ip_queue_xmit,
1144 .send_check = dccp_v4_send_check,
1145 .rebuild_header = inet_sk_rebuild_header,
1146 .conn_request = dccp_v6_conn_request,
1147 .syn_recv_sock = dccp_v6_request_recv_sock,
1148 .net_header_len = sizeof(struct iphdr),
1149 .setsockopt = ipv6_setsockopt,
1150 .getsockopt = ipv6_getsockopt,
1151 .addr2sockaddr = inet6_csk_addr2sockaddr,
1152 .sockaddr_len = sizeof(struct sockaddr_in6),
1153 #ifdef CONFIG_COMPAT
1154 .compat_setsockopt = compat_ipv6_setsockopt,
1155 .compat_getsockopt = compat_ipv6_getsockopt,
1156 #endif
1157 };
1158
1159 /* NOTE: A lot of things are set to zero explicitly by the call to
1160 * sk_alloc(), so they need not be done here.
1161 */
1162 static int dccp_v6_init_sock(struct sock *sk)
1163 {
1164 static __u8 dccp_v6_ctl_sock_initialized;
1165 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1166
1167 if (err == 0) {
1168 if (unlikely(!dccp_v6_ctl_sock_initialized))
1169 dccp_v6_ctl_sock_initialized = 1;
1170 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1171 }
1172
1173 return err;
1174 }
1175
1176 static int dccp_v6_destroy_sock(struct sock *sk)
1177 {
1178 dccp_destroy_sock(sk);
1179 return inet6_destroy_sock(sk);
1180 }
1181
1182 static struct proto dccp_v6_prot = {
1183 .name = "DCCPv6",
1184 .owner = THIS_MODULE,
1185 .close = dccp_close,
1186 .connect = dccp_v6_connect,
1187 .disconnect = dccp_disconnect,
1188 .ioctl = dccp_ioctl,
1189 .init = dccp_v6_init_sock,
1190 .setsockopt = dccp_setsockopt,
1191 .getsockopt = dccp_getsockopt,
1192 .sendmsg = dccp_sendmsg,
1193 .recvmsg = dccp_recvmsg,
1194 .backlog_rcv = dccp_v6_do_rcv,
1195 .hash = dccp_v6_hash,
1196 .unhash = dccp_unhash,
1197 .accept = inet_csk_accept,
1198 .get_port = dccp_v6_get_port,
1199 .shutdown = dccp_shutdown,
1200 .destroy = dccp_v6_destroy_sock,
1201 .orphan_count = &dccp_orphan_count,
1202 .max_header = MAX_DCCP_HEADER,
1203 .obj_size = sizeof(struct dccp6_sock),
1204 .rsk_prot = &dccp6_request_sock_ops,
1205 .twsk_prot = &dccp6_timewait_sock_ops,
1206 #ifdef CONFIG_COMPAT
1207 .compat_setsockopt = compat_dccp_setsockopt,
1208 .compat_getsockopt = compat_dccp_getsockopt,
1209 #endif
1210 };
1211
1212 static struct inet6_protocol dccp_v6_protocol = {
1213 .handler = dccp_v6_rcv,
1214 .err_handler = dccp_v6_err,
1215 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1216 };
1217
1218 static struct proto_ops inet6_dccp_ops = {
1219 .family = PF_INET6,
1220 .owner = THIS_MODULE,
1221 .release = inet6_release,
1222 .bind = inet6_bind,
1223 .connect = inet_stream_connect,
1224 .socketpair = sock_no_socketpair,
1225 .accept = inet_accept,
1226 .getname = inet6_getname,
1227 .poll = dccp_poll,
1228 .ioctl = inet6_ioctl,
1229 .listen = inet_dccp_listen,
1230 .shutdown = inet_shutdown,
1231 .setsockopt = sock_common_setsockopt,
1232 .getsockopt = sock_common_getsockopt,
1233 .sendmsg = inet_sendmsg,
1234 .recvmsg = sock_common_recvmsg,
1235 .mmap = sock_no_mmap,
1236 .sendpage = sock_no_sendpage,
1237 #ifdef CONFIG_COMPAT
1238 .compat_setsockopt = compat_sock_common_setsockopt,
1239 .compat_getsockopt = compat_sock_common_getsockopt,
1240 #endif
1241 };
1242
1243 static struct inet_protosw dccp_v6_protosw = {
1244 .type = SOCK_DCCP,
1245 .protocol = IPPROTO_DCCP,
1246 .prot = &dccp_v6_prot,
1247 .ops = &inet6_dccp_ops,
1248 .capability = -1,
1249 .flags = INET_PROTOSW_ICSK,
1250 };
1251
1252 static int __init dccp_v6_init(void)
1253 {
1254 int err = proto_register(&dccp_v6_prot, 1);
1255
1256 if (err != 0)
1257 goto out;
1258
1259 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1260 if (err != 0)
1261 goto out_unregister_proto;
1262
1263 inet6_register_protosw(&dccp_v6_protosw);
1264
1265 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
1266 SOCK_DCCP, IPPROTO_DCCP);
1267 if (err != 0)
1268 goto out_unregister_protosw;
1269 out:
1270 return err;
1271 out_unregister_protosw:
1272 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1273 inet6_unregister_protosw(&dccp_v6_protosw);
1274 out_unregister_proto:
1275 proto_unregister(&dccp_v6_prot);
1276 goto out;
1277 }
1278
1279 static void __exit dccp_v6_exit(void)
1280 {
1281 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1282 inet6_unregister_protosw(&dccp_v6_protosw);
1283 proto_unregister(&dccp_v6_prot);
1284 }
1285
1286 module_init(dccp_v6_init);
1287 module_exit(dccp_v6_exit);
1288
1289 /*
1290 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1291 * values directly. Also cover the case where the protocol is not specified,
1292 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1293 */
1294 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1295 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1296 MODULE_LICENSE("GPL");
1297 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1298 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");