1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/xfrm.h>
18
19 #include <net/addrconf.h>
20 #include <net/inet_common.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/inet6_connection_sock.h>
24 #include <net/inet6_hashtables.h>
25 #include <net/ip6_route.h>
26 #include <net/ipv6.h>
27 #include <net/protocol.h>
28 #include <net/transp_v6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/xfrm.h>
31
32 #include "dccp.h"
33 #include "ipv6.h"
34 #include "feat.h"
35
36 /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
37
38 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
39 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
40
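/* Hash the socket into the DCCP lookup tables; v6-mapped sockets reuse the IPv4 hashing path. */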
41 static void dccp_v6_hash(struct sock *sk)
42 {
43 if (sk->sk_state != DCCP_CLOSED) {
44 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
45 inet_hash(sk);
46 return;
47 }
48 local_bh_disable();
49 __inet6_hash(sk);
50 local_bh_enable();
51 }
52 }
53
54 /* add pseudo-header to DCCP checksum stored in skb->csum */
55 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
56 struct in6_addr *saddr,
57 struct in6_addr *daddr)
58 {
59 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
60 }
61
62 static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
63 struct sk_buff *skb)
64 {
65 struct ipv6_pinfo *np = inet6_sk(sk);
66 struct dccp_hdr *dh = dccp_hdr(skb);
67
68 dccp_csum_outgoing(skb);
69 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
70 }
71
72 static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
73 __be16 sport, __be16 dport)
74 {
75 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
76 }
77
78 static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
79 {
80 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
81 ipv6_hdr(skb)->saddr.s6_addr32,
82 dccp_hdr(skb)->dccph_dport,
83 dccp_hdr(skb)->dccph_sport);
84
85 }
86
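/* ICMPv6 error handler: on PKT_TOOBIG re-sync the MSS to the new path MTU; otherwise report the error to the matching socket or pending request. */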
87 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88 u8 type, u8 code, int offset, __be32 info)
89 {
90 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
92 struct dccp_sock *dp;
93 struct ipv6_pinfo *np;
94 struct sock *sk;
95 int err;
96 __u64 seq;
97 struct net *net = dev_net(skb->dev);
98
99 if (skb->len < offset + sizeof(*dh) ||
100 skb->len < offset + __dccp_basic_hdr_len(dh)) {
101 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
102 ICMP6_MIB_INERRORS);
103 return;
104 }
105
106 sk = inet6_lookup(net, &dccp_hashinfo,
107 &hdr->daddr, dh->dccph_dport,
108 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
109
110 if (sk == NULL) {
111 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
112 ICMP6_MIB_INERRORS);
113 return;
114 }
115
116 if (sk->sk_state == DCCP_TIME_WAIT) {
117 inet_twsk_put(inet_twsk(sk));
118 return;
119 }
120
121 bh_lock_sock(sk);
122 if (sock_owned_by_user(sk))
123 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
124
125 if (sk->sk_state == DCCP_CLOSED)
126 goto out;
127
128 dp = dccp_sk(sk);
129 seq = dccp_hdr_seq(dh);
130 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
131 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
132 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
133 goto out;
134 }
135
136 np = inet6_sk(sk);
137
138 if (type == ICMPV6_PKT_TOOBIG) {
139 struct dst_entry *dst = NULL;
140
141 if (sock_owned_by_user(sk))
142 goto out;
143 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
144 goto out;
145
146 /* icmp should have updated the destination cache entry */
147 dst = __sk_dst_check(sk, np->dst_cookie);
148 if (dst == NULL) {
149 struct inet_sock *inet = inet_sk(sk);
150 struct flowi fl;
151
152 /* BUGGG_FUTURE: Again, it is not clear how
153 to handle the rthdr case. Ignore this complexity
154 for now.
155 */
156 memset(&fl, 0, sizeof(fl));
157 fl.proto = IPPROTO_DCCP;
158 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
159 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
160 fl.oif = sk->sk_bound_dev_if;
161 fl.fl_ip_dport = inet->inet_dport;
162 fl.fl_ip_sport = inet->inet_sport;
163 security_sk_classify_flow(sk, &fl);
164
165 err = ip6_dst_lookup(sk, &dst, &fl);
166 if (err) {
167 sk->sk_err_soft = -err;
168 goto out;
169 }
170
171 err = xfrm_lookup(net, &dst, &fl, sk, 0);
172 if (err < 0) {
173 sk->sk_err_soft = -err;
174 goto out;
175 }
176 } else
177 dst_hold(dst);
178
179 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
180 dccp_sync_mss(sk, dst_mtu(dst));
181 } /* else let the usual retransmit timer handle it */
182 dst_release(dst);
183 goto out;
184 }
185
186 icmpv6_err_convert(type, code, &err);
187
188 /* Might be for a request_sock */
189 switch (sk->sk_state) {
190 struct request_sock *req, **prev;
191 case DCCP_LISTEN:
192 if (sock_owned_by_user(sk))
193 goto out;
194
195 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
196 &hdr->daddr, &hdr->saddr,
197 inet6_iif(skb));
198 if (req == NULL)
199 goto out;
200
201 /*
202 * ICMPs are not backlogged, hence we cannot get an established
203 * socket here.
204 */
205 WARN_ON(req->sk != NULL);
206
207 if (seq != dccp_rsk(req)->dreq_iss) {
208 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
209 goto out;
210 }
211
212 inet_csk_reqsk_queue_drop(sk, req, prev);
213 goto out;
214
215 case DCCP_REQUESTING:
216 case DCCP_RESPOND: /* Cannot happen.
217 It can, if SYNs are crossed. --ANK */
218 if (!sock_owned_by_user(sk)) {
219 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
220 sk->sk_err = err;
221 /*
222 * Wake people up to see the error
223 * (see connect in sock.c)
224 */
225 sk->sk_error_report(sk);
226 dccp_done(sk);
227 } else
228 sk->sk_err_soft = err;
229 goto out;
230 }
231
232 if (!sock_owned_by_user(sk) && np->recverr) {
233 sk->sk_err = err;
234 sk->sk_error_report(sk);
235 } else
236 sk->sk_err_soft = err;
237
238 out:
239 bh_unlock_sock(sk);
240 sock_put(sk);
241 }
242
243
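/* Build, checksum and transmit a DCCP-Response for a pending connection request (also used as the rtx_syn_ack handler). */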
244 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
245 {
246 struct inet6_request_sock *ireq6 = inet6_rsk(req);
247 struct ipv6_pinfo *np = inet6_sk(sk);
248 struct sk_buff *skb;
249 struct ipv6_txoptions *opt = NULL;
250 struct in6_addr *final_p = NULL, final;
251 struct flowi fl;
252 int err = -1;
253 struct dst_entry *dst;
254
255 memset(&fl, 0, sizeof(fl));
256 fl.proto = IPPROTO_DCCP;
257 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
258 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
259 fl.fl6_flowlabel = 0;
260 fl.oif = ireq6->iif;
261 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
262 fl.fl_ip_sport = inet_rsk(req)->loc_port;
263 security_req_classify_flow(req, &fl);
264
265 opt = np->opt;
266
267 if (opt != NULL && opt->srcrt != NULL) {
268 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
269
270 ipv6_addr_copy(&final, &fl.fl6_dst);
271 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
272 final_p = &final;
273 }
274
275 err = ip6_dst_lookup(sk, &dst, &fl);
276 if (err)
277 goto done;
278
279 if (final_p)
280 ipv6_addr_copy(&fl.fl6_dst, final_p);
281
282 err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
283 if (err < 0)
284 goto done;
285
286 skb = dccp_make_response(sk, dst, req);
287 if (skb != NULL) {
288 struct dccp_hdr *dh = dccp_hdr(skb);
289
290 dh->dccph_checksum = dccp_v6_csum_finish(skb,
291 &ireq6->loc_addr,
292 &ireq6->rmt_addr);
293 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
294 err = ip6_xmit(sk, skb, &fl, opt, 0);
295 err = net_xmit_eval(err);
296 }
297
298 done:
299 if (opt != NULL && opt != np->opt)
300 sock_kfree_s(sk, opt, opt->tot_len);
301 dst_release(dst);
302 return err;
303 }
304
305 static void dccp_v6_reqsk_destructor(struct request_sock *req)
306 {
307 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
308 if (inet6_rsk(req)->pktopts != NULL)
309 kfree_skb(inet6_rsk(req)->pktopts);
310 }
311
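/* Send a Reset in reply to rxskb via the per-net control socket; never done in reply to a Reset or to a non-unicast destination. */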
312 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
313 {
314 struct ipv6hdr *rxip6h;
315 struct sk_buff *skb;
316 struct flowi fl;
317 struct net *net = dev_net(skb_dst(rxskb)->dev);
318 struct sock *ctl_sk = net->dccp.v6_ctl_sk;
319 struct dst_entry *dst;
320
321 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
322 return;
323
324 if (!ipv6_unicast_destination(rxskb))
325 return;
326
327 skb = dccp_ctl_make_reset(ctl_sk, rxskb);
328 if (skb == NULL)
329 return;
330
331 rxip6h = ipv6_hdr(rxskb);
332 dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
333 &rxip6h->daddr);
334
335 memset(&fl, 0, sizeof(fl));
336 ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
337 ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
338
339 fl.proto = IPPROTO_DCCP;
340 fl.oif = inet6_iif(rxskb);
341 fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
342 fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
343 security_skb_classify_flow(rxskb, &fl);
344
345 /* sk = NULL, but it is safe for now. RST socket required. */
346 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
347 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
348 skb_dst_set(skb, dst);
349 ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
350 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
351 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
352 return;
353 }
354 }
355
356 kfree_skb(skb);
357 }
358
359 static struct request_sock_ops dccp6_request_sock_ops = {
360 .family = AF_INET6,
361 .obj_size = sizeof(struct dccp6_request_sock),
362 .rtx_syn_ack = dccp_v6_send_response,
363 .send_ack = dccp_reqsk_send_ack,
364 .destructor = dccp_v6_reqsk_destructor,
365 .send_reset = dccp_v6_ctl_send_reset,
366 };
367
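/* Match an incoming packet against pending connection requests or an already established socket; fall back to the listening socket itself. */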
368 static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
369 {
370 const struct dccp_hdr *dh = dccp_hdr(skb);
371 const struct ipv6hdr *iph = ipv6_hdr(skb);
372 struct sock *nsk;
373 struct request_sock **prev;
374 /* Find possible connection requests. */
375 struct request_sock *req = inet6_csk_search_req(sk, &prev,
376 dh->dccph_sport,
377 &iph->saddr,
378 &iph->daddr,
379 inet6_iif(skb));
380 if (req != NULL)
381 return dccp_check_req(sk, skb, req, prev);
382
383 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
384 &iph->saddr, dh->dccph_sport,
385 &iph->daddr, ntohs(dh->dccph_dport),
386 inet6_iif(skb));
387 if (nsk != NULL) {
388 if (nsk->sk_state != DCCP_TIME_WAIT) {
389 bh_lock_sock(nsk);
390 return nsk;
391 }
392 inet_twsk_put(inet_twsk(nsk));
393 return NULL;
394 }
395
396 return sk;
397 }
398
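/* Handle a Request arriving on a listening socket: validate the service code, allocate and initialise a request_sock, and send a Response. */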
399 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
400 {
401 struct request_sock *req;
402 struct dccp_request_sock *dreq;
403 struct inet6_request_sock *ireq6;
404 struct ipv6_pinfo *np = inet6_sk(sk);
405 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
406 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
407
408 if (skb->protocol == htons(ETH_P_IP))
409 return dccp_v4_conn_request(sk, skb);
410
411 if (!ipv6_unicast_destination(skb))
412 return 0; /* discard, don't send a reset here */
413
414 if (dccp_bad_service_code(sk, service)) {
415 dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
416 goto drop;
417 }
418 /*
419 * There are no SYN attacks on IPv6, yet...
420 */
421 dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
422 if (inet_csk_reqsk_queue_is_full(sk))
423 goto drop;
424
425 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
426 goto drop;
427
428 req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
429 if (req == NULL)
430 goto drop;
431
432 if (dccp_reqsk_init(req, dccp_sk(sk), skb))
433 goto drop_and_free;
434
435 dreq = dccp_rsk(req);
436 if (dccp_parse_options(sk, dreq, skb))
437 goto drop_and_free;
438
439 if (security_inet_conn_request(sk, skb, req))
440 goto drop_and_free;
441
442 ireq6 = inet6_rsk(req);
443 ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
444 ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
445
446 if (ipv6_opt_accepted(sk, skb) ||
447 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
448 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
449 atomic_inc(&skb->users);
450 ireq6->pktopts = skb;
451 }
452 ireq6->iif = sk->sk_bound_dev_if;
453
454 /* So that link locals have meaning */
455 if (!sk->sk_bound_dev_if &&
456 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
457 ireq6->iif = inet6_iif(skb);
458
459 /*
460 * Step 3: Process LISTEN state
461 *
462 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
463 *
464 * In fact we defer setting S.GSR, S.SWL, S.SWH to
465 * dccp_create_openreq_child.
466 */
467 dreq->dreq_isr = dcb->dccpd_seq;
468 dreq->dreq_iss = dccp_v6_init_sequence(skb);
469 dreq->dreq_service = service;
470
471 if (dccp_v6_send_response(sk, req))
472 goto drop_and_free;
473
474 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
475 return 0;
476
477 drop_and_free:
478 reqsk_free(req);
479 drop:
480 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
481 return -1;
482 }
483
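/* Create the child socket for an accepted connection request, handling both native IPv6 and v6-mapped IPv4 peers. */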
484 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
485 struct sk_buff *skb,
486 struct request_sock *req,
487 struct dst_entry *dst)
488 {
489 struct inet6_request_sock *ireq6 = inet6_rsk(req);
490 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
491 struct inet_sock *newinet;
492 struct dccp_sock *newdp;
493 struct dccp6_sock *newdp6;
494 struct sock *newsk;
495 struct ipv6_txoptions *opt;
496
497 if (skb->protocol == htons(ETH_P_IP)) {
498 /*
499 * v6 mapped
500 */
501 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
502 if (newsk == NULL)
503 return NULL;
504
505 newdp6 = (struct dccp6_sock *)newsk;
506 newdp = dccp_sk(newsk);
507 newinet = inet_sk(newsk);
508 newinet->pinet6 = &newdp6->inet6;
509 newnp = inet6_sk(newsk);
510
511 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
512
513 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
514
515 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
516
517 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
518
519 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
520 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
521 newnp->pktoptions = NULL;
522 newnp->opt = NULL;
523 newnp->mcast_oif = inet6_iif(skb);
524 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
525
526 /*
527 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
528 * here, dccp_create_openreq_child now does this for us, see the comment in
529 * that function for the gory details. -acme
530 */
531
532 /* This is a tricky place. Until this moment the IPv4 code
533 worked with the IPv6 icsk.icsk_af_ops.
534 Sync it now.
535 */
536 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
537
538 return newsk;
539 }
540
541 opt = np->opt;
542
543 if (sk_acceptq_is_full(sk))
544 goto out_overflow;
545
546 if (dst == NULL) {
547 struct in6_addr *final_p = NULL, final;
548 struct flowi fl;
549
550 memset(&fl, 0, sizeof(fl));
551 fl.proto = IPPROTO_DCCP;
552 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
553 if (opt != NULL && opt->srcrt != NULL) {
554 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
555
556 ipv6_addr_copy(&final, &fl.fl6_dst);
557 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
558 final_p = &final;
559 }
560 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
561 fl.oif = sk->sk_bound_dev_if;
562 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
563 fl.fl_ip_sport = inet_rsk(req)->loc_port;
564 security_sk_classify_flow(sk, &fl);
565
566 if (ip6_dst_lookup(sk, &dst, &fl))
567 goto out;
568
569 if (final_p)
570 ipv6_addr_copy(&fl.fl6_dst, final_p);
571
572 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
573 goto out;
574 }
575
576 newsk = dccp_create_openreq_child(sk, req, skb);
577 if (newsk == NULL)
578 goto out;
579
580 /*
581 * No need to charge this sock to the relevant IPv6 refcnt debug socks
582 * count here, dccp_create_openreq_child now does this for us, see the
583 * comment in that function for the gory details. -acme
584 */
585
586 __ip6_dst_store(newsk, dst, NULL, NULL);
587 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
588 NETIF_F_TSO);
589 newdp6 = (struct dccp6_sock *)newsk;
590 newinet = inet_sk(newsk);
591 newinet->pinet6 = &newdp6->inet6;
592 newdp = dccp_sk(newsk);
593 newnp = inet6_sk(newsk);
594
595 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
596
597 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
598 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
599 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
600 newsk->sk_bound_dev_if = ireq6->iif;
601
602 /* Now IPv6 options...
603
604 First: no IPv4 options.
605 */
606 newinet->opt = NULL;
607
608 /* Clone RX bits */
609 newnp->rxopt.all = np->rxopt.all;
610
611 /* Clone pktoptions received with SYN */
612 newnp->pktoptions = NULL;
613 if (ireq6->pktopts != NULL) {
614 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
615 kfree_skb(ireq6->pktopts);
616 ireq6->pktopts = NULL;
617 if (newnp->pktoptions)
618 skb_set_owner_r(newnp->pktoptions, newsk);
619 }
620 newnp->opt = NULL;
621 newnp->mcast_oif = inet6_iif(skb);
622 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
623
624 /*
625 * Clone native IPv6 options from listening socket (if any)
626 *
627 * Yes, keeping a reference count would be much more clever, but we do
628 * one more thing here: reattach optmem to newsk.
629 */
630 if (opt != NULL) {
631 newnp->opt = ipv6_dup_options(newsk, opt);
632 if (opt != np->opt)
633 sock_kfree_s(sk, opt, opt->tot_len);
634 }
635
636 inet_csk(newsk)->icsk_ext_hdr_len = 0;
637 if (newnp->opt != NULL)
638 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
639 newnp->opt->opt_flen);
640
641 dccp_sync_mss(newsk, dst_mtu(dst));
642
643 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
644 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
645
646 __inet6_hash(newsk);
647 __inet_inherit_port(sk, newsk);
648
649 return newsk;
650
651 out_overflow:
652 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
653 out:
654 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
655 if (opt != NULL && opt != np->opt)
656 sock_kfree_s(sk, opt, opt->tot_len);
657 dst_release(dst);
658 return NULL;
659 }
660
661 /* The socket must have its spinlock held when we get
662 * here.
663 *
664 * We have a potential double-lock case here, so even when
665 * doing backlog processing we use the BH locking scheme.
666 * This is because we cannot sleep with the original spinlock
667 * held.
668 */
669 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
670 {
671 struct ipv6_pinfo *np = inet6_sk(sk);
672 struct sk_buff *opt_skb = NULL;
673
674 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
675 goes to the IPv4 receive handler and is backlogged.
676 From the backlog it always goes here. Kerboom...
677 Fortunately, dccp_rcv_established and rcv_established
678 handle them correctly, but that is not the case with
679 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
680 */
681
682 if (skb->protocol == htons(ETH_P_IP))
683 return dccp_v4_do_rcv(sk, skb);
684
685 if (sk_filter(sk, skb))
686 goto discard;
687
688 /*
689 * socket locking is here for SMP purposes as backlog rcv is currently
690 * called with bh processing disabled.
691 */
692
693 /* Do Stevens' IPV6_PKTOPTIONS.
694
695 Yes, guys, it is the only place in our code where we
696 can do this without affecting IPv4.
697 The rest of the code is protocol independent,
698 and I do not like the idea of uglifying IPv4.
699
700 Actually, the whole idea behind IPV6_PKTOPTIONS
701 does not look very well thought out. For now we latch
702 the options received in the last packet, enqueued
703 by tcp. Feel free to propose a better solution.
704 --ANK (980728)
705 */
706 if (np->rxopt.all)
707 /*
708 * FIXME: Add handling of the IPV6_PKTOPTIONS skb. See the comments below
709 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
710 */
711 opt_skb = skb_clone(skb, GFP_ATOMIC);
712
713 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
714 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
715 goto reset;
716 if (opt_skb) {
717 /* XXX This is where we would goto ipv6_pktoptions. */
718 __kfree_skb(opt_skb);
719 }
720 return 0;
721 }
722
723 /*
724 * Step 3: Process LISTEN state
725 * If S.state == LISTEN,
726 * If P.type == Request or P contains a valid Init Cookie option,
727 * (* Must scan the packet's options to check for Init
728 * Cookies. Only Init Cookies are processed here,
729 * however; other options are processed in Step 8. This
730 * scan need only be performed if the endpoint uses Init
731 * Cookies *)
732 * (* Generate a new socket and switch to that socket *)
733 * Set S := new socket for this port pair
734 * S.state = RESPOND
735 * Choose S.ISS (initial seqno) or set from Init Cookies
736 * Initialize S.GAR := S.ISS
737 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
738 * Continue with S.state == RESPOND
739 * (* A Response packet will be generated in Step 11 *)
740 * Otherwise,
741 * Generate Reset(No Connection) unless P.type == Reset
742 * Drop packet and return
743 *
744 * NOTE: the check for the packet types is done in
745 * dccp_rcv_state_process
746 */
747 if (sk->sk_state == DCCP_LISTEN) {
748 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
749
750 if (nsk == NULL)
751 goto discard;
752 /*
753 * Queue it on the new socket if the new socket is active,
754 * otherwise we just short-circuit this and continue with
755 * the new socket.
756 */
757 if (nsk != sk) {
758 if (dccp_child_process(sk, nsk, skb))
759 goto reset;
760 if (opt_skb != NULL)
761 __kfree_skb(opt_skb);
762 return 0;
763 }
764 }
765
766 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
767 goto reset;
768 if (opt_skb) {
769 /* XXX This is where we would goto ipv6_pktoptions. */
770 __kfree_skb(opt_skb);
771 }
772 return 0;
773
774 reset:
775 dccp_v6_ctl_send_reset(sk, skb);
776 discard:
777 if (opt_skb != NULL)
778 __kfree_skb(opt_skb);
779 kfree_skb(skb);
780 return 0;
781 }
782
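/* Main receive handler: validate the header and checksum, look up the owning socket and hand the packet to it; send a Reset if no connection exists. */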
783 static int dccp_v6_rcv(struct sk_buff *skb)
784 {
785 const struct dccp_hdr *dh;
786 struct sock *sk;
787 int min_cov;
788
789 /* Step 1: Check header basics */
790
791 if (dccp_invalid_packet(skb))
792 goto discard_it;
793
794 /* Step 1: If header checksum is incorrect, drop packet and return. */
795 if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
796 &ipv6_hdr(skb)->daddr)) {
797 DCCP_WARN("dropped packet with invalid checksum\n");
798 goto discard_it;
799 }
800
801 dh = dccp_hdr(skb);
802
803 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
804 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
805
806 if (dccp_packet_without_ack(skb))
807 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
808 else
809 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
810
811 /* Step 2:
812 * Look up flow ID in table and get corresponding socket */
813 sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
814 dh->dccph_sport, dh->dccph_dport);
815 /*
816 * Step 2:
817 * If no socket ...
818 */
819 if (sk == NULL) {
820 dccp_pr_debug("failed to look up flow ID in table and "
821 "get corresponding socket\n");
822 goto no_dccp_socket;
823 }
824
825 /*
826 * Step 2:
827 * ... or S.state == TIMEWAIT,
828 * Generate Reset(No Connection) unless P.type == Reset
829 * Drop packet and return
830 */
831 if (sk->sk_state == DCCP_TIME_WAIT) {
832 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
833 inet_twsk_put(inet_twsk(sk));
834 goto no_dccp_socket;
835 }
836
837 /*
838 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
839 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
840 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
841 */
842 min_cov = dccp_sk(sk)->dccps_pcrlen;
843 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
844 dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
845 dh->dccph_cscov, min_cov);
846 /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
847 goto discard_and_relse;
848 }
849
850 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
851 goto discard_and_relse;
852
853 return sk_receive_skb(sk, skb, 1) ? -1 : 0;
854
855 no_dccp_socket:
856 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
857 goto discard_it;
858 /*
859 * Step 2:
860 * If no socket ...
861 * Generate Reset(No Connection) unless P.type == Reset
862 * Drop packet and return
863 */
864 if (dh->dccph_type != DCCP_PKT_RESET) {
865 DCCP_SKB_CB(skb)->dccpd_reset_code =
866 DCCP_RESET_CODE_NO_CONNECTION;
867 dccp_v6_ctl_send_reset(sk, skb);
868 }
869
870 discard_it:
871 kfree_skb(skb);
872 return 0;
873
874 discard_and_relse:
875 sock_put(sk);
876 goto discard_it;
877 }
878
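/* Active open (connect()): resolve the flow and route, fall back to the v4-mapped path for IPv4-mapped destinations, choose the source address and start the handshake. */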
879 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
880 int addr_len)
881 {
882 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
883 struct inet_connection_sock *icsk = inet_csk(sk);
884 struct inet_sock *inet = inet_sk(sk);
885 struct ipv6_pinfo *np = inet6_sk(sk);
886 struct dccp_sock *dp = dccp_sk(sk);
887 struct in6_addr *saddr = NULL, *final_p = NULL, final;
888 struct flowi fl;
889 struct dst_entry *dst;
890 int addr_type;
891 int err;
892
893 dp->dccps_role = DCCP_ROLE_CLIENT;
894
895 if (addr_len < SIN6_LEN_RFC2133)
896 return -EINVAL;
897
898 if (usin->sin6_family != AF_INET6)
899 return -EAFNOSUPPORT;
900
901 memset(&fl, 0, sizeof(fl));
902
903 if (np->sndflow) {
904 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
905 IP6_ECN_flow_init(fl.fl6_flowlabel);
906 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
907 struct ip6_flowlabel *flowlabel;
908 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
909 if (flowlabel == NULL)
910 return -EINVAL;
911 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
912 fl6_sock_release(flowlabel);
913 }
914 }
915 /*
916 * connect() to INADDR_ANY means loopback (BSD'ism).
917 */
918 if (ipv6_addr_any(&usin->sin6_addr))
919 usin->sin6_addr.s6_addr[15] = 1;
920
921 addr_type = ipv6_addr_type(&usin->sin6_addr);
922
923 if (addr_type & IPV6_ADDR_MULTICAST)
924 return -ENETUNREACH;
925
926 if (addr_type & IPV6_ADDR_LINKLOCAL) {
927 if (addr_len >= sizeof(struct sockaddr_in6) &&
928 usin->sin6_scope_id) {
929 /* If interface is set while binding, indices
930 * must coincide.
931 */
932 if (sk->sk_bound_dev_if &&
933 sk->sk_bound_dev_if != usin->sin6_scope_id)
934 return -EINVAL;
935
936 sk->sk_bound_dev_if = usin->sin6_scope_id;
937 }
938
939 /* Connecting to a link-local address requires an interface */
940 if (!sk->sk_bound_dev_if)
941 return -EINVAL;
942 }
943
944 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
945 np->flow_label = fl.fl6_flowlabel;
946
947 /*
948 * DCCP over IPv4
949 */
950 if (addr_type == IPV6_ADDR_MAPPED) {
951 u32 exthdrlen = icsk->icsk_ext_hdr_len;
952 struct sockaddr_in sin;
953
954 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
955
956 if (__ipv6_only_sock(sk))
957 return -ENETUNREACH;
958
959 sin.sin_family = AF_INET;
960 sin.sin_port = usin->sin6_port;
961 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
962
963 icsk->icsk_af_ops = &dccp_ipv6_mapped;
964 sk->sk_backlog_rcv = dccp_v4_do_rcv;
965
966 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
967 if (err) {
968 icsk->icsk_ext_hdr_len = exthdrlen;
969 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
970 sk->sk_backlog_rcv = dccp_v6_do_rcv;
971 goto failure;
972 }
973 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
974 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
975
976 return err;
977 }
978
979 if (!ipv6_addr_any(&np->rcv_saddr))
980 saddr = &np->rcv_saddr;
981
982 fl.proto = IPPROTO_DCCP;
983 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
984 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
985 fl.oif = sk->sk_bound_dev_if;
986 fl.fl_ip_dport = usin->sin6_port;
987 fl.fl_ip_sport = inet->inet_sport;
988 security_sk_classify_flow(sk, &fl);
989
990 if (np->opt != NULL && np->opt->srcrt != NULL) {
991 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
992
993 ipv6_addr_copy(&final, &fl.fl6_dst);
994 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
995 final_p = &final;
996 }
997
998 err = ip6_dst_lookup(sk, &dst, &fl);
999 if (err)
1000 goto failure;
1001
1002 if (final_p)
1003 ipv6_addr_copy(&fl.fl6_dst, final_p);
1004
1005 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
1006 if (err < 0) {
1007 if (err == -EREMOTE)
1008 err = ip6_dst_blackhole(sk, &dst, &fl);
1009 if (err < 0)
1010 goto failure;
1011 }
1012
1013 if (saddr == NULL) {
1014 saddr = &fl.fl6_src;
1015 ipv6_addr_copy(&np->rcv_saddr, saddr);
1016 }
1017
1018 /* set the source address */
1019 ipv6_addr_copy(&np->saddr, saddr);
1020 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1021
1022 __ip6_dst_store(sk, dst, NULL, NULL);
1023
1024 icsk->icsk_ext_hdr_len = 0;
1025 if (np->opt != NULL)
1026 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1027 np->opt->opt_nflen);
1028
1029 inet->inet_dport = usin->sin6_port;
1030
1031 dccp_set_state(sk, DCCP_REQUESTING);
1032 err = inet6_hash_connect(&dccp_death_row, sk);
1033 if (err)
1034 goto late_failure;
1035
1036 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
1037 np->daddr.s6_addr32,
1038 inet->inet_sport,
1039 inet->inet_dport);
1040 err = dccp_connect(sk);
1041 if (err)
1042 goto late_failure;
1043
1044 return 0;
1045
1046 late_failure:
1047 dccp_set_state(sk, DCCP_CLOSED);
1048 __sk_dst_reset(sk);
1049 failure:
1050 inet->inet_dport = 0;
1051 sk->sk_route_caps = 0;
1052 return err;
1053 }
1054
1055 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1056 .queue_xmit = inet6_csk_xmit,
1057 .send_check = dccp_v6_send_check,
1058 .rebuild_header = inet6_sk_rebuild_header,
1059 .conn_request = dccp_v6_conn_request,
1060 .syn_recv_sock = dccp_v6_request_recv_sock,
1061 .net_header_len = sizeof(struct ipv6hdr),
1062 .setsockopt = ipv6_setsockopt,
1063 .getsockopt = ipv6_getsockopt,
1064 .addr2sockaddr = inet6_csk_addr2sockaddr,
1065 .sockaddr_len = sizeof(struct sockaddr_in6),
1066 .bind_conflict = inet6_csk_bind_conflict,
1067 #ifdef CONFIG_COMPAT
1068 .compat_setsockopt = compat_ipv6_setsockopt,
1069 .compat_getsockopt = compat_ipv6_getsockopt,
1070 #endif
1071 };
1072
1073 /*
1074 * DCCP over IPv4 via INET6 API
1075 */
1076 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1077 .queue_xmit = ip_queue_xmit,
1078 .send_check = dccp_v4_send_check,
1079 .rebuild_header = inet_sk_rebuild_header,
1080 .conn_request = dccp_v6_conn_request,
1081 .syn_recv_sock = dccp_v6_request_recv_sock,
1082 .net_header_len = sizeof(struct iphdr),
1083 .setsockopt = ipv6_setsockopt,
1084 .getsockopt = ipv6_getsockopt,
1085 .addr2sockaddr = inet6_csk_addr2sockaddr,
1086 .sockaddr_len = sizeof(struct sockaddr_in6),
1087 #ifdef CONFIG_COMPAT
1088 .compat_setsockopt = compat_ipv6_setsockopt,
1089 .compat_getsockopt = compat_ipv6_getsockopt,
1090 #endif
1091 };
1092
1093 /* NOTE: A lot of things are set to zero explicitly by the call to
1094 * sk_alloc(), so they need not be done here.
1095 */
1096 static int dccp_v6_init_sock(struct sock *sk)
1097 {
1098 static __u8 dccp_v6_ctl_sock_initialized;
1099 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1100
1101 if (err == 0) {
1102 if (unlikely(!dccp_v6_ctl_sock_initialized))
1103 dccp_v6_ctl_sock_initialized = 1;
1104 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1105 }
1106
1107 return err;
1108 }
1109
1110 static void dccp_v6_destroy_sock(struct sock *sk)
1111 {
1112 dccp_destroy_sock(sk);
1113 inet6_destroy_sock(sk);
1114 }
1115
1116 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1117 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1118 };
1119
1120 static struct proto dccp_v6_prot = {
1121 .name = "DCCPv6",
1122 .owner = THIS_MODULE,
1123 .close = dccp_close,
1124 .connect = dccp_v6_connect,
1125 .disconnect = dccp_disconnect,
1126 .ioctl = dccp_ioctl,
1127 .init = dccp_v6_init_sock,
1128 .setsockopt = dccp_setsockopt,
1129 .getsockopt = dccp_getsockopt,
1130 .sendmsg = dccp_sendmsg,
1131 .recvmsg = dccp_recvmsg,
1132 .backlog_rcv = dccp_v6_do_rcv,
1133 .hash = dccp_v6_hash,
1134 .unhash = inet_unhash,
1135 .accept = inet_csk_accept,
1136 .get_port = inet_csk_get_port,
1137 .shutdown = dccp_shutdown,
1138 .destroy = dccp_v6_destroy_sock,
1139 .orphan_count = &dccp_orphan_count,
1140 .max_header = MAX_DCCP_HEADER,
1141 .obj_size = sizeof(struct dccp6_sock),
1142 .slab_flags = SLAB_DESTROY_BY_RCU,
1143 .rsk_prot = &dccp6_request_sock_ops,
1144 .twsk_prot = &dccp6_timewait_sock_ops,
1145 .h.hashinfo = &dccp_hashinfo,
1146 #ifdef CONFIG_COMPAT
1147 .compat_setsockopt = compat_dccp_setsockopt,
1148 .compat_getsockopt = compat_dccp_getsockopt,
1149 #endif
1150 };
1151
1152 static const struct inet6_protocol dccp_v6_protocol = {
1153 .handler = dccp_v6_rcv,
1154 .err_handler = dccp_v6_err,
1155 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1156 };
1157
1158 static const struct proto_ops inet6_dccp_ops = {
1159 .family = PF_INET6,
1160 .owner = THIS_MODULE,
1161 .release = inet6_release,
1162 .bind = inet6_bind,
1163 .connect = inet_stream_connect,
1164 .socketpair = sock_no_socketpair,
1165 .accept = inet_accept,
1166 .getname = inet6_getname,
1167 .poll = dccp_poll,
1168 .ioctl = inet6_ioctl,
1169 .listen = inet_dccp_listen,
1170 .shutdown = inet_shutdown,
1171 .setsockopt = sock_common_setsockopt,
1172 .getsockopt = sock_common_getsockopt,
1173 .sendmsg = inet_sendmsg,
1174 .recvmsg = sock_common_recvmsg,
1175 .mmap = sock_no_mmap,
1176 .sendpage = sock_no_sendpage,
1177 #ifdef CONFIG_COMPAT
1178 .compat_setsockopt = compat_sock_common_setsockopt,
1179 .compat_getsockopt = compat_sock_common_getsockopt,
1180 #endif
1181 };
1182
1183 static struct inet_protosw dccp_v6_protosw = {
1184 .type = SOCK_DCCP,
1185 .protocol = IPPROTO_DCCP,
1186 .prot = &dccp_v6_prot,
1187 .ops = &inet6_dccp_ops,
1188 .flags = INET_PROTOSW_ICSK,
1189 };
1190
1191 static int dccp_v6_init_net(struct net *net)
1192 {
1193 int err;
1194
1195 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1196 SOCK_DCCP, IPPROTO_DCCP, net);
1197 return err;
1198 }
1199
1200 static void dccp_v6_exit_net(struct net *net)
1201 {
1202 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1203 }
1204
1205 static struct pernet_operations dccp_v6_ops = {
1206 .init = dccp_v6_init_net,
1207 .exit = dccp_v6_exit_net,
1208 };
1209
1210 static int __init dccp_v6_init(void)
1211 {
1212 int err = proto_register(&dccp_v6_prot, 1);
1213
1214 if (err != 0)
1215 goto out;
1216
1217 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1218 if (err != 0)
1219 goto out_unregister_proto;
1220
1221 inet6_register_protosw(&dccp_v6_protosw);
1222
1223 err = register_pernet_subsys(&dccp_v6_ops);
1224 if (err != 0)
1225 goto out_destroy_ctl_sock;
1226 out:
1227 return err;
1228
1229 out_destroy_ctl_sock:
1230 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1231 inet6_unregister_protosw(&dccp_v6_protosw);
1232 out_unregister_proto:
1233 proto_unregister(&dccp_v6_prot);
1234 goto out;
1235 }
1236
1237 static void __exit dccp_v6_exit(void)
1238 {
1239 unregister_pernet_subsys(&dccp_v6_ops);
1240 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1241 inet6_unregister_protosw(&dccp_v6_protosw);
1242 proto_unregister(&dccp_v6_prot);
1243 }
1244
1245 module_init(dccp_v6_init);
1246 module_exit(dccp_v6_exit);
1247
1248 /*
1249 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1250 * values directly. Also cover the case where the protocol is not specified,
1251 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1252 */
1253 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1254 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1255 MODULE_LICENSE("GPL");
1256 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1257 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");