/*
 * DCCP over IPv6
 * Linux INET6 implementation
 *
 * Based on net/dccp6/ipv6.c
 *
 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static void dccp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != DCCP_CLOSED) {
                if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
                        inet_hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(sk);
                local_bh_enable();
        }
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
                                          struct in6_addr *saddr,
                                          struct in6_addr *daddr)
{
        return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
                                      struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                                  __be16 sport, __be16 dport)
{
        return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                             ipv6_hdr(skb)->saddr.s6_addr32,
                                             dccp_hdr(skb)->dccph_dport,
                                             dccp_hdr(skb)->dccph_sport);
}

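/*
 * ICMPv6 error handler for DCCP, invoked via dccp_v6_protocol.err_handler
 * when an ICMPv6 error quoting a DCCP/IPv6 packet arrives.  It looks up the
 * originating socket, handles Packet-Too-Big (PMTU) messages by syncing the
 * MSS, and reports other errors to the socket.
 */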
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        u8 type, u8 code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
        struct dccp_sock *dp;
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;
        struct net *net = dev_net(skb->dev);

        if (skb->len < offset + sizeof(*dh) ||
            skb->len < offset + __dccp_basic_hdr_len(dh)) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
        }

        sk = inet6_lookup(net, &dccp_hashinfo,
                          &hdr->daddr, dh->dccph_dport,
                          &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        dp = dccp_sk(sk);
        seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_DCCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_sk_classify_flow(sk, &fl);

                        err = ip6_dst_lookup(sk, &dst, &fl);
                        if (err) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        err = xfrm_lookup(net, &dst, &fl, sk, 0);
                        if (err < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        dccp_sync_mss(sk, dst_mtu(dst));
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case DCCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
                                           &hdr->daddr, &hdr->saddr,
                                           inet6_iif(skb));
                if (req == NULL)
                        goto out;

                /*
                 * ICMPs are not backlogged, hence we cannot get an established
                 * socket here.
                 */
                WARN_ON(req->sk != NULL);

                if (seq != dccp_rsk(req)->dreq_iss) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);
                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

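/*
 * Build and transmit a DCCP-Response for a pending connection request.
 * This is used both for the initial Response and for retransmissions,
 * via the request_sock_ops .rtx_syn_ack hook.
 */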
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p = NULL, final;
        struct flowi fl;
        int err = -1;
        struct dst_entry *dst;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = ireq6->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_rsk(req)->loc_port;
        security_req_classify_flow(req, &fl);

        opt = np->opt;

        if (opt != NULL && opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
        if (err < 0)
                goto done;

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);

                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                err = net_xmit_eval(err);
        }

done:
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
        if (inet6_rsk(req)->pktopts != NULL)
                kfree_skb(inet6_rsk(req)->pktopts);
}

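/*
 * Send a DCCP-Reset in reply to @rxskb when no usable socket exists.
 * The reply is routed and transmitted through the per-net control
 * socket (net->dccp.v6_ctl_sk).
 */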
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
        struct ipv6hdr *rxip6h;
        struct sk_buff *skb;
        struct flowi fl;
        struct net *net = dev_net(skb_dst(rxskb)->dev);
        struct sock *ctl_sk = net->dccp.v6_ctl_sk;
        struct dst_entry *dst;

        if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        skb = dccp_ctl_make_reset(ctl_sk, rxskb);
        if (skb == NULL)
                return;

        rxip6h = ipv6_hdr(rxskb);
        dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
                                                            &rxip6h->daddr);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);

        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
        fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
        security_skb_classify_flow(rxskb, &fl);

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
                if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
                        skb_dst_set(skb, dst);
                        ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
                        return;
                }
        }

        kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
        .family         = AF_INET6,
        .obj_size       = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack    = dccp_v6_send_response,
        .send_ack       = dccp_reqsk_send_ack,
        .destructor     = dccp_v6_reqsk_destructor,
        .send_reset     = dccp_v6_ctl_send_reset,
};

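/*
 * For a packet arriving on a listening socket, find the socket that
 * should process it: a matching request_sock (half-open connection),
 * an already established child socket, or the listener itself.
 */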
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet6_csk_search_req(sk, &prev,
                                                        dh->dccph_sport,
                                                        &iph->saddr,
                                                        &iph->daddr,
                                                        inet6_iif(skb));
        if (req != NULL)
                return dccp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
                                         inet6_iif(skb));
        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

        return sk;
}

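/*
 * Handle an incoming DCCP-Request on a listening IPv6 socket: validate
 * it, allocate and initialise a request_sock, send the Response and
 * queue the request until the handshake completes.
 */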
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                return 0;       /* discard, don't send a reset here */

        if (dccp_bad_service_code(sk, service)) {
                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;

        if (dccp_reqsk_init(req, dccp_sk(sk), skb))
                goto drop_and_free;

        dreq = dccp_rsk(req);
        if (dccp_parse_options(sk, dreq, skb))
                goto drop_and_free;

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        ireq6 = inet6_rsk(req);
        ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);

        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
                ireq6->pktopts = skb;
        }
        ireq6->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * In fact we defer setting S.GSR, S.SWL, S.SWH to
         * dccp_create_openreq_child.
         */
        dreq->dreq_isr     = dcb->dccpd_seq;
        dreq->dreq_iss     = dccp_v6_init_sequence(skb);
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req))
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
        return -1;
}

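/*
 * Create the child socket for an accepted connection request once the
 * handshake completes.  Handles both native IPv6 connections and
 * IPv4-mapped (v6-mapped) connections arriving on an IPv6 socket.
 */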
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 * v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions = NULL;
                newnp->opt        = NULL;
                newnp->mcast_oif  = inet6_iif(skb);
                newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_DCCP;
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                if (opt != NULL && opt->srcrt != NULL) {
                        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_rsk(req)->loc_port;
                security_sk_classify_flow(sk, &fl);

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        __ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
        newsk->sk_bound_dev_if = ireq6->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (ireq6->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
                kfree_skb(ireq6->pktopts);
                ireq6->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

        /*
         * Clone native IPv6 options from the listening socket (if any).
         *
         * Yes, keeping a reference count would be much more clever, but we do
         * one more thing here: reattach optmem to newsk.
         */
        if (opt != NULL) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt != NULL)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(newsk);
        __inet_inherit_port(sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not the case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv is currently
         * called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of the code is protocol independent,
           and I do not like the idea of uglifying IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                /*
                 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
                 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
                 */
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
                if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb) {
                        /* XXX This is where we would goto ipv6_pktoptions. */
                        __kfree_skb(opt_skb);
                }
                return 0;
        }

        /*
         *  Step 3: Process LISTEN state
         *     If S.state == LISTEN,
         *       If P.type == Request or P contains a valid Init Cookie option,
         *            (* Must scan the packet's options to check for Init
         *               Cookies.  Only Init Cookies are processed here,
         *               however; other options are processed in Step 8.  This
         *               scan need only be performed if the endpoint uses Init
         *               Cookies *)
         *            (* Generate a new socket and switch to that socket *)
         *            Set S := new socket for this port pair
         *            S.state = RESPOND
         *            Choose S.ISS (initial seqno) or set from Init Cookies
         *            Initialize S.GAR := S.ISS
         *            Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
         *            Continue with S.state == RESPOND
         *            (* A Response packet will be generated in Step 11 *)
         *       Otherwise,
         *            Generate Reset(No Connection) unless P.type == Reset
         *            Drop packet and return
         *
         * NOTE: the check for the packet types is done in
         *       dccp_rcv_state_process
         */
        if (sk->sk_state == DCCP_LISTEN) {
                struct sock *nsk = dccp_v6_hnd_req(sk, skb);

                if (nsk == NULL)
                        goto discard;
                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if (nsk != sk) {
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb != NULL)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
                goto reset;
        if (opt_skb) {
                /* XXX This is where we would goto ipv6_pktoptions. */
                __kfree_skb(opt_skb);
        }
        return 0;

reset:
        dccp_v6_ctl_send_reset(sk, skb);
discard:
        if (opt_skb != NULL)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
}

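/*
 * Main receive entry point for DCCP over IPv6, registered as the
 * inet6_protocol handler for IPPROTO_DCCP.  Validates the header and
 * checksum, looks up the owning socket and hands the packet off to it,
 * or answers with a Reset when no connection exists.
 */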
static int dccp_v6_rcv(struct sk_buff *skb)
{
        const struct dccp_hdr *dh;
        struct sock *sk;
        int min_cov;

        /* Step 1: Check header basics */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        /* Step 1: If header checksum is incorrect, drop packet and return. */
        if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
                                     &ipv6_hdr(skb)->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
                                dh->dccph_sport, dh->dccph_dport);
        /*
         * Step 2:
         *      If no socket ...
         */
        if (sk == NULL) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
        }

        /*
         * Step 2:
         *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT) {
                dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
                inet_twsk_put(inet_twsk(sk));
                goto no_dccp_socket;
        }

        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
         *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
         *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
                dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
                              dh->dccph_cscov, min_cov);
                /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(sk, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;
}

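/*
 * Active open: the connect() handler for DCCPv6 sockets.  Resolves the
 * destination (including flow labels and IPv4-mapped addresses), looks
 * up the route, binds a local port and sends the initial DCCP-Request
 * via dccp_connect().
 */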
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }
        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;
        security_sk_classify_flow(sk, &fl);

        if (np->opt != NULL && np->opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
        if (err < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
                        goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt != NULL)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        inet->dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;

        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
                                                      np->daddr.s6_addr32,
                                                      inet->sport, inet->dport);
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

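/*
 * Address-family operations used by native DCCPv6 sockets.
 */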
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = dccp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 * DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = dccp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        static __u8 dccp_v6_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v6_ctl_sock_initialized))
                        dccp_v6_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
        }

        return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
        .name              = "DCCPv6",
        .owner             = THIS_MODULE,
        .close             = dccp_close,
        .connect           = dccp_v6_connect,
        .disconnect        = dccp_disconnect,
        .ioctl             = dccp_ioctl,
        .init              = dccp_v6_init_sock,
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
        .hash              = dccp_v6_hash,
        .unhash            = inet_unhash,
        .accept            = inet_csk_accept,
        .get_port          = inet_csk_get_port,
        .shutdown          = dccp_shutdown,
        .destroy           = dccp_v6_destroy_sock,
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
        .slab_flags        = SLAB_DESTROY_BY_RCU,
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
        .h.hashinfo        = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
        .handler     = dccp_v6_rcv,
        .err_handler = dccp_v6_err,
        .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = inet6_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
        .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v6_prot,
        .ops        = &inet6_dccp_ops,
        .capability = -1,
        .flags      = INET_PROTOSW_ICSK,
};

static int dccp_v6_init_net(struct net *net)
{
        int err;

        err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
                                   SOCK_DCCP, IPPROTO_DCCP, net);
        return err;
}

static void dccp_v6_exit_net(struct net *net)
{
        inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
        .init = dccp_v6_init_net,
        .exit = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err != 0)
                goto out;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_proto;

        inet6_register_protosw(&dccp_v6_protosw);

        err = register_pernet_subsys(&dccp_v6_ops);
        if (err != 0)
                goto out_destroy_ctl_sock;
out:
        return err;

out_destroy_ctl_sock:
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
        proto_unregister(&dccp_v6_prot);
        goto out;
}

static void __exit dccp_v6_exit(void)
{
        unregister_pernet_subsys(&dccp_v6_ops);
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly.  Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");