/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
        static const char *const dccp_state_names[] = {
        [DCCP_OPEN]             = "OPEN",
        [DCCP_REQUESTING]       = "REQUESTING",
        [DCCP_PARTOPEN]         = "PARTOPEN",
        [DCCP_LISTEN]           = "LISTEN",
        [DCCP_RESPOND]          = "RESPOND",
        [DCCP_CLOSING]          = "CLOSING",
        [DCCP_ACTIVE_CLOSEREQ]  = "CLOSEREQ",
        [DCCP_PASSIVE_CLOSE]    = "PASSIVE_CLOSE",
        [DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
        [DCCP_TIME_WAIT]        = "TIME_WAIT",
        [DCCP_CLOSED]           = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                /* Client retransmits all Confirm options until entering OPEN */
                if (oldstate == DCCP_PARTOPEN)
                        dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
                    oldstate == DCCP_CLOSING)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
                /* Node (client or server) has received Close packet. */
                dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
                dccp_set_state(sk, DCCP_CLOSED);
                break;
        case DCCP_PASSIVE_CLOSEREQ:
                /*
                 * Client received CloseReq. We set the `active' flag so that
                 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
                 */
                dccp_send_close(sk, 1);
                dccp_set_state(sk, DCCP_CLOSING);
        }
}

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *const dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

static void dccp_sk_destruct(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_tx_ccid = NULL;
        inet_sock_destruct(sk);
}

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_rto         = DCCP_TIMEOUT_INIT;
        icsk->icsk_syn_retries = sysctl_dccp_request_retries;
        sk->sk_state           = DCCP_CLOSED;
        sk->sk_write_space     = dccp_write_space;
        sk->sk_destruct        = dccp_sk_destruct;
        icsk->icsk_sync_mss    = dccp_sync_mss;
        dp->dccps_mss_cache    = 536;
        dp->dccps_rate_last    = jiffies;
        dp->dccps_role         = DCCP_ROLE_UNDEFINED;
        dp->dccps_service      = DCCP_SERVICE_CODE_IS_ABSENT;
        dp->dccps_tx_qlen      = sysctl_dccp_tx_qlen;

        dccp_init_xmit_timers(sk);

        INIT_LIST_HEAD(&dp->dccps_featneg);
        /* control socket doesn't need feat nego */
        if (likely(ctl_sock_initialized))
                return dccp_feat_init(sk);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_hc_rx_ackvec != NULL) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        dp->dccps_hc_rx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /* do not start to listen if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dp))
                return -EPROTO;
        return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
        return state != DCCP_CLOSED && state != DCCP_LISTEN &&
               state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /*
         * This corresponds to the ABORT function of RFC 793, sec. 3.8
         * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
         */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (dccp_need_reset(old_state)) {
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                sk->sk_err = ECONNRESET;
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);

        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->inet_dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
           by poll logic, and correct handling of state changes
           made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_is_writeable(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

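/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * waiting for readability on a connected DCCP socket with poll(2) ends up in
 * dccp_poll() above. "dccp_fd" names an already-connected SOCK_DCCP
 * descriptor and is hypothetical; error handling is omitted:
 *
 *      struct pollfd pfd = { .fd = dccp_fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
 *              n = recv(dccp_fd, buf, sizeof(buf), 0);
 *
 * POLLOUT is reported once sk_stream_is_writeable() holds, i.e. when there
 * is buffer space for queueing at least one more packet.
 */
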
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        int rc = -ENOTCONN;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        switch (cmd) {
        case SIOCINQ: {
                struct sk_buff *skb;
                unsigned long amount = 0;

                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
                        /*
                         * We will only return the amount of this packet since
                         * that is all that will be read.
                         */
                        amount = skb->len;
                }
                rc = put_user(amount, (int __user *)arg);
        }
                break;
        default:
                rc = -ENOIOCTLCMD;
                break;
        }
out:
        release_sock(sk);
        return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

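/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * because DCCP preserves datagram boundaries, SIOCINQ reports only the
 * length of the packet at the head of the receive queue, not the total of
 * all queued bytes. "dccp_fd" is a hypothetical connected descriptor:
 *
 *      int pending = 0;
 *
 *      ioctl(dccp_fd, SIOCINQ, &pending);
 */
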
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

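/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * a client typically sets a single service code before connect(), passed as
 * one network-order 32-bit value; a server may pass an array whose first
 * element is the primary code. The code 42 is an arbitrary placeholder:
 *
 *      uint32_t service = htonl(42);
 *
 *      setsockopt(dccp_fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *                 &service, sizeof(service));
 */
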
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
        u8 *list, len;
        int i, rc;

        if (cscov < 0 || cscov > 15)
                return -EINVAL;
        /*
         * Populate a list of permissible values, in the range cscov...15. This
         * is necessary since feature negotiation of single values only works if
         * both sides incidentally choose the same value. Since the list starts
         * lowest-value first, negotiation will pick the smallest shared value.
         */
        if (cscov == 0)
                return 0;
        len = 16 - cscov;

        list = kmalloc(len, GFP_KERNEL);
        if (list == NULL)
                return -ENOBUFS;

        for (i = 0; i < len; i++)
                list[i] = cscov++;

        rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

        if (rc == 0) {
                if (rx)
                        dccp_sk(sk)->dccps_pcrlen = cscov;
                else
                        dccp_sk(sk)->dccps_pcslen = cscov;
        }
        kfree(list);
        return rc;
}

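/*
 * Worked example for the list construction above: a request for cscov = 12
 * gives len = 16 - 12 = 4 and list = { 12, 13, 14, 15 }, so negotiation can
 * settle on any shared value from 12 to 15, preferring 12. From userspace
 * (an illustrative sketch, not part of the kernel build; "dccp_fd" is
 * hypothetical):
 *
 *      int cscov = 12;
 *
 *      setsockopt(dccp_fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
 *                 &cscov, sizeof(cscov));
 */
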
static int dccp_setsockopt_ccid(struct sock *sk, int type,
                                char __user *optval, unsigned int optlen)
{
        u8 *val;
        int rc = 0;

        if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
                return -EINVAL;

        val = memdup_user(optval, optlen);
        if (IS_ERR(val))
                return PTR_ERR(val);

        lock_sock(sk);
        if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

        if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
        release_sock(sk);

        kfree(val);
        return rc;
}

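/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * the optval for these options is a preference-ordered array of CCID
 * numbers, one byte each. Requesting CCID-2 with CCID-3 as fallback for
 * both directions, on a socket that is not yet connected:
 *
 *      uint8_t ccids[] = { 2, 3 };
 *
 *      setsockopt(dccp_fd, SOL_DCCP, DCCP_SOCKOPT_CCID,
 *                 ccids, sizeof(ccids));
 */
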
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CHANGE_L:
        case DCCP_SOCKOPT_CHANGE_R:
                DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CCID:
        case DCCP_SOCKOPT_RX_CCID:
        case DCCP_SOCKOPT_TX_CCID:
                return dccp_setsockopt_ccid(sk, optname, optval, optlen);
        }

        if (optlen < (int)sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        switch (optname) {
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                if (dp->dccps_role != DCCP_ROLE_SERVER)
                        err = -EOPNOTSUPP;
                else
                        dp->dccps_server_timewait = (val != 0);
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, false);
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, true);
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                if (sk->sk_state != DCCP_CLOSED)
                        err = -EISCONN;
                else if (val < 0 || val >= DCCPQ_POLICY_MAX)
                        err = -EINVAL;
                else
                        dp->dccps_qpolicy = val;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                if (val < 0)
                        err = -EINVAL;
                else
                        dp->dccps_tx_qlen = val;
                break;
        default:
                err = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < (int)sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case DCCP_SOCKOPT_GET_CUR_MPS:
                val = dp->dccps_mss_cache;
                break;
        case DCCP_SOCKOPT_AVAILABLE_CCIDS:
                return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
        case DCCP_SOCKOPT_TX_CCID:
                val = ccid_get_current_tx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_RX_CCID:
                val = ccid_get_current_rx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                val = dp->dccps_server_timewait;
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                val = dp->dccps_pcslen;
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                val = dp->dccps_pcrlen;
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                val = dp->dccps_qpolicy;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                val = dp->dccps_tx_qlen;
                break;
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        len = sizeof(val);
        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

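/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * querying the current maximum packet size and the negotiated TX CCID on a
 * connected socket; both options return a plain int:
 *
 *      int mps, ccid;
 *      socklen_t len = sizeof(int);
 *
 *      getsockopt(dccp_fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS, &mps, &len);
 *      getsockopt(dccp_fd, SOL_DCCP, DCCP_SOCKOPT_TX_CCID, &ccid, &len);
 */
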
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
        struct cmsghdr *cmsg;

        /*
         * Assign an (opaque) qpolicy priority value to skb->priority.
         *
         * We are overloading this skb field for use with the qpolicy subsystem.
         * The skb->priority is normally used for the SO_PRIORITY option, which
         * is initialised from sk_priority. Since the assignment of sk_priority
         * to skb->priority happens later (on layer 3), we overload this field
         * for use with queueing priorities as long as the skb is on layer 4.
         * The default priority value (if nothing is set) is 0.
         */
        skb->priority = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_DCCP)
                        continue;

                if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
                    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
                        return -EINVAL;

                switch (cmsg->cmsg_type) {
                case DCCP_SCM_PRIORITY:
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
                                return -EINVAL;
                        skb->priority = *(__u32 *)CMSG_DATA(cmsg);
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

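/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * attaching a qpolicy priority to an outgoing packet via a DCCP_SCM_PRIORITY
 * control message, which dccp_msghdr_parse() above copies into
 * skb->priority. This requires DCCP_SOCKOPT_QPOLICY_ID to have been set to
 * DCCPQ_POLICY_PRIO while the socket was still closed; "data", "data_len",
 * "prio" and "dccp_fd" are hypothetical:
 *
 *      char cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *      struct iovec iov = { .iov_base = data, .iov_len = data_len };
 *      struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                            .msg_control = cbuf,
 *                            .msg_controllen = sizeof(cbuf) };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_DCCP;
 *      cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
 *      *(uint32_t *)CMSG_DATA(cmsg) = prio;
 *
 *      sendmsg(dccp_fd, &msg, 0);
 */
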
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);

        if (dccp_qpolicy_full(sk)) {
                rc = -EAGAIN;
                goto out_release;
        }

        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_msghdr_parse(msg, skb);
        if (rc != 0)
                goto out_discard;

        dccp_qpolicy_push(sk, skb);
        /*
         * The xmit_timer is set if the TX CCID is rate-based and will expire
         * when congestion control permits further packets to be released into
         * the network. Window-based CCIDs do not use this timer.
         */
        if (!timer_pending(&dp->dccps_xmit_timer))
                dccp_write_xmit(sk);
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                switch (dh->dccph_type) {
                case DCCP_PKT_DATA:
                case DCCP_PKT_DATAACK:
                        goto found_ok_skb;

                case DCCP_PKT_CLOSE:
                case DCCP_PKT_CLOSEREQ:
                        if (!(flags & MSG_PEEK))
                                dccp_finish_passive_close(sk);
                        /* fall through */
                case DCCP_PKT_RESET:
                        dccp_pr_debug("found fin (%s) ok!\n",
                                      dccp_packet_name(dh->dccph_type));
                        len = 0;
                        goto found_fin_ok;
                default:
                        dccp_pr_debug("packet_type=%s\n",
                                      dccp_packet_name(dh->dccph_type));
                        sk_eat_skb(sk, skb);
                }
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when the user tries to read
                                 * from a never-connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo, NULL);
                continue;
        found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_msg(skb, 0, msg, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
                if (flags & MSG_TRUNC)
                        len = skb->len;
        found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

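/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * a minimal DCCP client. Each send(2) maps to one DCCP-Data packet of at
 * most the current MPS, and each recv(2) returns at most one packet. The
 * service code 42 and the address "srv" are hypothetical placeholders:
 *
 *      int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *      uint32_t service = htonl(42);
 *
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *                 &service, sizeof(service));
 *      connect(fd, (struct sockaddr *)&srv, sizeof(srv));
 *      send(fd, "hello", 5, 0);
 *      n = recv(fd, buf, sizeof(buf), 0);
 */
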
int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk, backlog);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

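/*
 * Example (an illustrative userspace sketch, not part of the kernel build):
 * the passive side. listen(2) on a SOCK_DCCP socket lands in
 * inet_dccp_listen() above; "addr" and the service code 42 are hypothetical:
 *
 *      int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *      uint32_t service = htonl(42);
 *
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *                 &service, sizeof(service));
 *      bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *      listen(fd, 5);
 *      cfd = accept(fd, NULL, NULL);
 */
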
static void dccp_terminate_connection(struct sock *sk)
{
        u8 next_state = DCCP_CLOSED;

        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
        case DCCP_PASSIVE_CLOSEREQ:
                dccp_finish_passive_close(sk);
                break;
        case DCCP_PARTOPEN:
                dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                /* fall through */
        case DCCP_OPEN:
                dccp_send_close(sk, 1);

                if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
                    !dccp_sk(sk)->dccps_server_timewait)
                        next_state = DCCP_ACTIVE_CLOSEREQ;
                else
                        next_state = DCCP_CLOSING;
                /* fall through */
        default:
                dccp_set_state(sk, next_state);
        }
}

void dccp_close(struct sock *sk, long timeout)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        u32 data_was_unread = 0;
        int state;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        sk_stop_timer(sk, &dp->dccps_xmit_timer);

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                data_was_unread += skb->len;
                __kfree_skb(skb);
        }

        /* If the socket has already been reset, kill it. */
        if (sk->sk_state == DCCP_CLOSED)
                goto adjudge_to_death;

        if (data_was_unread) {
                /* Unread data was tossed, send an appropriate Reset Code */
                DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                dccp_set_state(sk, DCCP_CLOSED);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (sk->sk_state != DCCP_CLOSED) {
                /*
                 * Normal connection termination. May need to wait if there are
                 * still packets in the TX queue that are delayed by the CCID.
                 */
                dccp_flush_write_queue(sk, &timeout);
                dccp_terminate_connection(sk);
        }

        /*
         * Flush write queue. This may be necessary in several cases:
         * - we have been closed by the peer but still have application data;
         * - abortive termination (unread data or zero linger time),
         * - normal termination but queue could not be flushed within time limit
         */
        __skb_queue_purge(&sk->sk_write_queue);

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);

        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        WARN_ON(sock_owned_by_user(sk));

        percpu_counter_inc(sk->sk_prot->orphan_count);

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
                goto out;

        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

out:
        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
        dccp_statistics = alloc_percpu(struct dccp_mib);
        if (!dccp_statistics)
                return -ENOMEM;
        return 0;
}

static inline void dccp_mib_exit(void)
{
        free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc;

        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
        if (rc)
                goto out_fail;
        rc = -ENOBUFS;
        inet_hashinfo_init(&dccp_hashinfo);
        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_free_percpu;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (totalram_pages >= (128 * 1024))
                goal = totalram_pages >> (21 - PAGE_SHIFT);
        else
                goal = totalram_pages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);

                while (hash_size & (hash_size - 1))
                        hash_size--;
                dccp_hashinfo.ehash_mask = hash_size - 1;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                DCCP_CRIT("Failed to allocate DCCP established hash table");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                goto out_free_dccp_ehash;

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                DCCP_CRIT("Failed to allocate DCCP bind hash table");
                goto out_free_dccp_locks;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;

        rc = ccid_initialize_builtins();
        if (rc)
                goto out_sysctl_exit;

        dccp_timestamping_init();

        return 0;

out_sysctl_exit:
        dccp_sysctl_exit();
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
        inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
        percpu_counter_destroy(&dccp_orphan_count);
out_fail:
        dccp_hashinfo.bhash = NULL;
        dccp_hashinfo.ehash = NULL;
        dccp_hashinfo.bind_bucket_cachep = NULL;
        return rc;
}

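/*
 * Worked example of the hash-table sizing heuristic above, assuming 4 KiB
 * pages (PAGE_SHIFT = 12): with 1 GiB of RAM, totalram_pages is 262144,
 * which is >= 128 * 1024, so goal = 262144 >> (21 - 12) = 512 pages. The
 * ehash_order loop then picks the smallest order with 1UL << order >= 512,
 * i.e. ehash_order = 9, giving 512 pages (2 MiB) for the established hash
 * table before the fallback loop tries smaller orders on allocation failure.
 */
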
static void __exit dccp_fini(void)
{
        ccid_cleanup_builtins();
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order((dccp_hashinfo.ehash_mask + 1) *
                             sizeof(struct inet_ehash_bucket)));
        inet_ehash_locks_free(&dccp_hashinfo);
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
        percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");