/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"
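
/*
 * Queue the packet as the DCCP equivalent of a TCP FIN: mark the socket as
 * shut down for reading and hand the skb to the receive queue so that
 * dccp_recvmsg sees the end of the data stream and exits its loop.
 */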
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}
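
/*
 * A Close received in PARTOPEN or OPEN is answered with a Reset (code
 * "Closed"), the skb is queued like a FIN and the socket moves to CLOSED.
 */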
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	switch (sk->sk_state) {
	case DCCP_PARTOPEN:
	case DCCP_OPEN:
		dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	}
}
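
/*
 * CloseReq is only valid when sent by the server: a client receiving one in
 * PARTOPEN or OPEN answers with a Close and moves to CLOSING, while a server
 * treats it as an unexpected packet type (Step 7) and sends a Sync.
 */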
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		return;
	}

	switch (sk->sk_state) {
	case DCCP_PARTOPEN:
	case DCCP_OPEN:
		dccp_set_state(sk, DCCP_CLOSING);
		dccp_send_close(sk);
		break;
	}
}
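
/*
 * If Ack Vectors are in use, pass the peer's acknowledgement number on to the
 * ack-vector code so it can update its record of what the peer has acked.
 */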
static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dp->dccps_options.dccpo_send_ack_vector)
		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
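
/*
 * Sequence-number validity checks of Steps 5 and 6 (see the comments below).
 * Returns 0 if the packet passes; -1 if it must be dropped, in which case a
 * Sync has already been sent when the Step 6 test failed.
 */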
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl = dp->dccps_swl;
	u64 lawl = dp->dccps_awl;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      dp->dccps_awl, dp->dccps_awh) &&
		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		else
			return -1;
	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 *      Otherwise,
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
		   dh->dccph_type == DCCP_PKT_CLOSE ||
		   dh->dccph_type == DCCP_PKT_RESET) {
		lswl = dp->dccps_gsr;
		dccp_inc_seqno(&lswl);
		lawl = dp->dccps_gar;
	}

	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
		       lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		     DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	} else {
		dccp_pr_debug("Step 6 failed, sending SYNC...\n");
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		return -1;
	}

	return 0;
}
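
/*
 * Receive path for sockets in the established (OPEN) state: validate the
 * sequence numbers, parse options, feed the packet to both CCIDs, then either
 * queue Data/DataAck payloads for the application or handle Reset, Close,
 * CloseReq and unexpected Request/Response packets.
 */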
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	/*
	 * FIXME: check ECN to see if we should use
	 * DCCP_ACKPKTS_STATE_ECN_MARKED
	 */
	if (dp->dccps_options.dccpo_send_ack_vector) {
		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;

		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
				     DCCP_SKB_CB(skb)->dccpd_seq,
				     DCCP_ACKPKTS_STATE_RECEIVED)) {
			LIMIT_NETDEBUG(KERN_INFO "DCCP: acknowledgeable "
				       "packets buffer full!\n");
			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MIN,
						  DCCP_RTO_MAX);
			goto discard;
		}

		/*
		 * FIXME: this activation is probably wrong, have to study more
		 * TCP delack machinery and how it fits into DCCP draft, but
		 * for now it kinda "works" 8)
		 */
		if (!inet_csk_ack_scheduled(sk)) {
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
						  DCCP_RTO_MAX);
		}
	}

	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: check if sk_receive_queue is full, schedule
		 * DATA_DROPPED option if it is.
		 */
		__skb_pull(skb, dh->dccph_doff * 4);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk, 0);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		dccp_rcv_closereq(sk, skb);
		goto discard;
	case DCCP_PKT_CLOSE:
		dccp_rcv_close(sk, skb);
		return 0;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *   or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *   or (S.state == RESPOND and P.type == Data),
		 *	Send Sync packet acknowledging P.seqno
		 *	Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		}
		break;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
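
/*
 * Client side of the handshake: a Response received while in the REQUESTING
 * state is checked against Step 4, the connection moves to PARTOPEN (Step 10)
 * and an Ack is sent.  Returns 1 on an invalid packet so that the caller
 * sends a Reset, 0 if the skb was consumed here, and -1 if the caller should
 * still free the skb.
 */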
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		BUG_TRAP(sk->sk_send_head != NULL);
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu \n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
			/* FIXME: send appropriate RESET code */
			goto out_invalid_packet;
		}

		dccp_sync_mss(sk, dp->dccps_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		inet_sk_rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8) --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick: it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	return 1; /* dccp_v4_do_rcv will send a reset, but...
		     FIXME: the reset code should be
			    DCCP_RESET_CODE_PACKET_ERROR */
}
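
/*
 * Server (RESPOND) and client (PARTOPEN) sides of handshake completion: an
 * Ack or DataAck moves the socket to OPEN, while a Reset just stops the
 * delayed-ack timer.  Returns 1 if the skb was queued by dccp_rcv_established
 * (DataAck case), 0 otherwise.
 */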
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc.  For now we're not clearing it; sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK) {
			dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
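
/*
 * State machine for packets arriving on sockets that are not in the OPEN
 * state (the established case is handled by dccp_rcv_established): LISTEN,
 * REQUESTING, RESPOND, PARTOPEN, CLOSED and so on.  A non-zero return value
 * tells the caller to send a Reset.
 */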
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;
	int queued = 0;

	if (sk->sk_state != DCCP_LISTEN && sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, skb))
			goto discard;

		if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		    DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

		/*
		 * FIXME: check ECN to see if we should use
		 * DCCP_ACKPKTS_STATE_ECN_MARKED
		 */
		if (dp->dccps_options.dccpo_send_ack_vector) {
			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
					     DCCP_SKB_CB(skb)->dccpd_seq,
					     DCCP_ACKPKTS_STATE_RECEIVED))
				goto discard;
			/*
			 * FIXME: this activation is probably wrong, have to
			 * study more TCP delack machinery and how it fits into
			 * DCCP draft, but for now it kinda "works" 8)
			 */
			if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
			     DCCP_MAX_SEQNO + 1) &&
			    !inet_csk_ack_scheduled(sk)) {
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  TCP_DELACK_MIN,
							  DCCP_RTO_MAX);
			}
		}
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		/*
		 * Queue the equivalent of TCP fin so that dccp_recvmsg
		 * exits the loop
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == CloseReq)
		 *	    or (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
		   (dp->dccps_role == DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_REQUEST) ||
		   (sk->sk_state == DCCP_RESPOND &&
		    dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		return 1;

	case DCCP_LISTEN:
		if (dh->dccph_type == DCCP_PKT_ACK ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			return 1;

		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (dccp_v4_conn_request(sk, skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		goto discard;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
			break;
		}
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}