/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>
22 static void dccp_fin(struct sock
*sk
, struct sk_buff
*skb
)
24 sk
->sk_shutdown
|= RCV_SHUTDOWN
;
25 sock_set_flag(sk
, SOCK_DONE
);
26 __skb_pull(skb
, dccp_hdr(skb
)->dccph_doff
* 4);
27 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
28 skb_set_owner_r(skb
, sk
);
29 sk
->sk_data_ready(sk
, 0);
32 static void dccp_rcv_close(struct sock
*sk
, struct sk_buff
*skb
)
34 switch (sk
->sk_state
) {
37 dccp_v4_send_reset(sk
, DCCP_RESET_CODE_CLOSED
);
39 dccp_set_state(sk
, DCCP_CLOSED
);
44 static void dccp_rcv_closereq(struct sock
*sk
, struct sk_buff
*skb
)
47 * Step 7: Check for unexpected packet types
48 * If (S.is_server and P.type == CloseReq)
49 * Send Sync packet acknowledging P.seqno
50 * Drop packet and return
52 if (dccp_sk(sk
)->dccps_role
!= DCCP_ROLE_CLIENT
) {
53 dccp_send_sync(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
57 switch (sk
->sk_state
) {
60 dccp_set_state(sk
, DCCP_CLOSING
);
66 static inline void dccp_event_ack_recv(struct sock
*sk
, struct sk_buff
*skb
)
68 struct dccp_sock
*dp
= dccp_sk(sk
);
70 if (dp
->dccps_options
.dccpo_send_ack_vector
)
71 dccp_ackpkts_check_rcv_ackno(dp
->dccps_hc_rx_ackpkts
, sk
,
72 DCCP_SKB_CB(skb
)->dccpd_ack_seq
);
75 static int dccp_check_seqno(struct sock
*sk
, struct sk_buff
*skb
)
77 const struct dccp_hdr
*dh
= dccp_hdr(skb
);
78 struct dccp_sock
*dp
= dccp_sk(sk
);
79 u64 lswl
= dp
->dccps_swl
;
80 u64 lawl
= dp
->dccps_awl
;
83 * Step 5: Prepare sequence numbers for Sync
84 * If P.type == Sync or P.type == SyncAck,
85 * If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
86 * / * P is valid, so update sequence number variables
87 * accordingly. After this update, P will pass the tests
88 * in Step 6. A SyncAck is generated if necessary in
90 * Update S.GSR, S.SWL, S.SWH
92 * Drop packet and return
94 if (dh
->dccph_type
== DCCP_PKT_SYNC
||
95 dh
->dccph_type
== DCCP_PKT_SYNCACK
) {
96 if (between48(DCCP_SKB_CB(skb
)->dccpd_ack_seq
,
97 dp
->dccps_awl
, dp
->dccps_awh
) &&
98 !before48(DCCP_SKB_CB(skb
)->dccpd_seq
, dp
->dccps_swl
))
99 dccp_update_gsr(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
103 * Step 6: Check sequence numbers
104 * Let LSWL = S.SWL and LAWL = S.AWL
105 * If P.type == CloseReq or P.type == Close or P.type == Reset,
106 * LSWL := S.GSR + 1, LAWL := S.GAR
107 * If LSWL <= P.seqno <= S.SWH
108 * and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
109 * Update S.GSR, S.SWL, S.SWH
113 * Send Sync packet acknowledging P.seqno
114 * Drop packet and return
116 } else if (dh
->dccph_type
== DCCP_PKT_CLOSEREQ
||
117 dh
->dccph_type
== DCCP_PKT_CLOSE
||
118 dh
->dccph_type
== DCCP_PKT_RESET
) {
119 lswl
= dp
->dccps_gsr
;
120 dccp_inc_seqno(&lswl
);
121 lawl
= dp
->dccps_gar
;
124 if (between48(DCCP_SKB_CB(skb
)->dccpd_seq
, lswl
, dp
->dccps_swh
) &&
125 (DCCP_SKB_CB(skb
)->dccpd_ack_seq
== DCCP_PKT_WITHOUT_ACK_SEQ
||
126 between48(DCCP_SKB_CB(skb
)->dccpd_ack_seq
,
127 lawl
, dp
->dccps_awh
))) {
128 dccp_update_gsr(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
130 if (dh
->dccph_type
!= DCCP_PKT_SYNC
&&
131 (DCCP_SKB_CB(skb
)->dccpd_ack_seq
!=
132 DCCP_PKT_WITHOUT_ACK_SEQ
))
133 dp
->dccps_gar
= DCCP_SKB_CB(skb
)->dccpd_ack_seq
;
135 dccp_pr_debug("Step 6 failed, sending SYNC...\n");
136 dccp_send_sync(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
143 int dccp_rcv_established(struct sock
*sk
, struct sk_buff
*skb
,
144 const struct dccp_hdr
*dh
, const unsigned len
)
146 struct dccp_sock
*dp
= dccp_sk(sk
);
148 if (dccp_check_seqno(sk
, skb
))
151 if (dccp_parse_options(sk
, skb
))
154 if (DCCP_SKB_CB(skb
)->dccpd_ack_seq
!= DCCP_PKT_WITHOUT_ACK_SEQ
)
155 dccp_event_ack_recv(sk
, skb
);
158 * FIXME: check ECN to see if we should use
159 * DCCP_ACKPKTS_STATE_ECN_MARKED
161 if (dp
->dccps_options
.dccpo_send_ack_vector
) {
162 struct dccp_ackpkts
*ap
= dp
->dccps_hc_rx_ackpkts
;
164 if (dccp_ackpkts_add(dp
->dccps_hc_rx_ackpkts
,
165 DCCP_SKB_CB(skb
)->dccpd_seq
,
166 DCCP_ACKPKTS_STATE_RECEIVED
)) {
167 LIMIT_NETDEBUG(KERN_INFO
"DCCP: acknowledgeable "
168 "packets buffer full!\n");
169 ap
->dccpap_ack_seqno
= DCCP_MAX_SEQNO
+ 1;
170 inet_csk_schedule_ack(sk
);
171 inet_csk_reset_xmit_timer(sk
, ICSK_TIME_DACK
,
178 * FIXME: this activation is probably wrong, have to study more
179 * TCP delack machinery and how it fits into DCCP draft, but
180 * for now it kinda "works" 8)
182 if (!inet_csk_ack_scheduled(sk
)) {
183 inet_csk_schedule_ack(sk
);
184 inet_csk_reset_xmit_timer(sk
, ICSK_TIME_DACK
, 5 * HZ
,
189 ccid_hc_rx_packet_recv(dp
->dccps_hc_rx_ccid
, sk
, skb
);
190 ccid_hc_tx_packet_recv(dp
->dccps_hc_tx_ccid
, sk
, skb
);
192 switch (dccp_hdr(skb
)->dccph_type
) {
193 case DCCP_PKT_DATAACK
:
196 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
199 __skb_pull(skb
, dh
->dccph_doff
* 4);
200 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
201 skb_set_owner_r(skb
, sk
);
202 sk
->sk_data_ready(sk
, 0);
208 * Step 9: Process Reset
209 * If P.type == Reset,
210 * Tear down connection
211 * S.state := TIMEWAIT
213 * Drop packet and return
216 dccp_time_wait(sk
, DCCP_TIME_WAIT
, 0);
218 case DCCP_PKT_CLOSEREQ
:
219 dccp_rcv_closereq(sk
, skb
);
222 dccp_rcv_close(sk
, skb
);
224 case DCCP_PKT_REQUEST
:
226 * or (S.is_server and P.type == Response)
227 * or (S.is_client and P.type == Request)
228 * or (S.state >= OPEN and P.type == Request
229 * and P.seqno >= S.OSR)
230 * or (S.state >= OPEN and P.type == Response
231 * and P.seqno >= S.OSR)
232 * or (S.state == RESPOND and P.type == Data),
233 * Send Sync packet acknowledging P.seqno
234 * Drop packet and return
236 if (dp
->dccps_role
!= DCCP_ROLE_LISTEN
)
239 case DCCP_PKT_RESPONSE
:
240 if (dp
->dccps_role
!= DCCP_ROLE_CLIENT
)
243 if (!before48(DCCP_SKB_CB(skb
)->dccpd_seq
, dp
->dccps_osr
)) {
245 dccp_send_sync(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
250 DCCP_INC_STATS_BH(DCCP_MIB_INERRS
);
256 static int dccp_rcv_request_sent_state_process(struct sock
*sk
,
258 const struct dccp_hdr
*dh
,
262 * Step 4: Prepare sequence numbers in REQUEST
263 * If S.state == REQUEST,
264 * If (P.type == Response or P.type == Reset)
265 * and S.AWL <= P.ackno <= S.AWH,
266 * / * Set sequence number variables corresponding to the
267 * other endpoint, so P will pass the tests in Step 6 * /
268 * Set S.GSR, S.ISR, S.SWL, S.SWH
269 * / * Response processing continues in Step 10; Reset
270 * processing continues in Step 9 * /
272 if (dh
->dccph_type
== DCCP_PKT_RESPONSE
) {
273 const struct inet_connection_sock
*icsk
= inet_csk(sk
);
274 struct dccp_sock
*dp
= dccp_sk(sk
);
276 /* Stop the REQUEST timer */
277 inet_csk_clear_xmit_timer(sk
, ICSK_TIME_RETRANS
);
278 BUG_TRAP(sk
->sk_send_head
!= NULL
);
279 __kfree_skb(sk
->sk_send_head
);
280 sk
->sk_send_head
= NULL
;
282 if (!between48(DCCP_SKB_CB(skb
)->dccpd_ack_seq
,
283 dp
->dccps_awl
, dp
->dccps_awh
)) {
284 dccp_pr_debug("invalid ackno: S.AWL=%llu, "
285 "P.ackno=%llu, S.AWH=%llu \n",
286 (unsigned long long)dp
->dccps_awl
,
287 (unsigned long long)DCCP_SKB_CB(skb
)->dccpd_ack_seq
,
288 (unsigned long long)dp
->dccps_awh
);
289 goto out_invalid_packet
;
292 dp
->dccps_isr
= DCCP_SKB_CB(skb
)->dccpd_seq
;
293 dccp_update_gsr(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
295 if (ccid_hc_rx_init(dp
->dccps_hc_rx_ccid
, sk
) != 0 ||
296 ccid_hc_tx_init(dp
->dccps_hc_tx_ccid
, sk
) != 0) {
297 ccid_hc_rx_exit(dp
->dccps_hc_rx_ccid
, sk
);
298 ccid_hc_tx_exit(dp
->dccps_hc_tx_ccid
, sk
);
299 /* FIXME: send appropriate RESET code */
300 goto out_invalid_packet
;
303 dccp_sync_mss(sk
, dp
->dccps_pmtu_cookie
);
306 * Step 10: Process REQUEST state (second part)
307 * If S.state == REQUEST,
308 * / * If we get here, P is a valid Response from the
309 * server (see Step 4), and we should move to
310 * PARTOPEN state. PARTOPEN means send an Ack,
311 * don't send Data packets, retransmit Acks
312 * periodically, and always include any Init Cookie
313 * from the Response * /
314 * S.state := PARTOPEN
316 * Continue with S.state == PARTOPEN
317 * / * Step 12 will send the Ack completing the
318 * three-way handshake * /
320 dccp_set_state(sk
, DCCP_PARTOPEN
);
322 /* Make sure socket is routed, for correct metrics. */
323 inet_sk_rebuild_header(sk
);
325 if (!sock_flag(sk
, SOCK_DEAD
)) {
326 sk
->sk_state_change(sk
);
327 sk_wake_async(sk
, 0, POLL_OUT
);
330 if (sk
->sk_write_pending
|| icsk
->icsk_ack
.pingpong
||
331 icsk
->icsk_accept_queue
.rskq_defer_accept
) {
332 /* Save one ACK. Data will be ready after
333 * several ticks, if write_pending is set.
335 * It may be deleted, but with this feature tcpdumps
336 * look so _wonderfully_ clever, that I was not able
337 * to stand against the temptation 8) --ANK
340 * OK, in DCCP we can as well do a similar trick, its
341 * even in the draft, but there is no need for us to
342 * schedule an ack here, as dccp_sendmsg does this for
343 * us, also stated in the draft. -acme
353 return 1; /* dccp_v4_do_rcv will send a reset, but...
354 FIXME: the reset code should be
355 DCCP_RESET_CODE_PACKET_ERROR */
358 static int dccp_rcv_respond_partopen_state_process(struct sock
*sk
,
360 const struct dccp_hdr
*dh
,
365 switch (dh
->dccph_type
) {
367 inet_csk_clear_xmit_timer(sk
, ICSK_TIME_DACK
);
369 case DCCP_PKT_DATAACK
:
372 * FIXME: we should be reseting the PARTOPEN (DELACK) timer
373 * here but only if we haven't used the DELACK timer for
374 * something else, like sending a delayed ack for a TIMESTAMP
375 * echo, etc, for now were not clearing it, sending an extra
376 * ACK when there is nothing else to do in DELACK is not a big
380 /* Stop the PARTOPEN timer */
381 if (sk
->sk_state
== DCCP_PARTOPEN
)
382 inet_csk_clear_xmit_timer(sk
, ICSK_TIME_DACK
);
384 dccp_sk(sk
)->dccps_osr
= DCCP_SKB_CB(skb
)->dccpd_seq
;
385 dccp_set_state(sk
, DCCP_OPEN
);
387 if (dh
->dccph_type
== DCCP_PKT_DATAACK
) {
388 dccp_rcv_established(sk
, skb
, dh
, len
);
389 queued
= 1; /* packet was queued
390 (by dccp_rcv_established) */
398 int dccp_rcv_state_process(struct sock
*sk
, struct sk_buff
*skb
,
399 struct dccp_hdr
*dh
, unsigned len
)
401 struct dccp_sock
*dp
= dccp_sk(sk
);
402 const int old_state
= sk
->sk_state
;
405 if (sk
->sk_state
!= DCCP_LISTEN
&& sk
->sk_state
!= DCCP_REQUESTING
) {
406 if (dccp_check_seqno(sk
, skb
))
410 * Step 8: Process options and mark acknowledgeable
412 if (dccp_parse_options(sk
, skb
))
415 if (DCCP_SKB_CB(skb
)->dccpd_ack_seq
!=
416 DCCP_PKT_WITHOUT_ACK_SEQ
)
417 dccp_event_ack_recv(sk
, skb
);
419 ccid_hc_rx_packet_recv(dp
->dccps_hc_rx_ccid
, sk
, skb
);
420 ccid_hc_tx_packet_recv(dp
->dccps_hc_tx_ccid
, sk
, skb
);
423 * FIXME: check ECN to see if we should use
424 * DCCP_ACKPKTS_STATE_ECN_MARKED
426 if (dp
->dccps_options
.dccpo_send_ack_vector
) {
427 if (dccp_ackpkts_add(dp
->dccps_hc_rx_ackpkts
,
428 DCCP_SKB_CB(skb
)->dccpd_seq
,
429 DCCP_ACKPKTS_STATE_RECEIVED
))
432 * FIXME: this activation is probably wrong, have to
433 * study more TCP delack machinery and how it fits into
434 * DCCP draft, but for now it kinda "works" 8)
436 if ((dp
->dccps_hc_rx_ackpkts
->dccpap_ack_seqno
==
437 DCCP_MAX_SEQNO
+ 1) &&
438 !inet_csk_ack_scheduled(sk
)) {
439 inet_csk_schedule_ack(sk
);
440 inet_csk_reset_xmit_timer(sk
, ICSK_TIME_DACK
,
448 * Step 9: Process Reset
449 * If P.type == Reset,
450 * Tear down connection
451 * S.state := TIMEWAIT
453 * Drop packet and return
455 if (dh
->dccph_type
== DCCP_PKT_RESET
) {
457 * Queue the equivalent of TCP fin so that dccp_recvmsg
461 dccp_time_wait(sk
, DCCP_TIME_WAIT
, 0);
464 * Step 7: Check for unexpected packet types
465 * If (S.is_server and P.type == CloseReq)
466 * or (S.is_server and P.type == Response)
467 * or (S.is_client and P.type == Request)
468 * or (S.state == RESPOND and P.type == Data),
469 * Send Sync packet acknowledging P.seqno
470 * Drop packet and return
472 } else if ((dp
->dccps_role
!= DCCP_ROLE_CLIENT
&&
473 (dh
->dccph_type
== DCCP_PKT_RESPONSE
||
474 dh
->dccph_type
== DCCP_PKT_CLOSEREQ
)) ||
475 (dp
->dccps_role
== DCCP_ROLE_CLIENT
&&
476 dh
->dccph_type
== DCCP_PKT_REQUEST
) ||
477 (sk
->sk_state
== DCCP_RESPOND
&&
478 dh
->dccph_type
== DCCP_PKT_DATA
)) {
479 dccp_send_sync(sk
, DCCP_SKB_CB(skb
)->dccpd_seq
);
483 switch (sk
->sk_state
) {
488 if (dh
->dccph_type
== DCCP_PKT_ACK
||
489 dh
->dccph_type
== DCCP_PKT_DATAACK
)
492 if (dh
->dccph_type
== DCCP_PKT_RESET
)
495 if (dh
->dccph_type
== DCCP_PKT_REQUEST
) {
496 if (dccp_v4_conn_request(sk
, skb
) < 0)
499 /* FIXME: do congestion control initialization */
504 case DCCP_REQUESTING
:
505 /* FIXME: do congestion control initialization */
507 queued
= dccp_rcv_request_sent_state_process(sk
, skb
, dh
, len
);
516 queued
= dccp_rcv_respond_partopen_state_process(sk
, skb
,
521 if (dh
->dccph_type
== DCCP_PKT_ACK
||
522 dh
->dccph_type
== DCCP_PKT_DATAACK
) {
525 sk
->sk_state_change(sk
);
526 sk_wake_async(sk
, 0, POLL_OUT
);