/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}
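
/* Taken together, the three helpers above implement the SCTP ECN
 * round trip described in RFC 2960 Appendix A: observing CE on an
 * inbound packet records the TSN and arms need_ecne, so an ECNE is
 * prepended to outgoing packets; receiving that ECNE lowers cwnd on
 * the transport that carried the marked TSN and answers with a CWR;
 * and receiving the CWR disarms need_ecne again.
 */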

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}
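
/* For illustration: with a sackfreq of 2 (the usual default),
 * sack_cnt reaches sackfreq - 1 (i.e. 1) after one delayed packet, so
 * the very next DATA packet forces a SACK, giving the "at least every
 * second packet" behaviour quoted above; out-of-order arrivals and a
 * disabled SACK delay short-circuit the counting entirely.
 */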

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
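
/* A note on the reference counting above: mod_timer() returns 0 when
 * the timer was not already pending, so the "!mod_timer(...)" pattern
 * takes a new hold only when re-arming actually added a timer, keeping
 * exactly one reference per armed timer.  The sctp_transport_put() at
 * the end of the handler drops the reference that the expired timer
 * held; the same discipline recurs in the other timer handlers below.
 */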

/* This is a generic interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

/* Handle the timeout of the RE-CONFIG timer.  */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};
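
/* The NULL slots above are the per-transport timers: T3-RTX, HEARTBEAT
 * and RECONF live on struct sctp_transport rather than in the
 * association's timer array, which is why their (non-static) handlers
 * above are wired up elsewhere instead of being dispatched through
 * this table.
 */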


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	struct net *net = sock_net(asoc->base.sk);

	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (net->sctp.pf_enable &&
	    (transport->state == SCTP_ACTIVE) &&
	    (asoc->pf_retrans < transport->pathmaxrxt) &&
	    (transport->error_count > asoc->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}
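
/* Putting numbers on the thresholds above: with pf_retrans < pathmaxrxt,
 * a transport walks ACTIVE -> PF once error_count exceeds pf_retrans,
 * and PF (or ACTIVE, when quick failover is disabled) -> INACTIVE once
 * error_count exceeds pathmaxrxt; the association-level counter is
 * checked separately, in the state functions.
 */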

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a side effect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}
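
/* del_timer() returns nonzero only when it deactivated a pending timer,
 * so the put above releases precisely the hold that arming the timer
 * took; a timer that already fired drops its own reference in its
 * handler instead.  The T3-RTX stop helper below relies on the same
 * pairing.
 */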

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING.  If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shutdown the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}


/* Helper function to process the SCTP_CMD_PROCESS_SACK command.  */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_association *new)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return;

	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			sk->sk_state = SCTP_SS_CLOSING;
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)
							err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}


static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}
}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);

	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}


/* Send the next ASCONF chunk currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						struct sctp_chunk, list);
		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
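
/* Note that debug_post_sfx() does not trust the asoc pointer blindly:
 * the side effects may have torn the association down, so it re-looks
 * it up via sctp_id2assoc() and falls back to reporting CLOSED rather
 * than dereferencing a possibly stale pointer.
 */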

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *	while (cmds)
	 *		cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead.  */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery.  */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;
1514 | ||
1515 | case SCTP_CMD_T1_RETRAN: | |
1516 | /* Mark a transport for retransmission. */ | |
1517 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1518 | SCTP_RTXR_T1_RTX); | |
1519 | break; | |
1520 | ||
1521 | case SCTP_CMD_RETRAN: | |
1522 | /* Mark a transport for T3-rtx (data) retransmission. */ | |
1523 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1524 | SCTP_RTXR_T3_RTX); | |
1525 | break; | |
1526 | ||
1527 | case SCTP_CMD_ECN_CE: | |
1528 | /* Do delayed CE processing. */ | |
1529 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | |
1530 | break; | |
1531 | ||
1532 | case SCTP_CMD_ECN_ECNE: | |
1533 | /* Do delayed ECNE processing. */ | |
1534 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | |
1535 | chunk); | |
1536 | if (new_obj) | |
1537 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1538 | SCTP_CHUNK(new_obj)); | |
1539 | break; | |
1540 | ||
1541 | case SCTP_CMD_ECN_CWR: | |
1542 | /* Do delayed CWR processing. */ | |
1543 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | |
1544 | break; | |
1545 | ||
1546 | case SCTP_CMD_SETUP_T2: | |
1547 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); | |
1548 | break; | |
1549 | ||
1550 | case SCTP_CMD_TIMER_START_ONCE: | |
1551 | timer = &asoc->timers[cmd->obj.to]; | |
1552 | ||
1553 | if (timer_pending(timer)) | |
1554 | break; | |
1555 | /* fall through */ | |
1556 | ||
1557 | case SCTP_CMD_TIMER_START: | |
1558 | timer = &asoc->timers[cmd->obj.to]; | |
1559 | timeout = asoc->timeouts[cmd->obj.to]; | |
1560 | BUG_ON(!timeout); | |
1561 | ||
1562 | timer->expires = jiffies + timeout; | |
1563 | sctp_association_hold(asoc); | |
1564 | add_timer(timer); | |
1565 | break; | |
1566 | ||
1567 | case SCTP_CMD_TIMER_RESTART: | |
1568 | timer = &asoc->timers[cmd->obj.to]; | |
1569 | timeout = asoc->timeouts[cmd->obj.to]; | |
1570 | if (!mod_timer(timer, jiffies + timeout)) | |
1571 | sctp_association_hold(asoc); | |
1572 | break; | |
1573 | ||
1574 | case SCTP_CMD_TIMER_STOP: | |
1575 | timer = &asoc->timers[cmd->obj.to]; | |
1576 | if (del_timer(timer)) | |
1577 | sctp_association_put(asoc); | |
1578 | break; | |
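| /* Reference discipline for the three timer commands above: | |
| * every armed timer holds a reference on the association. | |
| * add_timer() is paired with sctp_association_hold(); | |
| * mod_timer() returns 0 only when the timer was idle, i.e. | |
| * when a new reference must be taken; and del_timer() returns | |
| * nonzero only when it deactivated a pending timer, so the | |
| * reference is dropped at most once. | |
| */ | |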
1579 | ||
1580 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: | |
1581 | chunk = cmd->obj.chunk; | |
1582 | t = sctp_assoc_choose_alter_transport(asoc, | |
1583 | asoc->init_last_sent_to); | |
1584 | asoc->init_last_sent_to = t; | |
1585 | chunk->transport = t; | |
1586 | t->init_sent_count++; | |
1587 | /* Set the new transport as the primary path. */ | |
1588 | sctp_assoc_set_primary(asoc, t); | |
1589 | break; | |
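| /* Retransmitted INITs are steered away from the transport | |
| * that carried the last attempt (cf. RFC 4960, Section 6.4, | |
| * which recommends alternating destination addresses on | |
| * retransmission to a multi-homed peer). | |
| */ | |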
1590 | ||
1591 | case SCTP_CMD_INIT_RESTART: | |
1592 | /* Do the needed accounting and updates | |
1593 | * associated with restarting an initialization | |
1594 | * timer. Only multiply the timeout by two if | |
1595 | * all transports have been tried at the current | |
1596 | * timeout. | |
1597 | */ | |
1598 | sctp_cmd_t1_timer_update(asoc, | |
1599 | SCTP_EVENT_TIMEOUT_T1_INIT, | |
1600 | "INIT"); | |
1601 | ||
1602 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
1603 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | |
1604 | break; | |
1605 | ||
1606 | case SCTP_CMD_COOKIEECHO_RESTART: | |
1607 | /* Do the needed accounting and updates | |
1608 | * associated with restarting the T1-COOKIE | |
1609 | * timer. Only multiply the timeout by two if | |
1610 | * all transports have been tried at the current | |
1611 | * timeout. | |
1612 | */ | |
1613 | sctp_cmd_t1_timer_update(asoc, | |
1614 | SCTP_EVENT_TIMEOUT_T1_COOKIE, | |
1615 | "COOKIE"); | |
1616 | ||
1617 | /* If we've sent any data bundled with | |
1618 | * COOKIE-ECHO, we need to resend it. | |
1619 | */ | |
1620 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | |
1621 | transports) { | |
1622 | sctp_retransmit_mark(&asoc->outqueue, t, | |
1623 | SCTP_RTXR_T1_RTX); | |
1624 | } | |
1625 | ||
1626 | sctp_add_cmd_sf(commands, | |
1627 | SCTP_CMD_TIMER_RESTART, | |
1628 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); | |
1629 | break; | |
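| /* DATA bundled with the original COOKIE-ECHO still sits in | |
| * the outqueue; marking every transport with SCTP_RTXR_T1_RTX | |
| * makes those chunks eligible to go out again alongside the | |
| * retried COOKIE-ECHO. | |
| */ | |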
1630 | ||
1631 | case SCTP_CMD_INIT_FAILED: | |
1632 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); | |
1633 | break; | |
1634 | ||
1635 | case SCTP_CMD_ASSOC_FAILED: | |
1636 | sctp_cmd_assoc_failed(commands, asoc, event_type, | |
1637 | subtype, chunk, cmd->obj.u32); | |
1638 | break; | |
1639 | ||
1640 | case SCTP_CMD_INIT_COUNTER_INC: | |
1641 | asoc->init_err_counter++; | |
1642 | break; | |
1643 | ||
1644 | case SCTP_CMD_INIT_COUNTER_RESET: | |
1645 | asoc->init_err_counter = 0; | |
1646 | asoc->init_cycle = 0; | |
1647 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | |
1648 | transports) { | |
1649 | t->init_sent_count = 0; | |
1650 | } | |
1651 | break; | |
1652 | ||
1653 | case SCTP_CMD_REPORT_DUP: | |
1654 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | |
1655 | cmd->obj.u32); | |
1656 | break; | |
1657 | ||
1658 | case SCTP_CMD_REPORT_BAD_TAG: | |
1659 | pr_debug("%s: vtag mismatch!\n", __func__); | |
1660 | break; | |
1661 | ||
1662 | case SCTP_CMD_STRIKE: | |
1663 | /* Mark one strike against a transport. */ | |
1664 | sctp_do_8_2_transport_strike(commands, asoc, | |
1665 | cmd->obj.transport, 0); | |
1666 | break; | |
1667 | ||
1668 | case SCTP_CMD_TRANSPORT_IDLE: | |
1669 | t = cmd->obj.transport; | |
1670 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | |
1671 | break; | |
1672 | ||
1673 | case SCTP_CMD_TRANSPORT_HB_SENT: | |
1674 | t = cmd->obj.transport; | |
1675 | sctp_do_8_2_transport_strike(commands, asoc, | |
1676 | t, 1); | |
1677 | t->hb_sent = 1; | |
1678 | break; | |
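| /* Unlike SCTP_CMD_STRIKE above, the final argument of 1 | |
| * flags this strike as heartbeat driven; t->hb_sent records | |
| * that a HEARTBEAT is outstanding on the transport. | |
| */ | |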
1679 | ||
1680 | case SCTP_CMD_TRANSPORT_ON: | |
1681 | t = cmd->obj.transport; | |
1682 | sctp_cmd_transport_on(commands, asoc, t, chunk); | |
1683 | break; | |
1684 | ||
1685 | case SCTP_CMD_HB_TIMERS_START: | |
1686 | sctp_cmd_hb_timers_start(commands, asoc); | |
1687 | break; | |
1688 | ||
1689 | case SCTP_CMD_HB_TIMER_UPDATE: | |
1690 | t = cmd->obj.transport; | |
1691 | sctp_transport_reset_hb_timer(t); | |
1692 | break; | |
1693 | ||
1694 | case SCTP_CMD_HB_TIMERS_STOP: | |
1695 | sctp_cmd_hb_timers_stop(commands, asoc); | |
1696 | break; | |
1697 | ||
1698 | case SCTP_CMD_REPORT_ERROR: | |
1699 | error = cmd->obj.error; | |
1700 | break; | |
1701 | ||
1702 | case SCTP_CMD_PROCESS_CTSN: | |
1703 | /* Dummy up a SACK for processing. */ | |
1704 | sackh.cum_tsn_ack = cmd->obj.be32; | |
1705 | sackh.a_rwnd = htonl(asoc->peer.rwnd + | |
1706 | asoc->outqueue.outstanding_bytes); | |
1707 | sackh.num_gap_ack_blocks = 0; | |
1708 | sackh.num_dup_tsns = 0; | |
1709 | chunk->subh.sack_hdr = &sackh; | |
1710 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | |
1711 | SCTP_CHUNK(chunk)); | |
1712 | break; | |
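| /* Only the cumulative TSN ack in this fabricated SACK is | |
| * meaningful; a_rwnd is inflated by outstanding_bytes so the | |
| * peer's receive window comes out unchanged when the SACK is | |
| * processed. | |
| */ | |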
1713 | ||
1714 | case SCTP_CMD_DISCARD_PACKET: | |
1715 | /* We need to discard the whole packet. | |
1716 | * Uncork the queue since there might be | |
1717 | * responses pending. | |
1718 | */ | |
1719 | chunk->pdiscard = 1; | |
1720 | if (asoc) { | |
1721 | sctp_outq_uncork(&asoc->outqueue, gfp); | |
1722 | local_cork = 0; | |
1723 | } | |
1724 | break; | |
1725 | ||
1726 | case SCTP_CMD_RTO_PENDING: | |
1727 | t = cmd->obj.transport; | |
1728 | t->rto_pending = 1; | |
1729 | break; | |
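| /* rto_pending marks an RTT measurement in flight on this | |
| * transport; only one measurement runs at a time, and | |
| * retransmitted chunks are never timed (cf. RFC 4960, | |
| * Section 6.3.1, Karn's algorithm). | |
| */ | |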
1730 | ||
1731 | case SCTP_CMD_PART_DELIVER: | |
1732 | sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); | |
1733 | break; | |
1734 | ||
1735 | case SCTP_CMD_RENEGE: | |
1736 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, | |
1737 | GFP_ATOMIC); | |
1738 | break; | |
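| /* Reneging drops previously received but not-yet-delivered | |
| * data from the ulp queue to reclaim receive window for the | |
| * chunk passed in cmd->obj.chunk. | |
| */ | |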
1739 | ||
1740 | case SCTP_CMD_SETUP_T4: | |
1741 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); | |
1742 | break; | |
1743 | ||
1744 | case SCTP_CMD_PROCESS_OPERR: | |
1745 | sctp_cmd_process_operr(commands, asoc, chunk); | |
1746 | break; | |
1747 | case SCTP_CMD_CLEAR_INIT_TAG: | |
1748 | asoc->peer.i.init_tag = 0; | |
1749 | break; | |
1750 | case SCTP_CMD_DEL_NON_PRIMARY: | |
1751 | sctp_cmd_del_non_primary(asoc); | |
1752 | break; | |
1753 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | |
1754 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | |
1755 | break; | |
1756 | case SCTP_CMD_FORCE_PRIM_RETRAN: | |
1757 | t = asoc->peer.retran_path; | |
1758 | asoc->peer.retran_path = asoc->peer.primary_path; | |
1759 | sctp_outq_uncork(&asoc->outqueue, gfp); | |
1760 | local_cork = 0; | |
1761 | asoc->peer.retran_path = t; | |
1762 | break; | |
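| /* Aliasing retran_path to the primary only for the uncork | |
| * forces the flushed COOKIE-ECHO retransmission (and any | |
| * bundled DATA) onto the primary path; the saved retran_path | |
| * is restored immediately afterwards. | |
| */ | |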
1763 | case SCTP_CMD_SET_SK_ERR: | |
1764 | sctp_cmd_set_sk_err(asoc, cmd->obj.error); | |
1765 | break; | |
1766 | case SCTP_CMD_ASSOC_CHANGE: | |
1767 | sctp_cmd_assoc_change(commands, asoc, | |
1768 | cmd->obj.u8); | |
1769 | break; | |
1770 | case SCTP_CMD_ADAPTATION_IND: | |
1771 | sctp_cmd_adaptation_ind(commands, asoc); | |
1772 | break; | |
1773 | ||
1774 | case SCTP_CMD_ASSOC_SHKEY: | |
1775 | error = sctp_auth_asoc_init_active_key(asoc, | |
1776 | GFP_ATOMIC); | |
1777 | break; | |
1778 | case SCTP_CMD_UPDATE_INITTAG: | |
1779 | asoc->peer.i.init_tag = cmd->obj.u32; | |
1780 | break; | |
1781 | case SCTP_CMD_SEND_MSG: | |
1782 | if (!asoc->outqueue.cork) { | |
1783 | sctp_outq_cork(&asoc->outqueue); | |
1784 | local_cork = 1; | |
1785 | } | |
1786 | sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp); | |
1787 | break; | |
1788 | case SCTP_CMD_SEND_NEXT_ASCONF: | |
1789 | sctp_cmd_send_asconf(asoc); | |
1790 | break; | |
1791 | case SCTP_CMD_PURGE_ASCONF_QUEUE: | |
1792 | sctp_asconf_queue_teardown(asoc); | |
1793 | break; | |
1794 | ||
1795 | case SCTP_CMD_SET_ASOC: | |
1796 | if (asoc && local_cork) { | |
1797 | sctp_outq_uncork(&asoc->outqueue, gfp); | |
1798 | local_cork = 0; | |
1799 | } | |
1800 | asoc = cmd->obj.asoc; | |
1801 | break; | |
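| /* Switching associations mid-sequence: flush and release our | |
| * cork on the old association first so queued replies are not | |
| * stranded on its outqueue. | |
| */ | |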
1802 | ||
1803 | default: | |
1804 | pr_warn("Impossible command: %u\n", | |
1805 | cmd->verb); | |
1806 | break; | |
1807 | } | |
1808 | ||
1809 | if (error) | |
1810 | break; | |
1811 | } | |
1812 | ||
1813 | out: | |
1814 | /* If this is in response to a received chunk, wait until | |
1815 | * we are done with the packet to open the queue so that we don't | |
1816 | * send multiple packets in response to a single request. | |
1817 | */ | |
1818 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { | |
1819 | if (chunk->end_of_packet || chunk->singleton) | |
1820 | sctp_outq_uncork(&asoc->outqueue, gfp); | |
1821 | } else if (local_cork) | |
1822 | sctp_outq_uncork(&asoc->outqueue, gfp); | |
1823 | ||
1824 | if (sp->data_ready_signalled) | |
1825 | sp->data_ready_signalled = 0; | |
1826 | ||
1827 | return error; | |
1828 | nomem: | |
1829 | error = -ENOMEM; | |
1830 | goto out; | |
1831 | } | |
1832 |