net/sctp/sm_statefuns.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001-2002 Intel Corp.
7 * Copyright (c) 2002 Nokia Corp.
8 *
9 * This is part of the SCTP Linux Kernel Implementation.
10 *
11 * These are the state functions for the state machine.
12 *
13 * Please send any bug reports or fixes you make to the
14 * email address(es):
15 * lksctp developers <linux-sctp@vger.kernel.org>
16 *
17 * Written or modified by:
18 * La Monte H.P. Yarroll <piggy@acm.org>
19 * Karl Knutson <karl@athena.chicago.il.us>
20 * Mathew Kotowsky <kotowsky@sctp.org>
21 * Sridhar Samudrala <samudrala@us.ibm.com>
22 * Jon Grimm <jgrimm@us.ibm.com>
23 * Hui Huang <hui.huang@nokia.com>
24 * Dajiang Zhang <dajiang.zhang@nokia.com>
25 * Daisy Chang <daisyc@us.ibm.com>
26 * Ardelle Fan <ardelle.fan@intel.com>
27 * Ryan Layer <rmlayer@us.ibm.com>
28 * Kevin Gao <kevin.gao@intel.com>
29 */
30
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/net.h>
38 #include <linux/inet.h>
39 #include <linux/slab.h>
40 #include <net/sock.h>
41 #include <net/inet_ecn.h>
42 #include <linux/skbuff.h>
43 #include <net/sctp/sctp.h>
44 #include <net/sctp/sm.h>
45 #include <net/sctp/structs.h>
46
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/sctp.h>
49
50 static struct sctp_packet *sctp_abort_pkt_new(
51 struct net *net,
52 const struct sctp_endpoint *ep,
53 const struct sctp_association *asoc,
54 struct sctp_chunk *chunk,
55 const void *payload, size_t paylen);
56 static int sctp_eat_data(const struct sctp_association *asoc,
57 struct sctp_chunk *chunk,
58 struct sctp_cmd_seq *commands);
59 static struct sctp_packet *sctp_ootb_pkt_new(
60 struct net *net,
61 const struct sctp_association *asoc,
62 const struct sctp_chunk *chunk);
63 static void sctp_send_stale_cookie_err(struct net *net,
64 const struct sctp_endpoint *ep,
65 const struct sctp_association *asoc,
66 const struct sctp_chunk *chunk,
67 struct sctp_cmd_seq *commands,
68 struct sctp_chunk *err_chunk);
69 static enum sctp_disposition sctp_sf_do_5_2_6_stale(
70 struct net *net,
71 const struct sctp_endpoint *ep,
72 const struct sctp_association *asoc,
73 const union sctp_subtype type,
74 void *arg,
75 struct sctp_cmd_seq *commands);
76 static enum sctp_disposition sctp_sf_shut_8_4_5(
77 struct net *net,
78 const struct sctp_endpoint *ep,
79 const struct sctp_association *asoc,
80 const union sctp_subtype type,
81 void *arg,
82 struct sctp_cmd_seq *commands);
83 static enum sctp_disposition sctp_sf_tabort_8_4_8(
84 struct net *net,
85 const struct sctp_endpoint *ep,
86 const struct sctp_association *asoc,
87 const union sctp_subtype type,
88 void *arg,
89 struct sctp_cmd_seq *commands);
90 static enum sctp_disposition sctp_sf_new_encap_port(
91 struct net *net,
92 const struct sctp_endpoint *ep,
93 const struct sctp_association *asoc,
94 const union sctp_subtype type,
95 void *arg,
96 struct sctp_cmd_seq *commands);
97 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
98
99 static enum sctp_disposition sctp_stop_t1_and_abort(
100 struct net *net,
101 struct sctp_cmd_seq *commands,
102 __be16 error, int sk_err,
103 const struct sctp_association *asoc,
104 struct sctp_transport *transport);
105
106 static enum sctp_disposition sctp_sf_abort_violation(
107 struct net *net,
108 const struct sctp_endpoint *ep,
109 const struct sctp_association *asoc,
110 void *arg,
111 struct sctp_cmd_seq *commands,
112 const __u8 *payload,
113 const size_t paylen);
114
115 static enum sctp_disposition sctp_sf_violation_chunklen(
116 struct net *net,
117 const struct sctp_endpoint *ep,
118 const struct sctp_association *asoc,
119 const union sctp_subtype type,
120 void *arg,
121 struct sctp_cmd_seq *commands);
122
123 static enum sctp_disposition sctp_sf_violation_paramlen(
124 struct net *net,
125 const struct sctp_endpoint *ep,
126 const struct sctp_association *asoc,
127 const union sctp_subtype type,
128 void *arg, void *ext,
129 struct sctp_cmd_seq *commands);
130
131 static enum sctp_disposition sctp_sf_violation_ctsn(
132 struct net *net,
133 const struct sctp_endpoint *ep,
134 const struct sctp_association *asoc,
135 const union sctp_subtype type,
136 void *arg,
137 struct sctp_cmd_seq *commands);
138
139 static enum sctp_disposition sctp_sf_violation_chunk(
140 struct net *net,
141 const struct sctp_endpoint *ep,
142 const struct sctp_association *asoc,
143 const union sctp_subtype type,
144 void *arg,
145 struct sctp_cmd_seq *commands);
146
147 static enum sctp_ierror sctp_sf_authenticate(
148 const struct sctp_association *asoc,
149 struct sctp_chunk *chunk);
150
151 static enum sctp_disposition __sctp_sf_do_9_1_abort(
152 struct net *net,
153 const struct sctp_endpoint *ep,
154 const struct sctp_association *asoc,
155 const union sctp_subtype type,
156 void *arg,
157 struct sctp_cmd_seq *commands);
158
159 static enum sctp_disposition
160 __sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
161 const struct sctp_association *asoc,
162 const union sctp_subtype type, void *arg,
163 struct sctp_cmd_seq *commands);
164
165 /* Small helper function that checks whether the chunk is of the
166 * appropriate length. The 'required_length' argument
167 * is set to be the size of a specific chunk we are testing.
168 * Return Values: true = Valid length
169 * false = Invalid length
170 *
171 */
172 static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
173 __u16 required_length)
174 {
175 __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
176
177 /* Previously already marked? */
178 if (unlikely(chunk->pdiscard))
179 return false;
180 if (unlikely(chunk_length < required_length))
181 return false;
182
183 return true;
184 }
185
186 /* Check for format error in an ABORT chunk */
187 static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
188 {
189 struct sctp_errhdr *err;
190
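/* Walk every error cause in the chunk; the walk stops early if a
 * cause header is truncated or carries a bogus length, so the chunk
 * is well formed only when the cursor lands exactly on chunk_end.
 */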
191 sctp_walk_errors(err, chunk->chunk_hdr);
192
193 return (void *)err == (void *)chunk->chunk_end;
194 }
195
196 /**********************************************************
197 * These are the state functions for handling chunk events.
198 **********************************************************/
199
200 /*
201 * Process the final SHUTDOWN COMPLETE.
202 *
203 * Section: 4 (C) (diagram), 9.2
204 * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
205 * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
206 * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
207 * should stop the T2-shutdown timer and remove all knowledge of the
208 * association (and thus the association enters the CLOSED state).
209 *
210 * Verification Tag: 8.5.1(C), sctpimpguide 2.41.
211 * C) Rules for packet carrying SHUTDOWN COMPLETE:
212 * ...
213 * - The receiver of a SHUTDOWN COMPLETE shall accept the packet
214 * if the Verification Tag field of the packet matches its own tag and
215 * the T bit is not set
216 * OR
217 * it is set to its peer's tag and the T bit is set in the Chunk
218 * Flags.
219 * Otherwise, the receiver MUST silently discard the packet
220 * and take no further action. An endpoint MUST ignore the
221 * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
222 *
223 * Inputs
224 * (endpoint, asoc, chunk)
225 *
226 * Outputs
227 * (asoc, reply_msg, msg_up, timers, counters)
228 *
229 * The return value is the disposition of the chunk.
230 */
231 enum sctp_disposition sctp_sf_do_4_C(struct net *net,
232 const struct sctp_endpoint *ep,
233 const struct sctp_association *asoc,
234 const union sctp_subtype type,
235 void *arg, struct sctp_cmd_seq *commands)
236 {
237 struct sctp_chunk *chunk = arg;
238 struct sctp_ulpevent *ev;
239
240 if (!sctp_vtag_verify_either(chunk, asoc))
241 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
242
243 /* RFC 2960 6.10 Bundling
244 *
245 * An endpoint MUST NOT bundle INIT, INIT ACK or
246 * SHUTDOWN COMPLETE with any other chunks.
247 */
248 if (!chunk->singleton)
249 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
250
251 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
252 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
253 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
254 commands);
255
256 /* RFC 2960 10.2 SCTP-to-ULP
257 *
258 * H) SHUTDOWN COMPLETE notification
259 *
260 * When SCTP completes the shutdown procedures (section 9.2) this
261 * notification is passed to the upper layer.
262 */
263 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
264 0, 0, 0, NULL, GFP_ATOMIC);
265 if (ev)
266 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
267 SCTP_ULPEVENT(ev));
268
269 /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
270 * will verify that it is in SHUTDOWN-ACK-SENT state, if it is
271 * not the chunk should be discarded. If the endpoint is in
272 * the SHUTDOWN-ACK-SENT state the endpoint should stop the
273 * T2-shutdown timer and remove all knowledge of the
274 * association (and thus the association enters the CLOSED
275 * state).
276 */
277 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
278 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
279
280 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
281 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
282
283 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
284 SCTP_STATE(SCTP_STATE_CLOSED));
285
286 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
287 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
288
289 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
290
291 return SCTP_DISPOSITION_DELETE_TCB;
292 }
293
294 /*
295 * Respond to a normal INIT chunk.
296 * We are the side that is being asked for an association.
297 *
298 * Section: 5.1 Normal Establishment of an Association, B
299 * B) "Z" shall respond immediately with an INIT ACK chunk. The
300 * destination IP address of the INIT ACK MUST be set to the source
301 * IP address of the INIT to which this INIT ACK is responding. In
302 * the response, besides filling in other parameters, "Z" must set the
303 * Verification Tag field to Tag_A, and also provide its own
304 * Verification Tag (Tag_Z) in the Initiate Tag field.
305 *
306 * Verification Tag: Must be 0.
307 *
308 * Inputs
309 * (endpoint, asoc, chunk)
310 *
311 * Outputs
312 * (asoc, reply_msg, msg_up, timers, counters)
313 *
314 * The return value is the disposition of the chunk.
315 */
316 enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
317 const struct sctp_endpoint *ep,
318 const struct sctp_association *asoc,
319 const union sctp_subtype type,
320 void *arg,
321 struct sctp_cmd_seq *commands)
322 {
323 struct sctp_chunk *chunk = arg, *repl, *err_chunk;
324 struct sctp_unrecognized_param *unk_param;
325 struct sctp_association *new_asoc;
326 struct sctp_packet *packet;
327 int len;
328
329 /* Update socket peer label if first association. */
330 if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
331 chunk->skb))
332 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
333
334 /* 6.10 Bundling
335 * An endpoint MUST NOT bundle INIT, INIT ACK or
336 * SHUTDOWN COMPLETE with any other chunks.
337 *
338 * IG Section 2.11.2
339 * Furthermore, we require that the receiver of an INIT chunk MUST
340 * enforce these rules by silently discarding an arriving packet
341 * with an INIT chunk that is bundled with other chunks.
342 */
343 if (!chunk->singleton)
344 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
345
346 /* Make sure that the INIT chunk has a valid length.
347 * Normally, this would cause an ABORT with a Protocol Violation
348 * error, but since we don't have an association, we'll
349 * just discard the packet.
350 */
351 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
352 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
353
354 /* If the packet is an OOTB packet which is temporarily on the
355 * control endpoint, respond with an ABORT.
356 */
357 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
358 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
359 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
360 }
361
362 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
363 * Tag.
364 */
365 if (chunk->sctp_hdr->vtag != 0)
366 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
367
368 /* If the INIT is coming toward a closing socket, we'll send back
369 * an ABORT. Essentially, this catches the race of an INIT being
370 * backlogged to the socket at the same time as the user issues close().
371 * Since the socket and all its associations are going away, we
372 * can treat this as an OOTB packet.
373 */
374 if (sctp_sstate(ep->base.sk, CLOSING))
375 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
376
377 /* Verify the INIT chunk before processing it. */
378 err_chunk = NULL;
379 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
380 (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
381 &err_chunk)) {
382 /* This chunk contains a fatal error. It is to be discarded.
383 * Send an ABORT, with causes if there are any.
384 */
385 if (err_chunk) {
386 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
387 (__u8 *)(err_chunk->chunk_hdr) +
388 sizeof(struct sctp_chunkhdr),
389 ntohs(err_chunk->chunk_hdr->length) -
390 sizeof(struct sctp_chunkhdr));
391
392 sctp_chunk_free(err_chunk);
393
394 if (packet) {
395 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
396 SCTP_PACKET(packet));
397 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
398 return SCTP_DISPOSITION_CONSUME;
399 } else {
400 return SCTP_DISPOSITION_NOMEM;
401 }
402 } else {
403 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
404 commands);
405 }
406 }
407
408 /* Grab the INIT header. */
409 chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
410
411 /* Tag the variable length parameters. */
412 chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
413
414 new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
415 if (!new_asoc)
416 goto nomem;
417
418 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
419 sctp_scope(sctp_source(chunk)),
420 GFP_ATOMIC) < 0)
421 goto nomem_init;
422
423 /* The call, sctp_process_init(), can fail on memory allocation. */
424 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
425 (struct sctp_init_chunk *)chunk->chunk_hdr,
426 GFP_ATOMIC))
427 goto nomem_init;
428
429 /* B) "Z" shall respond immediately with an INIT ACK chunk. */
430
431 /* If there are errors that need to be reported for unknown parameters,
432 * make sure to reserve enough room in the INIT ACK for them.
433 */
434 len = 0;
435 if (err_chunk)
436 len = ntohs(err_chunk->chunk_hdr->length) -
437 sizeof(struct sctp_chunkhdr);
438
439 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
440 if (!repl)
441 goto nomem_init;
442
443 /* If there are errors that need to be reported for unknown parameters,
444 * include them in the outgoing INIT ACK as "Unrecognized parameter"
445 * parameter.
446 */
447 if (err_chunk) {
448 /* Get the "Unrecognized parameter" parameter(s) out of the
449 * ERROR chunk generated by sctp_verify_init(). Since the
450 * error cause code for "unknown parameter" and the
451 * "Unrecognized parameter" type is the same, we can
452 * construct the parameters in INIT ACK by copying the
453 * ERROR causes over.
454 */
455 unk_param = (struct sctp_unrecognized_param *)
456 ((__u8 *)(err_chunk->chunk_hdr) +
457 sizeof(struct sctp_chunkhdr));
458 /* Replace the cause code with the "Unrecognized parameter"
459 * parameter type.
460 */
461 sctp_addto_chunk(repl, len, unk_param);
462 sctp_chunk_free(err_chunk);
463 }
464
465 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
466
467 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
468
469 /*
470 * Note: After sending out INIT ACK with the State Cookie parameter,
471 * "Z" MUST NOT allocate any resources, nor keep any states for the
472 * new association. Otherwise, "Z" will be vulnerable to resource
473 * attacks.
474 */
475 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
476
477 return SCTP_DISPOSITION_DELETE_TCB;
478
479 nomem_init:
480 sctp_association_free(new_asoc);
481 nomem:
482 if (err_chunk)
483 sctp_chunk_free(err_chunk);
484 return SCTP_DISPOSITION_NOMEM;
485 }
486
487 /*
488 * Respond to a normal INIT ACK chunk.
489 * We are the side that is initiating the association.
490 *
491 * Section: 5.1 Normal Establishment of an Association, C
492 * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
493 * timer and leave COOKIE-WAIT state. "A" shall then send the State
494 * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
495 * the T1-cookie timer, and enter the COOKIE-ECHOED state.
496 *
497 * Note: The COOKIE ECHO chunk can be bundled with any pending outbound
498 * DATA chunks, but it MUST be the first chunk in the packet and
499 * until the COOKIE ACK is returned the sender MUST NOT send any
500 * other packets to the peer.
501 *
502 * Verification Tag: 3.3.3
503 * If the value of the Initiate Tag in a received INIT ACK chunk is
504 * found to be 0, the receiver MUST treat it as an error and close the
505 * association by transmitting an ABORT.
506 *
507 * Inputs
508 * (endpoint, asoc, chunk)
509 *
510 * Outputs
511 * (asoc, reply_msg, msg_up, timers, counters)
512 *
513 * The return value is the disposition of the chunk.
514 */
515 enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
516 const struct sctp_endpoint *ep,
517 const struct sctp_association *asoc,
518 const union sctp_subtype type,
519 void *arg,
520 struct sctp_cmd_seq *commands)
521 {
522 struct sctp_init_chunk *initchunk;
523 struct sctp_chunk *chunk = arg;
524 struct sctp_chunk *err_chunk;
525 struct sctp_packet *packet;
526
527 if (!sctp_vtag_verify(chunk, asoc))
528 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
529
530 /* 6.10 Bundling
531 * An endpoint MUST NOT bundle INIT, INIT ACK or
532 * SHUTDOWN COMPLETE with any other chunks.
533 */
534 if (!chunk->singleton)
535 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
536
537 /* Make sure that the INIT-ACK chunk has a valid length */
538 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_initack_chunk)))
539 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
540 commands);
541 /* Grab the INIT header. */
542 chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
543
544 /* Verify the INIT chunk before processing it. */
545 err_chunk = NULL;
546 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
547 (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
548 &err_chunk)) {
549
550 enum sctp_error error = SCTP_ERROR_NO_RESOURCE;
551
552 /* This chunk contains a fatal error. It is to be discarded.
553 * Send an ABORT, with causes. If there are no causes,
554 * then there wasn't enough memory. Just terminate
555 * the association.
556 */
557 if (err_chunk) {
558 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
559 (__u8 *)(err_chunk->chunk_hdr) +
560 sizeof(struct sctp_chunkhdr),
561 ntohs(err_chunk->chunk_hdr->length) -
562 sizeof(struct sctp_chunkhdr));
563
564 sctp_chunk_free(err_chunk);
565
566 if (packet) {
567 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
568 SCTP_PACKET(packet));
569 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
570 error = SCTP_ERROR_INV_PARAM;
571 }
572 }
573
574 /* SCTP-AUTH, Section 6.3:
575 * It should be noted that if the receiver wants to tear
576 * down an association in an authenticated way only, the
577 * handling of malformed packets should not result in
578 * tearing down the association.
579 *
580 * This means that if we only want to abort associations
581 * in an authenticated way (i.e AUTH+ABORT), then we
582 * can't destroy this association just because the packet
583 * was malformed.
584 */
585 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
586 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
587
588 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
589 return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
590 asoc, chunk->transport);
591 }
592
593 /* Tag the variable length parameters. Note that we never
594 * convert the parameters in an INIT chunk.
595 */
596 chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
597
598 initchunk = (struct sctp_init_chunk *)chunk->chunk_hdr;
599
600 sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
601 SCTP_PEER_INIT(initchunk));
602
603 /* Reset init error count upon receipt of INIT-ACK. */
604 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
605
606 /* 5.1 C) "A" shall stop the T1-init timer and leave
607 * COOKIE-WAIT state. "A" shall then ... start the T1-cookie
608 * timer, and enter the COOKIE-ECHOED state.
609 */
610 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
611 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
612 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
613 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
614 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
615 SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
616
617 /* SCTP-AUTH: generate the association shared keys so that
618 * we can potentially sign the COOKIE-ECHO.
619 */
620 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
621
622 /* 5.1 C) "A" shall then send the State Cookie received in the
623 * INIT ACK chunk in a COOKIE ECHO chunk, ...
624 */
625 /* If there are any errors to report, send the ERROR chunk generated
626 * for unknown parameters as well.
627 */
628 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
629 SCTP_CHUNK(err_chunk));
630
631 return SCTP_DISPOSITION_CONSUME;
632 }
633
634 static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
635 const struct sctp_association *asoc)
636 {
637 struct sctp_chunk auth;
638
639 if (!chunk->auth_chunk)
640 return true;
641
642 /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
643 * is supposed to be authenticated and we have to do delayed
644 * authentication. We've just recreated the association using
645 * the information in the cookie and now it's much easier to
646 * do the authentication.
647 */
648
649 /* Make sure that we and the peer are AUTH capable */
650 if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
651 return false;
652
653 /* set-up our fake chunk so that we can process it */
654 auth.skb = chunk->auth_chunk;
655 auth.asoc = chunk->asoc;
656 auth.sctp_hdr = chunk->sctp_hdr;
657 auth.chunk_hdr = (struct sctp_chunkhdr *)
658 skb_push(chunk->auth_chunk,
659 sizeof(struct sctp_chunkhdr));
660 skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
661 auth.transport = chunk->transport;
662
663 return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
664 }
665
666 /*
667 * Respond to a normal COOKIE ECHO chunk.
668 * We are the side that is being asked for an association.
669 *
670 * Section: 5.1 Normal Establishment of an Association, D
671 * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply
672 * with a COOKIE ACK chunk after building a TCB and moving to
673 * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with
674 * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK
675 * chunk MUST be the first chunk in the packet.
676 *
677 * IMPLEMENTATION NOTE: An implementation may choose to send the
678 * Communication Up notification to the SCTP user upon reception
679 * of a valid COOKIE ECHO chunk.
680 *
681 * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
682 * D) Rules for packet carrying a COOKIE ECHO
683 *
684 * - When sending a COOKIE ECHO, the endpoint MUST use the value of the
685 * Initial Tag received in the INIT ACK.
686 *
687 * - The receiver of a COOKIE ECHO follows the procedures in Section 5.
688 *
689 * Inputs
690 * (endpoint, asoc, chunk)
691 *
692 * Outputs
693 * (asoc, reply_msg, msg_up, timers, counters)
694 *
695 * The return value is the disposition of the chunk.
696 */
697 enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
698 const struct sctp_endpoint *ep,
699 const struct sctp_association *asoc,
700 const union sctp_subtype type,
701 void *arg,
702 struct sctp_cmd_seq *commands)
703 {
704 struct sctp_ulpevent *ev, *ai_ev = NULL, *auth_ev = NULL;
705 struct sctp_association *new_asoc;
706 struct sctp_init_chunk *peer_init;
707 struct sctp_chunk *chunk = arg;
708 struct sctp_chunk *err_chk_p;
709 struct sctp_chunk *repl;
710 struct sock *sk;
711 int error = 0;
712
713 if (asoc && !sctp_vtag_verify(chunk, asoc))
714 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
715
716 /* If the packet is an OOTB packet which is temporarily on the
717 * control endpoint, respond with an ABORT.
718 */
719 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
720 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
721 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
722 }
723
724 /* Make sure that the COOKIE_ECHO chunk has a valid length.
725 * In this case, we check that we have enough for at least a
726 * chunk header. More detailed verification is done
727 * in sctp_unpack_cookie().
728 */
729 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
730 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
731 commands);
732
733 /* If the endpoint is not listening or if the number of associations
734 * on the TCP-style socket exceeds the max backlog, respond with an
735 * ABORT.
736 */
737 sk = ep->base.sk;
738 if (!sctp_sstate(sk, LISTENING) ||
739 (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
740 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
741
742 /* "Decode" the chunk. We have no optional parameters so we
743 * are in good shape.
744 */
745 chunk->subh.cookie_hdr =
746 (struct sctp_signed_cookie *)chunk->skb->data;
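/* Advance skb->data past the cookie body (chunk length minus the
 * chunk header); if the pull fails, fall through to the no-memory
 * path below.
 */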
747 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
748 sizeof(struct sctp_chunkhdr)))
749 goto nomem;
750
751 /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
752 * "Z" will reply with a COOKIE ACK chunk after building a TCB
753 * and moving to the ESTABLISHED state.
754 */
755 new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
756 &err_chk_p);
757
758 /* FIXME:
759 * If the re-build failed, what is the proper error path
760 * from here?
761 *
762 * [We should abort the association. --piggy]
763 */
764 if (!new_asoc) {
765 /* FIXME: Several errors are possible. A bad cookie should
766 * be silently discarded, but think about logging it too.
767 */
768 switch (error) {
769 case -SCTP_IERROR_NOMEM:
770 goto nomem;
771
772 case -SCTP_IERROR_STALE_COOKIE:
773 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
774 err_chk_p);
775 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
776
777 case -SCTP_IERROR_BAD_SIG:
778 default:
779 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
780 }
781 }
782
783
784 /* Delay state machine commands until later.
785 *
786 * Re-building the bind address for the association is already
787 * done in sctp_unpack_cookie().
788 */
789 /* This is a brand-new association, so these are not yet side
790 * effects--it is safe to run them here.
791 */
792 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
793
794 if (!sctp_process_init(new_asoc, chunk,
795 &chunk->subh.cookie_hdr->c.peer_addr,
796 peer_init, GFP_ATOMIC))
797 goto nomem_init;
798
799 /* SCTP-AUTH: Now that we've populated the required fields in
800 * sctp_process_init, set up the association shared keys as
801 * necessary so that we can potentially authenticate the ACK
802 */
803 error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
804 if (error)
805 goto nomem_init;
806
807 if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
808 sctp_association_free(new_asoc);
809 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
810 }
811
812 repl = sctp_make_cookie_ack(new_asoc, chunk);
813 if (!repl)
814 goto nomem_init;
815
816 /* RFC 2960 5.1 Normal Establishment of an Association
817 *
818 * D) IMPLEMENTATION NOTE: An implementation may choose to
819 * send the Communication Up notification to the SCTP user
820 * upon reception of a valid COOKIE ECHO chunk.
821 */
822 ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
823 new_asoc->c.sinit_num_ostreams,
824 new_asoc->c.sinit_max_instreams,
825 NULL, GFP_ATOMIC);
826 if (!ev)
827 goto nomem_ev;
828
829 /* Sockets API Draft Section 5.3.1.6
830 * When a peer sends an Adaptation Layer Indication parameter, SCTP
831 * delivers this notification to inform the application of the
832 * peer's requested adaptation layer.
833 */
834 if (new_asoc->peer.adaptation_ind) {
835 ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
836 GFP_ATOMIC);
837 if (!ai_ev)
838 goto nomem_aiev;
839 }
840
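/* The peer did not negotiate SCTP-AUTH, so tell the ULP that no
 * authentication will be in use on this association.
 */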
841 if (!new_asoc->peer.auth_capable) {
842 auth_ev = sctp_ulpevent_make_authkey(new_asoc, 0,
843 SCTP_AUTH_NO_AUTH,
844 GFP_ATOMIC);
845 if (!auth_ev)
846 goto nomem_authev;
847 }
848
849 /* Add all the state machine commands now since we've created
850 * everything. This way we don't introduce memory corruptions
851 * during side-effect processing and correctly count established
852 * associations.
853 */
854 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
855 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
856 SCTP_STATE(SCTP_STATE_ESTABLISHED));
857 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
858 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
859 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
860
861 if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
862 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
863 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
864
865 /* This will send the COOKIE ACK */
866 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
867
868 /* Queue the ASSOC_CHANGE event */
869 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
870
871 /* Send up the Adaptation Layer Indication event */
872 if (ai_ev)
873 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
874 SCTP_ULPEVENT(ai_ev));
875
876 if (auth_ev)
877 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
878 SCTP_ULPEVENT(auth_ev));
879
880 return SCTP_DISPOSITION_CONSUME;
881
882 nomem_authev:
883 sctp_ulpevent_free(ai_ev);
884 nomem_aiev:
885 sctp_ulpevent_free(ev);
886 nomem_ev:
887 sctp_chunk_free(repl);
888 nomem_init:
889 sctp_association_free(new_asoc);
890 nomem:
891 return SCTP_DISPOSITION_NOMEM;
892 }
893
894 /*
895 * Respond to a normal COOKIE ACK chunk.
896 * We are the side that is asking for an association.
897 *
898 * RFC 2960 5.1 Normal Establishment of an Association
899 *
900 * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
901 * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
902 * timer. It may also notify its ULP about the successful
903 * establishment of the association with a Communication Up
904 * notification (see Section 10).
905 *
906 * Verification Tag:
907 * Inputs
908 * (endpoint, asoc, chunk)
909 *
910 * Outputs
911 * (asoc, reply_msg, msg_up, timers, counters)
912 *
913 * The return value is the disposition of the chunk.
914 */
915 enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
916 const struct sctp_endpoint *ep,
917 const struct sctp_association *asoc,
918 const union sctp_subtype type,
919 void *arg,
920 struct sctp_cmd_seq *commands)
921 {
922 struct sctp_chunk *chunk = arg;
923 struct sctp_ulpevent *ev;
924
925 if (!sctp_vtag_verify(chunk, asoc))
926 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
927
928 /* Verify that the chunk length for the COOKIE-ACK is OK.
929 * If we don't do this, any bundled chunks may be junked.
930 */
931 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
932 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
933 commands);
934
935 /* Reset init error count upon receipt of COOKIE-ACK,
936 * to avoid problems with the management of this
937 * counter in stale cookie situations when a transition back
938 * from the COOKIE-ECHOED state to the COOKIE-WAIT
939 * state is performed.
940 */
941 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
942
943 /* Set peer label for connection. */
944 security_inet_conn_established(ep->base.sk, chunk->skb);
945
946 /* RFC 2960 5.1 Normal Establishment of an Association
947 *
948 * E) Upon reception of the COOKIE ACK, endpoint "A" will move
949 * from the COOKIE-ECHOED state to the ESTABLISHED state,
950 * stopping the T1-cookie timer.
951 */
952 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
953 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
954 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
955 SCTP_STATE(SCTP_STATE_ESTABLISHED));
956 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
957 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
958 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
959 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
960 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
961 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
962
963 /* It may also notify its ULP about the successful
964 * establishment of the association with a Communication Up
965 * notification (see Section 10).
966 */
967 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
968 0, asoc->c.sinit_num_ostreams,
969 asoc->c.sinit_max_instreams,
970 NULL, GFP_ATOMIC);
971
972 if (!ev)
973 goto nomem;
974
975 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
976
977 /* Sockets API Draft Section 5.3.1.6
978 * When a peer sends an Adaptation Layer Indication parameter, SCTP
979 * delivers this notification to inform the application of the
980 * peer's requested adaptation layer.
981 */
982 if (asoc->peer.adaptation_ind) {
983 ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
984 if (!ev)
985 goto nomem;
986
987 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
988 SCTP_ULPEVENT(ev));
989 }
990
991 if (!asoc->peer.auth_capable) {
992 ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH,
993 GFP_ATOMIC);
994 if (!ev)
995 goto nomem;
996 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
997 SCTP_ULPEVENT(ev));
998 }
999
1000 return SCTP_DISPOSITION_CONSUME;
1001 nomem:
1002 return SCTP_DISPOSITION_NOMEM;
1003 }
1004
1005 /* Generate and send out a heartbeat packet. */
1006 static enum sctp_disposition sctp_sf_heartbeat(
1007 const struct sctp_endpoint *ep,
1008 const struct sctp_association *asoc,
1009 const union sctp_subtype type,
1010 void *arg,
1011 struct sctp_cmd_seq *commands)
1012 {
1013 struct sctp_transport *transport = (struct sctp_transport *) arg;
1014 struct sctp_chunk *reply;
1015
1016 /* Send a heartbeat to our peer. */
1017 reply = sctp_make_heartbeat(asoc, transport, 0);
1018 if (!reply)
1019 return SCTP_DISPOSITION_NOMEM;
1020
1021 /* Set rto_pending indicating that an RTT measurement
1022 * is started with this heartbeat chunk.
1023 */
1024 sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
1025 SCTP_TRANSPORT(transport));
1026
1027 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
1028 return SCTP_DISPOSITION_CONSUME;
1029 }
1030
1031 /* Generate a HEARTBEAT packet on the given transport. */
1032 enum sctp_disposition sctp_sf_sendbeat_8_3(struct net *net,
1033 const struct sctp_endpoint *ep,
1034 const struct sctp_association *asoc,
1035 const union sctp_subtype type,
1036 void *arg,
1037 struct sctp_cmd_seq *commands)
1038 {
1039 struct sctp_transport *transport = (struct sctp_transport *) arg;
1040
1041 if (asoc->overall_error_count >= asoc->max_retrans) {
1042 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
1043 SCTP_ERROR(ETIMEDOUT));
1044 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
1045 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
1046 SCTP_PERR(SCTP_ERROR_NO_ERROR));
1047 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
1048 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
1049 return SCTP_DISPOSITION_DELETE_TCB;
1050 }
1051
1052 /* Section 3.3.5.
1053 * The Sender-specific Heartbeat Info field should normally include
1054 * information about the sender's current time when this HEARTBEAT
1055 * chunk is sent and the destination transport address to which this
1056 * HEARTBEAT is sent (see Section 8.3).
1057 */
1058
1059 if (transport->param_flags & SPP_HB_ENABLE) {
1060 if (SCTP_DISPOSITION_NOMEM ==
1061 sctp_sf_heartbeat(ep, asoc, type, arg,
1062 commands))
1063 return SCTP_DISPOSITION_NOMEM;
1064
1065 /* Set transport error counter and association error counter
1066 * when sending heartbeat.
1067 */
1068 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
1069 SCTP_TRANSPORT(transport));
1070 }
1071 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
1072 SCTP_TRANSPORT(transport));
1073 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
1074 SCTP_TRANSPORT(transport));
1075
1076 return SCTP_DISPOSITION_CONSUME;
1077 }
1078
1079 /* resend asoc strreset_chunk. */
1080 enum sctp_disposition sctp_sf_send_reconf(struct net *net,
1081 const struct sctp_endpoint *ep,
1082 const struct sctp_association *asoc,
1083 const union sctp_subtype type,
1084 void *arg,
1085 struct sctp_cmd_seq *commands)
1086 {
1087 struct sctp_transport *transport = arg;
1088
1089 if (asoc->overall_error_count >= asoc->max_retrans) {
1090 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
1091 SCTP_ERROR(ETIMEDOUT));
1092 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
1093 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
1094 SCTP_PERR(SCTP_ERROR_NO_ERROR));
1095 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
1096 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
1097 return SCTP_DISPOSITION_DELETE_TCB;
1098 }
1099
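/* Take an extra reference on the outstanding stream-reset chunk
 * before queueing it again, and charge a strike against the
 * transport it is retransmitted on.
 */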
1100 sctp_chunk_hold(asoc->strreset_chunk);
1101 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1102 SCTP_CHUNK(asoc->strreset_chunk));
1103 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
1104
1105 return SCTP_DISPOSITION_CONSUME;
1106 }
1107
1108 /* Send a HEARTBEAT chunk with padding for PLPMTUD probing. */
1109 enum sctp_disposition sctp_sf_send_probe(struct net *net,
1110 const struct sctp_endpoint *ep,
1111 const struct sctp_association *asoc,
1112 const union sctp_subtype type,
1113 void *arg,
1114 struct sctp_cmd_seq *commands)
1115 {
1116 struct sctp_transport *transport = (struct sctp_transport *)arg;
1117 struct sctp_chunk *reply;
1118
1119 if (!sctp_transport_pl_enabled(transport))
1120 return SCTP_DISPOSITION_CONSUME;
1121
1122 if (sctp_transport_pl_send(transport)) {
1123 reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
1124 if (!reply)
1125 return SCTP_DISPOSITION_NOMEM;
1126 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
1127 }
1128 sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
1129 SCTP_TRANSPORT(transport));
1130
1131 return SCTP_DISPOSITION_CONSUME;
1132 }
1133
1134 /*
1135 * Process a heartbeat request.
1136 *
1137 * Section: 8.3 Path Heartbeat
1138 * The receiver of the HEARTBEAT should immediately respond with a
1139 * HEARTBEAT ACK that contains the Heartbeat Information field copied
1140 * from the received HEARTBEAT chunk.
1141 *
1142 * Verification Tag: 8.5 Verification Tag [Normal verification]
1143 * When receiving an SCTP packet, the endpoint MUST ensure that the
1144 * value in the Verification Tag field of the received SCTP packet
1145 * matches its own Tag. If the received Verification Tag value does not
1146 * match the receiver's own tag value, the receiver shall silently
1147 * discard the packet and shall not process it any further except for
1148 * those cases listed in Section 8.5.1 below.
1149 *
1150 * Inputs
1151 * (endpoint, asoc, chunk)
1152 *
1153 * Outputs
1154 * (asoc, reply_msg, msg_up, timers, counters)
1155 *
1156 * The return value is the disposition of the chunk.
1157 */
1158 enum sctp_disposition sctp_sf_beat_8_3(struct net *net,
1159 const struct sctp_endpoint *ep,
1160 const struct sctp_association *asoc,
1161 const union sctp_subtype type,
1162 void *arg, struct sctp_cmd_seq *commands)
1163 {
1164 struct sctp_paramhdr *param_hdr;
1165 struct sctp_chunk *chunk = arg;
1166 struct sctp_chunk *reply;
1167 size_t paylen = 0;
1168
1169 if (!sctp_vtag_verify(chunk, asoc))
1170 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1171
1172 /* Make sure that the HEARTBEAT chunk has a valid length. */
1173 if (!sctp_chunk_length_valid(chunk,
1174 sizeof(struct sctp_heartbeat_chunk)))
1175 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1176 commands);
1177
1178 /* 8.3 The receiver of the HEARTBEAT should immediately
1179 * respond with a HEARTBEAT ACK that contains the Heartbeat
1180 * Information field copied from the received HEARTBEAT chunk.
1181 */
1182 chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data;
1183 param_hdr = (struct sctp_paramhdr *)chunk->subh.hb_hdr;
1184 paylen = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_chunkhdr);
1185
1186 if (ntohs(param_hdr->length) > paylen)
1187 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
1188 param_hdr, commands);
1189
1190 if (!pskb_pull(chunk->skb, paylen))
1191 goto nomem;
1192
1193 reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
1194 if (!reply)
1195 goto nomem;
1196
1197 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
1198 return SCTP_DISPOSITION_CONSUME;
1199
1200 nomem:
1201 return SCTP_DISPOSITION_NOMEM;
1202 }
1203
1204 /*
1205 * Process the returning HEARTBEAT ACK.
1206 *
1207 * Section: 8.3 Path Heartbeat
1208 * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
1209 * should clear the error counter of the destination transport
1210 * address to which the HEARTBEAT was sent, and mark the destination
1211 * transport address as active if it is not so marked. The endpoint may
1212 * optionally report to the upper layer when an inactive destination
1213 * address is marked as active due to the reception of the latest
1214 * HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also
1215 * clear the association overall error count as well (as defined
1216 * in section 8.1).
1217 *
1218 * The receiver of the HEARTBEAT ACK should also perform an RTT
1219 * measurement for that destination transport address using the time
1220 * value carried in the HEARTBEAT ACK chunk.
1221 *
1222 * Verification Tag: 8.5 Verification Tag [Normal verification]
1223 *
1224 * Inputs
1225 * (endpoint, asoc, chunk)
1226 *
1227 * Outputs
1228 * (asoc, reply_msg, msg_up, timers, counters)
1229 *
1230 * The return value is the disposition of the chunk.
1231 */
1232 enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
1233 const struct sctp_endpoint *ep,
1234 const struct sctp_association *asoc,
1235 const union sctp_subtype type,
1236 void *arg,
1237 struct sctp_cmd_seq *commands)
1238 {
1239 struct sctp_sender_hb_info *hbinfo;
1240 struct sctp_chunk *chunk = arg;
1241 struct sctp_transport *link;
1242 unsigned long max_interval;
1243 union sctp_addr from_addr;
1244
1245 if (!sctp_vtag_verify(chunk, asoc))
1246 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1247
1248 /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
1249 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr) +
1250 sizeof(*hbinfo)))
1251 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1252 commands);
1253
1254 hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
1255 /* Make sure that the length of the parameter is what we expect */
1256 if (ntohs(hbinfo->param_hdr.length) != sizeof(*hbinfo))
1257 return SCTP_DISPOSITION_DISCARD;
1258
1259 from_addr = hbinfo->daddr;
1260 link = sctp_assoc_lookup_paddr(asoc, &from_addr);
1261
1262 /* This should never happen, but let's log it if it does. */
1263 if (unlikely(!link)) {
1264 if (from_addr.sa.sa_family == AF_INET6) {
1265 net_warn_ratelimited("%s association %p could not find address %pI6\n",
1266 __func__,
1267 asoc,
1268 &from_addr.v6.sin6_addr);
1269 } else {
1270 net_warn_ratelimited("%s association %p could not find address %pI4\n",
1271 __func__,
1272 asoc,
1273 &from_addr.v4.sin_addr.s_addr);
1274 }
1275 return SCTP_DISPOSITION_DISCARD;
1276 }
1277
1278 /* Validate the 64-bit random nonce. */
1279 if (hbinfo->hb_nonce != link->hb_nonce)
1280 return SCTP_DISPOSITION_DISCARD;
1281
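/* A non-zero probe_size marks this HEARTBEAT ACK as the reply to a
 * PLPMTUD probe; only accept it if probing is enabled and it matches
 * the probe outstanding on this transport.
 */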
1282 if (hbinfo->probe_size) {
1283 if (hbinfo->probe_size != link->pl.probe_size ||
1284 !sctp_transport_pl_enabled(link))
1285 return SCTP_DISPOSITION_DISCARD;
1286
1287 if (sctp_transport_pl_recv(link))
1288 return SCTP_DISPOSITION_CONSUME;
1289
1290 return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
1291 }
1292
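/* A HEARTBEAT ACK is only plausible if it arrives within one
 * heartbeat interval plus the transport's current RTO.
 */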
1293 max_interval = link->hbinterval + link->rto;
1294
1295 /* Check if the timestamp looks valid. */
1296 if (time_after(hbinfo->sent_at, jiffies) ||
1297 time_after(jiffies, hbinfo->sent_at + max_interval)) {
1298 pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
1299 "for transport:%p\n", __func__, link);
1300
1301 return SCTP_DISPOSITION_DISCARD;
1302 }
1303
1304 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
1305 * the HEARTBEAT should clear the error counter of the
1306 * destination transport address to which the HEARTBEAT was
1307 * sent and mark the destination transport address as active if
1308 * it is not so marked.
1309 */
1310 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
1311
1312 return SCTP_DISPOSITION_CONSUME;
1313 }
1314
1315 /* Helper function to send out an abort for the restart
1316 * condition.
1317 */
1318 static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
1319 struct sctp_chunk *init,
1320 struct sctp_cmd_seq *commands)
1321 {
1322 struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
1323 union sctp_addr_param *addrparm;
1324 struct sctp_errhdr *errhdr;
1325 char buffer[sizeof(*errhdr) + sizeof(*addrparm)];
1326 struct sctp_endpoint *ep;
1327 struct sctp_packet *pkt;
1328 int len;
1329
1330 /* Build the error on the stack. We are way too malloc crazy
1331 * throughout the code today.
1332 */
1333 errhdr = (struct sctp_errhdr *)buffer;
1334 addrparm = (union sctp_addr_param *)errhdr->variable;
1335
1336 /* Copy into a parm format. */
1337 len = af->to_addr_param(ssa, addrparm);
1338 len += sizeof(*errhdr);
1339
1340 errhdr->cause = SCTP_ERROR_RESTART;
1341 errhdr->length = htons(len);
1342
1343 /* Assign to the control socket. */
1344 ep = sctp_sk(net->sctp.ctl_sock)->ep;
1345
1346 /* Association is NULL since this may be a restart attack and we
1347 * want to send back the attacker's vtag.
1348 */
1349 pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
1350
1351 if (!pkt)
1352 goto out;
1353 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
1354
1355 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
1356
1357 /* Discard the rest of the inbound packet. */
1358 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
1359
1360 out:
1361 /* Even if there is no memory, treat as a failure so
1362 * the packet will get dropped.
1363 */
1364 return 0;
1365 }
1366
1367 static bool list_has_sctp_addr(const struct list_head *list,
1368 union sctp_addr *ipaddr)
1369 {
1370 struct sctp_transport *addr;
1371
1372 list_for_each_entry(addr, list, transports) {
1373 if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
1374 return true;
1375 }
1376
1377 return false;
1378 }
1379 /* A restart is occurring, check to make sure no new addresses
1380 * are being added as we may be under a takeover attack.
1381 */
1382 static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1383 const struct sctp_association *asoc,
1384 struct sctp_chunk *init,
1385 struct sctp_cmd_seq *commands)
1386 {
1387 struct net *net = new_asoc->base.net;
1388 struct sctp_transport *new_addr;
1389 int ret = 1;
1390
1391 /* Implementor's Guide - Section 5.2.2
1392 * ...
1393 * Before responding the endpoint MUST check to see if the
1394 * unexpected INIT adds new addresses to the association. If new
1395 * addresses are added to the association, the endpoint MUST respond
1396 * with an ABORT.
1397 */
1398
1399 /* Search through all current addresses and make sure
1400 * we aren't adding any new ones.
1401 */
1402 list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
1403 transports) {
1404 if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
1405 &new_addr->ipaddr)) {
1406 sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
1407 commands);
1408 ret = 0;
1409 break;
1410 }
1411 }
1412
1413 /* Return success if all addresses were found. */
1414 return ret;
1415 }
1416
1417 /* Populate the verification/tie tags based on overlapping INIT
1418 * scenario.
1419 *
1420 * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
1421 */
1422 static void sctp_tietags_populate(struct sctp_association *new_asoc,
1423 const struct sctp_association *asoc)
1424 {
1425 switch (asoc->state) {
1426
1427 /* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
1428
1429 case SCTP_STATE_COOKIE_WAIT:
1430 new_asoc->c.my_vtag = asoc->c.my_vtag;
1431 new_asoc->c.my_ttag = asoc->c.my_vtag;
1432 new_asoc->c.peer_ttag = 0;
1433 break;
1434
1435 case SCTP_STATE_COOKIE_ECHOED:
1436 new_asoc->c.my_vtag = asoc->c.my_vtag;
1437 new_asoc->c.my_ttag = asoc->c.my_vtag;
1438 new_asoc->c.peer_ttag = asoc->c.peer_vtag;
1439 break;
1440
1441 /* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
1442 * COOKIE-WAIT and SHUTDOWN-ACK-SENT
1443 */
1444 default:
1445 new_asoc->c.my_ttag = asoc->c.my_vtag;
1446 new_asoc->c.peer_ttag = asoc->c.peer_vtag;
1447 break;
1448 }
1449
1450 /* Other parameters for the endpoint SHOULD be copied from the
1451 * existing parameters of the association (e.g. number of
1452 * outbound streams) into the INIT ACK and cookie.
1453 */
1454 new_asoc->rwnd = asoc->rwnd;
1455 new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
1456 new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
1457 new_asoc->c.initial_tsn = asoc->c.initial_tsn;
1458 }
1459
1460 /*
1461 * Compare vtag/tietag values to determine unexpected COOKIE-ECHO
1462 * handling action.
1463 *
1464 * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
1465 *
1466 * Returns value representing action to be taken. These action values
1467 * correspond to Action/Description values in RFC 2960, Table 2.
1468 */
1469 static char sctp_tietags_compare(struct sctp_association *new_asoc,
1470 const struct sctp_association *asoc)
1471 {
1472 /* In this case, the peer may have restarted. */
1473 if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
1474 (asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
1475 (asoc->c.my_vtag == new_asoc->c.my_ttag) &&
1476 (asoc->c.peer_vtag == new_asoc->c.peer_ttag))
1477 return 'A';
1478
1479 /* Collision case B. */
1480 if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
1481 ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
1482 (0 == asoc->c.peer_vtag))) {
1483 return 'B';
1484 }
1485
1486 /* Collision case D. */
1487 if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
1488 (asoc->c.peer_vtag == new_asoc->c.peer_vtag))
1489 return 'D';
1490
1491 /* Collision case C. */
1492 if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
1493 (asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
1494 (0 == new_asoc->c.my_ttag) &&
1495 (0 == new_asoc->c.peer_ttag))
1496 return 'C';
1497
1498 /* No match to any of the special cases; discard this packet. */
1499 return 'E';
1500 }
1501
1502 /* Common helper routine for both duplicate and simultaneous INIT
1503 * chunk handling.
1504 */
1505 static enum sctp_disposition sctp_sf_do_unexpected_init(
1506 struct net *net,
1507 const struct sctp_endpoint *ep,
1508 const struct sctp_association *asoc,
1509 const union sctp_subtype type,
1510 void *arg,
1511 struct sctp_cmd_seq *commands)
1512 {
1513 struct sctp_chunk *chunk = arg, *repl, *err_chunk;
1514 struct sctp_unrecognized_param *unk_param;
1515 struct sctp_association *new_asoc;
1516 enum sctp_disposition retval;
1517 struct sctp_packet *packet;
1518 int len;
1519
1520 /* Update socket peer label if first association. */
1521 if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
1522 chunk->skb))
1523 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1524
1525 /* 6.10 Bundling
1526 * An endpoint MUST NOT bundle INIT, INIT ACK or
1527 * SHUTDOWN COMPLETE with any other chunks.
1528 *
1529 * IG Section 2.11.2
1530 * Furthermore, we require that the receiver of an INIT chunk MUST
1531 * enforce these rules by silently discarding an arriving packet
1532 * with an INIT chunk that is bundled with other chunks.
1533 */
1534 if (!chunk->singleton)
1535 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1536
1537 /* Make sure that the INIT chunk has a valid length. */
1538 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1539 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1540
1541 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
1542 * Tag.
1543 */
1544 if (chunk->sctp_hdr->vtag != 0)
1545 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
1546
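/* The INIT arrived over a UDP encapsulation port that differs from
 * the one recorded for this transport; let the dedicated handler
 * deal with the new encapsulation port.
 */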
1547 if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
1548 return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
1549
1550 /* Grab the INIT header. */
1551 chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
1552
1553 /* Tag the variable length parameters. */
1554 chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
1555
1556 /* Verify the INIT chunk before processing it. */
1557 err_chunk = NULL;
1558 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
1559 (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
1560 &err_chunk)) {
1561 /* This chunk contains a fatal error. It is to be discarded.
1562 * Send an ABORT, with causes if there are any.
1563 */
1564 if (err_chunk) {
1565 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
1566 (__u8 *)(err_chunk->chunk_hdr) +
1567 sizeof(struct sctp_chunkhdr),
1568 ntohs(err_chunk->chunk_hdr->length) -
1569 sizeof(struct sctp_chunkhdr));
1570
1571 if (packet) {
1572 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
1573 SCTP_PACKET(packet));
1574 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
1575 retval = SCTP_DISPOSITION_CONSUME;
1576 } else {
1577 retval = SCTP_DISPOSITION_NOMEM;
1578 }
1579 goto cleanup;
1580 } else {
1581 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
1582 commands);
1583 }
1584 }
1585
1586 /*
1587 * Other parameters for the endpoint SHOULD be copied from the
1588 * existing parameters of the association (e.g. number of
1589 * outbound streams) into the INIT ACK and cookie.
1590 * FIXME: We are copying parameters from the endpoint not the
1591 * association.
1592 */
1593 new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
1594 if (!new_asoc)
1595 goto nomem;
1596
1597 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
1598 sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
1599 goto nomem;
1600
1601 /* In the outbound INIT ACK the endpoint MUST copy its current
1602 * Verification Tag and Peer's Verification Tag into a reserved
1603 * place (local tie-tag and peer tie-tag) within the state cookie.
1604 */
1605 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
1606 (struct sctp_init_chunk *)chunk->chunk_hdr,
1607 GFP_ATOMIC))
1608 goto nomem;
1609
1610 /* Make sure no new addresses are being added during the
1611 * restart. Do not do this check for COOKIE-WAIT state,
1612 * since there are no peer addresses to check against.
1613 * Upon return an ABORT will have been sent if needed.
1614 */
1615 if (!sctp_state(asoc, COOKIE_WAIT)) {
1616 if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
1617 commands)) {
1618 retval = SCTP_DISPOSITION_CONSUME;
1619 goto nomem_retval;
1620 }
1621 }
1622
1623 sctp_tietags_populate(new_asoc, asoc);
1624
1625 /* B) "Z" shall respond immediately with an INIT ACK chunk. */
1626
1627 /* If there are errors that need to be reported for unknown parameters,
1628 * make sure to reserve enough room in the INIT ACK for them.
1629 */
1630 len = 0;
1631 if (err_chunk) {
1632 len = ntohs(err_chunk->chunk_hdr->length) -
1633 sizeof(struct sctp_chunkhdr);
1634 }
1635
1636 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
1637 if (!repl)
1638 goto nomem;
1639
1640 /* If there are errors that need to be reported for unknown parameters,
1641 * include them in the outgoing INIT ACK as "Unrecognized parameter"
1642 * parameter.
1643 */
1644 if (err_chunk) {
1645 /* Get the "Unrecognized parameter" parameter(s) out of the
1646 * ERROR chunk generated by sctp_verify_init(). Since the
1647 * error cause code for "unknown parameter" and the
1648 * "Unrecognized parameter" type is the same, we can
1649 * construct the parameters in INIT ACK by copying the
1650 * ERROR causes over.
1651 */
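/* Both happen to share code point 8 (the "Unrecognized Parameters"
 * error cause and the "Unrecognized Parameter" parameter type in
 * RFC 4960), which is what makes the straight copy below safe.
 */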
1652 unk_param = (struct sctp_unrecognized_param *)
1653 ((__u8 *)(err_chunk->chunk_hdr) +
1654 sizeof(struct sctp_chunkhdr));
1655 /* Replace the cause code with the "Unrecognized parameter"
1656 * parameter type.
1657 */
1658 sctp_addto_chunk(repl, len, unk_param);
1659 }
1660
1661 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
1662 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1663
1664 /*
1665 * Note: After sending out INIT ACK with the State Cookie parameter,
1666 * "Z" MUST NOT allocate any resources for this new association.
1667 * Otherwise, "Z" will be vulnerable to resource attacks.
1668 */
1669 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
1670 retval = SCTP_DISPOSITION_CONSUME;
1671
1672 return retval;
1673
1674 nomem:
1675 retval = SCTP_DISPOSITION_NOMEM;
1676 nomem_retval:
1677 if (new_asoc)
1678 sctp_association_free(new_asoc);
1679 cleanup:
1680 if (err_chunk)
1681 sctp_chunk_free(err_chunk);
1682 return retval;
1683 }
1684
1685 /*
1686 * Handle simultaneous INIT.
1687 * This means we sent an INIT and then received an INIT from
1688 * our peer.
1689 *
1690 * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
1691 * This usually indicates an initialization collision, i.e., each
1692 * endpoint is attempting, at about the same time, to establish an
1693 * association with the other endpoint.
1694 *
1695 * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
1696 * endpoint MUST respond with an INIT ACK using the same parameters it
1697 * sent in its original INIT chunk (including its Verification Tag,
1698 * unchanged). These original parameters are combined with those from the
1699 * newly received INIT chunk. The endpoint shall also generate a State
1700 * Cookie with the INIT ACK. The endpoint uses the parameters sent in its
1701 * INIT to calculate the State Cookie.
1702 *
1703 * After that, the endpoint MUST NOT change its state, the T1-init
1704 * timer shall be left running and the corresponding TCB MUST NOT be
1705 * destroyed. The normal procedures for handling State Cookies when
1706 * a TCB exists will resolve the duplicate INITs to a single association.
1707 *
1708 * For an endpoint that is in the COOKIE-ECHOED state it MUST populate
1709 * its Tie-Tags with the Tag information of itself and its peer (see
1710 * section 5.2.2 for a description of the Tie-Tags).
1711 *
1712 * Verification Tag: Not explicit, but an INIT cannot have a valid
1713 * verification tag, so we skip the check.
1714 *
1715 * Inputs
1716 * (endpoint, asoc, chunk)
1717 *
1718 * Outputs
1719 * (asoc, reply_msg, msg_up, timers, counters)
1720 *
1721 * The return value is the disposition of the chunk.
1722 */
1723 enum sctp_disposition sctp_sf_do_5_2_1_siminit(
1724 struct net *net,
1725 const struct sctp_endpoint *ep,
1726 const struct sctp_association *asoc,
1727 const union sctp_subtype type,
1728 void *arg,
1729 struct sctp_cmd_seq *commands)
1730 {
1731 /* Call helper to do the real work for both simultaneous and
1732 * duplicate INIT chunk handling.
1733 */
1734 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
1735 }
1736
1737 /*
1738 * Handle duplicated INIT messages. These are usually delayed
1739 * retransmissions.
1740 *
1741 * Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
1742 * COOKIE-ECHOED and COOKIE-WAIT
1743 *
1744 * Unless otherwise stated, upon reception of an unexpected INIT for
1745 * this association, the endpoint shall generate an INIT ACK with a
1746 * State Cookie. In the outbound INIT ACK the endpoint MUST copy its
1747 * current Verification Tag and peer's Verification Tag into a reserved
1748 * place within the state cookie. We shall refer to these locations as
1749 * the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
1750 * containing this INIT ACK MUST carry a Verification Tag value equal to
1751 * the Initiation Tag found in the unexpected INIT. And the INIT ACK
1752 * MUST contain a new Initiation Tag (randomly generated see Section
1753 * 5.3.1). Other parameters for the endpoint SHOULD be copied from the
1754 * existing parameters of the association (e.g. number of outbound
1755 * streams) into the INIT ACK and cookie.
1756 *
1757 * After sending out the INIT ACK, the endpoint shall take no further
1758 * actions, i.e., the existing association, including its current state,
1759 * and the corresponding TCB MUST NOT be changed.
1760 *
1761 * Note: Only when a TCB exists and the association is not in a COOKIE-
1762 * WAIT state are the Tie-Tags populated. For a normal association INIT
1763 * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
1764 * set to 0 (indicating that no previous TCB existed). The INIT ACK and
1765 * State Cookie are populated as specified in section 5.2.1.
1766 *
1767 * Verification Tag: Not specified, but an INIT has no way of knowing
1768 * what the verification tag could be, so we ignore it.
1769 *
1770 * Inputs
1771 * (endpoint, asoc, chunk)
1772 *
1773 * Outputs
1774 * (asoc, reply_msg, msg_up, timers, counters)
1775 *
1776 * The return value is the disposition of the chunk.
1777 */
1778 enum sctp_disposition sctp_sf_do_5_2_2_dupinit(
1779 struct net *net,
1780 const struct sctp_endpoint *ep,
1781 const struct sctp_association *asoc,
1782 const union sctp_subtype type,
1783 void *arg,
1784 struct sctp_cmd_seq *commands)
1785 {
1786 /* Call helper to do the real work for both simultaneous and
1787 * duplicate INIT chunk handling.
1788 */
1789 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
1790 }
1791
1792
1793 /*
1794 * Unexpected INIT-ACK handler.
1795 *
1796 * Section 5.2.3
1797 * If an INIT ACK is received by an endpoint in any state other than the
1798 * COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
1799 * An unexpected INIT ACK usually indicates the processing of an old or
1800 * duplicated INIT chunk.
1801 */
1802 enum sctp_disposition sctp_sf_do_5_2_3_initack(
1803 struct net *net,
1804 const struct sctp_endpoint *ep,
1805 const struct sctp_association *asoc,
1806 const union sctp_subtype type,
1807 void *arg,
1808 struct sctp_cmd_seq *commands)
1809 {
1810 /* Per the above section, we'll discard the chunk if we matched a real
1811 * endpoint. If it arrived on the control socket, treat it as an OOTB INIT-ACK.
1812 */
1813 if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
1814 return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
1815 else
1816 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
1817 }
1818
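/* Fold the contents of the temporary association built from the peer's
 * COOKIE ECHO into the current association. Returns 0 on success; if
 * sctp_assoc_update() fails, queue an ABORT carrying an "Out of Resource"
 * cause, fail the association and return -ENOMEM so the caller can bail out.
 */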
1819 static int sctp_sf_do_assoc_update(struct sctp_association *asoc,
1820 struct sctp_association *new,
1821 struct sctp_cmd_seq *cmds)
1822 {
1823 struct net *net = asoc->base.net;
1824 struct sctp_chunk *abort;
1825
1826 if (!sctp_assoc_update(asoc, new))
1827 return 0;
1828
1829 abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
1830 if (abort) {
1831 sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
1832 sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
1833 }
1834 sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
1835 sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
1836 SCTP_PERR(SCTP_ERROR_RSRC_LOW));
1837 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
1838 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
1839
1840 return -ENOMEM;
1841 }
1842
1843 /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
1844 *
1845 * Section 5.2.4
1846 * A) In this case, the peer may have restarted.
1847 */
1848 static enum sctp_disposition sctp_sf_do_dupcook_a(
1849 struct net *net,
1850 const struct sctp_endpoint *ep,
1851 const struct sctp_association *asoc,
1852 struct sctp_chunk *chunk,
1853 struct sctp_cmd_seq *commands,
1854 struct sctp_association *new_asoc)
1855 {
1856 struct sctp_init_chunk *peer_init;
1857 enum sctp_disposition disposition;
1858 struct sctp_ulpevent *ev;
1859 struct sctp_chunk *repl;
1860 struct sctp_chunk *err;
1861
1862 /* new_asoc is a brand-new association, so these are not yet
1863 * side effects--it is safe to run them here.
1864 */
1865 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1866
1867 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1868 GFP_ATOMIC))
1869 goto nomem;
1870
1871 if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
1872 goto nomem;
1873
1874 if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1875 return SCTP_DISPOSITION_DISCARD;
1876
1877 /* Make sure no new addresses are being added during the
1878 * restart. Though this is a pretty complicated attack
1879 * since you'd have to get inside the cookie.
1880 */
1881 if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
1882 return SCTP_DISPOSITION_CONSUME;
1883
1884 /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
1885 * the peer has restarted (Action A), it MUST NOT setup a new
1886 * association but instead resend the SHUTDOWN ACK and send an ERROR
1887 * chunk with a "Cookie Received while Shutting Down" error cause to
1888 * its peer.
1889 */
1890 if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
1891 disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
1892 SCTP_ST_CHUNK(chunk->chunk_hdr->type),
1893 chunk, commands);
1894 if (SCTP_DISPOSITION_NOMEM == disposition)
1895 goto nomem;
1896
1897 err = sctp_make_op_error(asoc, chunk,
1898 SCTP_ERROR_COOKIE_IN_SHUTDOWN,
1899 NULL, 0, 0);
1900 if (err)
1901 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1902 SCTP_CHUNK(err));
1903
1904 return SCTP_DISPOSITION_CONSUME;
1905 }
1906
1907 /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
1908 * data. Consider the optional choice of resending this data.
1909 */
1910 sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
1911 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1912 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
1913 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
1914
1915 /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
1916 * and ASCONF-ACK cache.
1917 */
1918 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1919 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
1920 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
1921
1922 /* Update the content of current association. */
1923 if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
1924 goto nomem;
1925
1926 repl = sctp_make_cookie_ack(asoc, chunk);
1927 if (!repl)
1928 goto nomem;
1929
1930 /* Report association restart to upper layer. */
1931 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
1932 asoc->c.sinit_num_ostreams,
1933 asoc->c.sinit_max_instreams,
1934 NULL, GFP_ATOMIC);
1935 if (!ev)
1936 goto nomem_ev;
1937
1938 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1939 if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
1940 sctp_state(asoc, SHUTDOWN_SENT)) &&
1941 (sctp_sstate(asoc->base.sk, CLOSING) ||
1942 sock_flag(asoc->base.sk, SOCK_DEAD))) {
1943 /* If the socket has been closed by user, don't
1944 * transition to ESTABLISHED. Instead trigger SHUTDOWN
1945 * bundled with COOKIE_ACK.
1946 */
1947 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1948 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
1949 SCTP_ST_CHUNK(0), repl,
1950 commands);
1951 } else {
1952 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1953 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1954 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1955 }
1956 return SCTP_DISPOSITION_CONSUME;
1957
1958 nomem_ev:
1959 sctp_chunk_free(repl);
1960 nomem:
1961 return SCTP_DISPOSITION_NOMEM;
1962 }
1963
1964 /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
1965 *
1966 * Section 5.2.4
1967 * B) In this case, both sides may be attempting to start an association
1968 * at about the same time but the peer endpoint started its INIT
1969 * after responding to the local endpoint's INIT
1970 */
1971 /* This case represents an initialization collision. */
1972 static enum sctp_disposition sctp_sf_do_dupcook_b(
1973 struct net *net,
1974 const struct sctp_endpoint *ep,
1975 const struct sctp_association *asoc,
1976 struct sctp_chunk *chunk,
1977 struct sctp_cmd_seq *commands,
1978 struct sctp_association *new_asoc)
1979 {
1980 struct sctp_init_chunk *peer_init;
1981 struct sctp_chunk *repl;
1982
1983 /* new_asoc is a brand-new association, so these are not yet
1984 * side effects--it is safe to run them here.
1985 */
1986 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1987 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1988 GFP_ATOMIC))
1989 goto nomem;
1990
1991 if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
1992 goto nomem;
1993
1994 if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1995 return SCTP_DISPOSITION_DISCARD;
1996
1997 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1998 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1999 if (asoc->state < SCTP_STATE_ESTABLISHED)
2000 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
2001 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
2002
2003 /* Update the content of current association. */
2004 if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
2005 goto nomem;
2006
2007 repl = sctp_make_cookie_ack(asoc, chunk);
2008 if (!repl)
2009 goto nomem;
2010
2011 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
2012
2013 /* RFC 2960 5.1 Normal Establishment of an Association
2014 *
2015 * D) IMPLEMENTATION NOTE: An implementation may choose to
2016 * send the Communication Up notification to the SCTP user
2017 * upon reception of a valid COOKIE ECHO chunk.
2018 *
2019 * Sadly, this needs to be implemented as a side-effect, because
2020 * we are not guaranteed to have set the association id of the real
2021 * association and so these notifications need to be delayed until
2022 * the association id is allocated.
2023 */
2024
2025 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
2026
2027 /* Sockets API Draft Section 5.3.1.6
2028 * When a peer sends an Adaptation Layer Indication parameter, SCTP
2029 * delivers this notification to inform the application of the
2030 * peer's requested adaptation layer.
2031 *
2032 * This also needs to be done as a side effect for the same reason as
2033 * above.
2034 */
2035 if (asoc->peer.adaptation_ind)
2036 sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
2037
2038 if (!asoc->peer.auth_capable)
2039 sctp_add_cmd_sf(commands, SCTP_CMD_PEER_NO_AUTH, SCTP_NULL());
2040
2041 return SCTP_DISPOSITION_CONSUME;
2042
2043 nomem:
2044 return SCTP_DISPOSITION_NOMEM;
2045 }
2046
2047 /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
2048 *
2049 * Section 5.2.4
2050 * C) In this case, the local endpoint's cookie has arrived late.
2051 * Before it arrived, the local endpoint sent an INIT and received an
2052 * INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
2053 * but a new tag of its own.
2054 */
2055 /* This case represents an initialization collision. */
2056 static enum sctp_disposition sctp_sf_do_dupcook_c(
2057 struct net *net,
2058 const struct sctp_endpoint *ep,
2059 const struct sctp_association *asoc,
2060 struct sctp_chunk *chunk,
2061 struct sctp_cmd_seq *commands,
2062 struct sctp_association *new_asoc)
2063 {
2064 /* The cookie should be silently discarded.
2065 * The endpoint SHOULD NOT change states and should leave
2066 * any timers running.
2067 */
2068 return SCTP_DISPOSITION_DISCARD;
2069 }
2070
2071 /* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
2072 *
2073 * Section 5.2.4
2074 *
2075 * D) When both local and remote tags match the endpoint should always
2076 * enter the ESTABLISHED state, if it has not already done so.
2077 */
2078 /* This case represents an initialization collision. */
2079 static enum sctp_disposition sctp_sf_do_dupcook_d(
2080 struct net *net,
2081 const struct sctp_endpoint *ep,
2082 const struct sctp_association *asoc,
2083 struct sctp_chunk *chunk,
2084 struct sctp_cmd_seq *commands,
2085 struct sctp_association *new_asoc)
2086 {
2087 struct sctp_ulpevent *ev = NULL, *ai_ev = NULL, *auth_ev = NULL;
2088 struct sctp_chunk *repl;
2089
2090 /* Clarification from Implementor's Guide:
2091 * D) When both local and remote tags match the endpoint should
2092 * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
2093 * It should stop any cookie timer that may be running and send
2094 * a COOKIE ACK.
2095 */
2096
2097 if (!sctp_auth_chunk_verify(net, chunk, asoc))
2098 return SCTP_DISPOSITION_DISCARD;
2099
2100 /* Don't accidentally move back into established state. */
2101 if (asoc->state < SCTP_STATE_ESTABLISHED) {
2102 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2103 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
2104 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2105 SCTP_STATE(SCTP_STATE_ESTABLISHED));
2106 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
2107 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
2108 SCTP_NULL());
2109
2110 /* RFC 2960 5.1 Normal Establishment of an Association
2111 *
2112 * D) IMPLEMENTATION NOTE: An implementation may choose
2113 * to send the Communication Up notification to the
2114 * SCTP user upon reception of a valid COOKIE
2115 * ECHO chunk.
2116 */
2117 ev = sctp_ulpevent_make_assoc_change(asoc, 0,
2118 SCTP_COMM_UP, 0,
2119 asoc->c.sinit_num_ostreams,
2120 asoc->c.sinit_max_instreams,
2121 NULL, GFP_ATOMIC);
2122 if (!ev)
2123 goto nomem;
2124
2125 /* Sockets API Draft Section 5.3.1.6
2126 * When a peer sends an Adaptation Layer Indication parameter,
2127 * SCTP delivers this notification to inform the application
2128 * of the peer's requested adaptation layer.
2129 */
2130 if (asoc->peer.adaptation_ind) {
2131 ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
2132 GFP_ATOMIC);
2133 if (!ai_ev)
2134 goto nomem;
2135
2136 }
2137
2138 if (!asoc->peer.auth_capable) {
2139 auth_ev = sctp_ulpevent_make_authkey(asoc, 0,
2140 SCTP_AUTH_NO_AUTH,
2141 GFP_ATOMIC);
2142 if (!auth_ev)
2143 goto nomem;
2144 }
2145 }
2146
2147 repl = sctp_make_cookie_ack(asoc, chunk);
2148 if (!repl)
2149 goto nomem;
2150
2151 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
2152
2153 if (ev)
2154 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
2155 SCTP_ULPEVENT(ev));
2156 if (ai_ev)
2157 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
2158 SCTP_ULPEVENT(ai_ev));
2159 if (auth_ev)
2160 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
2161 SCTP_ULPEVENT(auth_ev));
2162
2163 return SCTP_DISPOSITION_CONSUME;
2164
2165 nomem:
2166 if (auth_ev)
2167 sctp_ulpevent_free(auth_ev);
2168 if (ai_ev)
2169 sctp_ulpevent_free(ai_ev);
2170 if (ev)
2171 sctp_ulpevent_free(ev);
2172 return SCTP_DISPOSITION_NOMEM;
2173 }
2174
2175 /*
2176 * Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying
2177 * chunk was retransmitted and then delayed in the network.
2178 *
2179 * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
2180 *
2181 * Verification Tag: None. Do cookie validation.
2182 *
2183 * Inputs
2184 * (endpoint, asoc, chunk)
2185 *
2186 * Outputs
2187 * (asoc, reply_msg, msg_up, timers, counters)
2188 *
2189 * The return value is the disposition of the chunk.
2190 */
2191 enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
2192 struct net *net,
2193 const struct sctp_endpoint *ep,
2194 const struct sctp_association *asoc,
2195 const union sctp_subtype type,
2196 void *arg,
2197 struct sctp_cmd_seq *commands)
2198 {
2199 struct sctp_association *new_asoc;
2200 struct sctp_chunk *chunk = arg;
2201 enum sctp_disposition retval;
2202 struct sctp_chunk *err_chk_p;
2203 int error = 0;
2204 char action;
2205
2206 /* Make sure that the chunk has a valid length from the protocol
2207 * perspective. In this case check to make sure we have at least
2208 * enough for the chunk header. Cookie length verification is
2209 * done later.
2210 */
2211 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
2212 if (!sctp_vtag_verify(chunk, asoc))
2213 asoc = NULL;
2214 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
2215 }
2216
2217 /* "Decode" the chunk. We have no optional parameters so we
2218 * are in good shape.
2219 */
2220 chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
2221 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
2222 sizeof(struct sctp_chunkhdr)))
2223 goto nomem;
2224
2225 /* Per RFC 2960 5.2.4, step 3: if both Verification Tags in the State Cookie
2226 * of a duplicate COOKIE ECHO match the Verification Tags of the
2227 * current association, consider the State Cookie valid even if
2228 * the lifespan is exceeded.
2229 */
2230 new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
2231 &err_chk_p);
2232
2233 /* FIXME:
2234 * If the re-build failed, what is the proper error path
2235 * from here?
2236 *
2237 * [We should abort the association. --piggy]
2238 */
2239 if (!new_asoc) {
2240 /* FIXME: Several errors are possible. A bad cookie should
2241 * be silently discarded, but think about logging it too.
2242 */
2243 switch (error) {
2244 case -SCTP_IERROR_NOMEM:
2245 goto nomem;
2246
2247 case -SCTP_IERROR_STALE_COOKIE:
2248 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
2249 err_chk_p);
2250 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2251 case -SCTP_IERROR_BAD_SIG:
2252 default:
2253 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2254 }
2255 }
2256
2257 /* Update socket peer label if first association. */
2258 if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
2259 chunk->skb)) {
2260 sctp_association_free(new_asoc);
2261 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2262 }
2263
2264 /* Set temp so that it won't be added to the hashtable. */
2265 new_asoc->temp = 1;
2266
2267 /* Compare the tie_tag in cookie with the verification tag of
2268 * current association.
2269 */
2270 action = sctp_tietags_compare(new_asoc, asoc);
2271
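/* The mapping below condenses RFC 2960, Section 5.2.4, Table 2
 * (M = the tag matches the existing TCB, X = it does not,
 * 0 = the tie-tag is zero):
 *
 *   Local Tag  Peer's Tag  Local-Tie-Tag  Peer's-Tie-Tag  Action
 *       X          X             M               M          A
 *       M          X            any             any         B
 *       M          0            any             any         B
 *       X          M             0               0          C
 *       M          M            any             any         D
 *
 * Any other combination falls through to the default case below and
 * the packet is discarded.
 */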
2272 switch (action) {
2273 case 'A': /* Association restart. */
2274 retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
2275 new_asoc);
2276 break;
2277
2278 case 'B': /* Collision case B. */
2279 retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
2280 new_asoc);
2281 break;
2282
2283 case 'C': /* Collision case C. */
2284 retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
2285 new_asoc);
2286 break;
2287
2288 case 'D': /* Collision case D. */
2289 retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
2290 new_asoc);
2291 break;
2292
2293 default: /* Discard packet for all others. */
2294 retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2295 break;
2296 }
2297
2298 /* Delete the temporary new association. */
2299 sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
2300 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
2301
2302 /* Restore the association pointer to provide the SCTP command
2303 * interpreter with a valid context in case it needs to manipulate
2304 * the queues. */
2305 sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
2306 SCTP_ASOC((struct sctp_association *)asoc));
2307
2308 return retval;
2309
2310 nomem:
2311 return SCTP_DISPOSITION_NOMEM;
2312 }
2313
2314 /*
2315 * Process an ABORT. (SHUTDOWN-PENDING state)
2316 *
2317 * See sctp_sf_do_9_1_abort().
2318 */
2319 enum sctp_disposition sctp_sf_shutdown_pending_abort(
2320 struct net *net,
2321 const struct sctp_endpoint *ep,
2322 const struct sctp_association *asoc,
2323 const union sctp_subtype type,
2324 void *arg,
2325 struct sctp_cmd_seq *commands)
2326 {
2327 struct sctp_chunk *chunk = arg;
2328
2329 if (!sctp_vtag_verify_either(chunk, asoc))
2330 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2331
2332 /* Make sure that the ABORT chunk has a valid length.
2333 * Since this is an ABORT chunk, we have to discard it
2334 * because of the following text:
2335 * RFC 2960, Section 3.3.7
2336 * If an endpoint receives an ABORT with a format error or for an
2337 * association that doesn't exist, it MUST silently discard it.
2338 * Because the length is "invalid", we can't really discard just
2339 * this chunk, as we do not know its true length. So, to be safe,
2340 * discard the whole packet.
2341 */
2342 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
2343 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2344
2345 /* ADD-IP: Special case for ABORT chunks
2346 * F4) One special consideration is that ABORT Chunks arriving
2347 * destined to the IP address being deleted MUST be
2348 * ignored (see Section 5.3.1 for further details).
2349 */
2350 if (SCTP_ADDR_DEL ==
2351 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2352 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2353
2354 if (!sctp_err_chunk_valid(chunk))
2355 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2356
2357 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2358 }
2359
2360 /*
2361 * Process an ABORT. (SHUTDOWN-SENT state)
2362 *
2363 * See sctp_sf_do_9_1_abort().
2364 */
2365 enum sctp_disposition sctp_sf_shutdown_sent_abort(
2366 struct net *net,
2367 const struct sctp_endpoint *ep,
2368 const struct sctp_association *asoc,
2369 const union sctp_subtype type,
2370 void *arg,
2371 struct sctp_cmd_seq *commands)
2372 {
2373 struct sctp_chunk *chunk = arg;
2374
2375 if (!sctp_vtag_verify_either(chunk, asoc))
2376 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2377
2378 /* Make sure that the ABORT chunk has a valid length.
2379 * Since this is an ABORT chunk, we have to discard it
2380 * because of the following text:
2381 * RFC 2960, Section 3.3.7
2382 * If an endpoint receives an ABORT with a format error or for an
2383 * association that doesn't exist, it MUST silently discard it.
2384 * Because the length is "invalid", we can't really discard just
2385 * this chunk, as we do not know its true length. So, to be safe,
2386 * discard the whole packet.
2387 */
2388 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
2389 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2390
2391 /* ADD-IP: Special case for ABORT chunks
2392 * F4) One special consideration is that ABORT Chunks arriving
2393 * destined to the IP address being deleted MUST be
2394 * ignored (see Section 5.3.1 for further details).
2395 */
2396 if (SCTP_ADDR_DEL ==
2397 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2398 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2399
2400 if (!sctp_err_chunk_valid(chunk))
2401 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2402
2403 /* Stop the T2-shutdown timer. */
2404 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2405 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
2406
2407 /* Stop the T5-shutdown guard timer. */
2408 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2409 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
2410
2411 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2412 }
2413
2414 /*
2415 * Process an ABORT. (SHUTDOWN-ACK-SENT state)
2416 *
2417 * See sctp_sf_do_9_1_abort().
2418 */
2419 enum sctp_disposition sctp_sf_shutdown_ack_sent_abort(
2420 struct net *net,
2421 const struct sctp_endpoint *ep,
2422 const struct sctp_association *asoc,
2423 const union sctp_subtype type,
2424 void *arg,
2425 struct sctp_cmd_seq *commands)
2426 {
2427 /* The same T2 timer, so we should be able to use
2428 * the common function with the SHUTDOWN-SENT state.
2429 */
2430 return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
2431 }
2432
2433 /*
2434 * Handle an Error received in COOKIE_ECHOED state.
2435 *
2436 * Only handle the error type of stale COOKIE Error, the other errors will
2437 * be ignored.
2438 *
2439 * Inputs
2440 * (endpoint, asoc, chunk)
2441 *
2442 * Outputs
2443 * (asoc, reply_msg, msg_up, timers, counters)
2444 *
2445 * The return value is the disposition of the chunk.
2446 */
2447 enum sctp_disposition sctp_sf_cookie_echoed_err(
2448 struct net *net,
2449 const struct sctp_endpoint *ep,
2450 const struct sctp_association *asoc,
2451 const union sctp_subtype type,
2452 void *arg,
2453 struct sctp_cmd_seq *commands)
2454 {
2455 struct sctp_chunk *chunk = arg;
2456 struct sctp_errhdr *err;
2457
2458 if (!sctp_vtag_verify(chunk, asoc))
2459 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2460
2461 /* Make sure that the ERROR chunk has a valid length.
2462 * The parameter walking depends on this as well.
2463 */
2464 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk)))
2465 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2466 commands);
2467
2468 /* Process the error here */
2469 /* FUTURE FIXME: When PR-SCTP related and other optional
2470 * parms are emitted, this will have to change to handle multiple
2471 * errors.
2472 */
2473 sctp_walk_errors(err, chunk->chunk_hdr) {
2474 if (SCTP_ERROR_STALE_COOKIE == err->cause)
2475 return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
2476 arg, commands);
2477 }
2478
2479 /* It is possible to have malformed error causes, and that
2480 * will cause us to end the walk early. However, since
2481 * we are discarding the packet, there should be no adverse
2482 * effects.
2483 */
2484 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2485 }
2486
2487 /*
2488 * Handle a Stale COOKIE Error
2489 *
2490 * Section: 5.2.6 Handle Stale COOKIE Error
2491 * If the association is in the COOKIE-ECHOED state, the endpoint may elect
2492 * one of the following three alternatives.
2493 * ...
2494 * 3) Send a new INIT chunk to the endpoint, adding a Cookie
2495 * Preservative parameter requesting an extension to the lifetime of
2496 * the State Cookie. When calculating the time extension, an
2497 * implementation SHOULD use the RTT information measured based on the
2498 * previous COOKIE ECHO / ERROR exchange, and should add no more
2499 * than 1 second beyond the measured RTT, due to long State Cookie
2500 * lifetimes making the endpoint more subject to a replay attack.
2501 *
2502 * Verification Tag: Not explicit, but safe to ignore.
2503 *
2504 * Inputs
2505 * (endpoint, asoc, chunk)
2506 *
2507 * Outputs
2508 * (asoc, reply_msg, msg_up, timers, counters)
2509 *
2510 * The return value is the disposition of the chunk.
2511 */
2512 static enum sctp_disposition sctp_sf_do_5_2_6_stale(
2513 struct net *net,
2514 const struct sctp_endpoint *ep,
2515 const struct sctp_association *asoc,
2516 const union sctp_subtype type,
2517 void *arg,
2518 struct sctp_cmd_seq *commands)
2519 {
2520 int attempts = asoc->init_err_counter + 1;
2521 struct sctp_chunk *chunk = arg, *reply;
2522 struct sctp_cookie_preserve_param bht;
2523 struct sctp_bind_addr *bp;
2524 struct sctp_errhdr *err;
2525 u32 stale;
2526
2527 if (attempts > asoc->max_init_attempts) {
2528 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
2529 SCTP_ERROR(ETIMEDOUT));
2530 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
2531 SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
2532 return SCTP_DISPOSITION_DELETE_TCB;
2533 }
2534
2535 err = (struct sctp_errhdr *)(chunk->skb->data);
2536
2537 /* When calculating the time extension, an implementation
2538 * SHOULD use the RTT information measured based on the
2539 * previous COOKIE ECHO / ERROR exchange, and should add no
2540 * more than 1 second beyond the measured RTT, due to long
2541 * State Cookie lifetimes making the endpoint more subject to
2542 * a replay attack.
2543 * The Measure of Staleness is in usec (1/1000000 sec);
2544 * the Suggested Cookie Life-span Increment is in msec
2545 * (1/1000 sec).
2546 * In general, if you use the suggested cookie life, the value
2547 * found in the field of measure of staleness should be doubled
2548 * to give ample time to retransmit the new cookie and thus
2549 * yield a higher probability of success on the reattempt.
2550 */
2551 stale = ntohl(*(__be32 *)((u8 *)err + sizeof(*err)));
2552 stale = (stale * 2) / 1000;
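/* For example, a reported staleness of 250000 usec (250 ms) gives
 * 250000 * 2 / 1000 = 500, i.e. a suggested lifespan increment of
 * 500 msec in the Cookie Preservative parameter built below.
 */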
2553
2554 bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
2555 bht.param_hdr.length = htons(sizeof(bht));
2556 bht.lifespan_increment = htonl(stale);
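/* What ends up on the wire is the 8-byte Cookie Preservative
 * parameter (type 9) defined by RFC 2960:
 *
 *   0                   1                   2                   3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *  +-------------------------------+-------------------------------+
 *  |      Parameter Type = 9       |     Parameter Length = 8      |
 *  +-------------------------------+-------------------------------+
 *  |        Suggested Cookie Life-span Increment (msec.)           |
 *  +---------------------------------------------------------------+
 */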
2557
2558 /* Build that new INIT chunk. */
2559 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
2560 reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
2561 if (!reply)
2562 goto nomem;
2563
2564 sctp_addto_chunk(reply, sizeof(bht), &bht);
2565
2566 /* Clear peer's init_tag cached in assoc as we are sending a new INIT */
2567 sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
2568
2569 /* Stop pending T3-rtx and heartbeat timers */
2570 sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
2571 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
2572
2573 /* Delete non-primary peer IP addresses since we are transitioning
2574 * back to the COOKIE-WAIT state.
2575 */
2576 sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
2577
2578 /* If we've sent any data bundled with COOKIE-ECHO, we will need to
2579 * resend it.
2580 */
2581 sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
2582 SCTP_TRANSPORT(asoc->peer.primary_path));
2583
2584 /* Cast away the const modifier, as we want to just
2585 * rerun it through as a side effect.
2586 */
2587 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());
2588
2589 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2590 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
2591 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2592 SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
2593 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
2594 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
2595
2596 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
2597
2598 return SCTP_DISPOSITION_CONSUME;
2599
2600 nomem:
2601 return SCTP_DISPOSITION_NOMEM;
2602 }
2603
2604 /*
2605 * Process an ABORT.
2606 *
2607 * Section: 9.1
2608 * After checking the Verification Tag, the receiving endpoint shall
2609 * remove the association from its record, and shall report the
2610 * termination to its upper layer.
2611 *
2612 * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
2613 * B) Rules for packet carrying ABORT:
2614 *
2615 * - The endpoint shall always fill in the Verification Tag field of the
2616 * outbound packet with the destination endpoint's tag value if it
2617 * is known.
2618 *
2619 * - If the ABORT is sent in response to an OOTB packet, the endpoint
2620 * MUST follow the procedure described in Section 8.4.
2621 *
2622 * - The receiver MUST accept the packet if the Verification Tag
2623 * matches either its own tag, OR the tag of its peer. Otherwise, the
2624 * receiver MUST silently discard the packet and take no further
2625 * action.
2626 *
2627 * Inputs
2628 * (endpoint, asoc, chunk)
2629 *
2630 * Outputs
2631 * (asoc, reply_msg, msg_up, timers, counters)
2632 *
2633 * The return value is the disposition of the chunk.
2634 */
2635 enum sctp_disposition sctp_sf_do_9_1_abort(
2636 struct net *net,
2637 const struct sctp_endpoint *ep,
2638 const struct sctp_association *asoc,
2639 const union sctp_subtype type,
2640 void *arg,
2641 struct sctp_cmd_seq *commands)
2642 {
2643 struct sctp_chunk *chunk = arg;
2644
2645 if (!sctp_vtag_verify_either(chunk, asoc))
2646 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2647
2648 /* Make sure that the ABORT chunk has a valid length.
2649 * Since this is an ABORT chunk, we have to discard it
2650 * because of the following text:
2651 * RFC 2960, Section 3.3.7
2652 * If an endpoint receives an ABORT with a format error or for an
2653 * association that doesn't exist, it MUST silently discard it.
2654 * Because the length is "invalid", we can't really discard just
2655 * this chunk, as we do not know its true length. So, to be safe,
2656 * discard the whole packet.
2657 */
2658 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
2659 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2660
2661 /* ADD-IP: Special case for ABORT chunks
2662 * F4) One special consideration is that ABORT Chunks arriving
2663 * destined to the IP address being deleted MUST be
2664 * ignored (see Section 5.3.1 for further details).
2665 */
2666 if (SCTP_ADDR_DEL ==
2667 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2668 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2669
2670 if (!sctp_err_chunk_valid(chunk))
2671 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2672
2673 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2674 }
2675
2676 static enum sctp_disposition __sctp_sf_do_9_1_abort(
2677 struct net *net,
2678 const struct sctp_endpoint *ep,
2679 const struct sctp_association *asoc,
2680 const union sctp_subtype type,
2681 void *arg,
2682 struct sctp_cmd_seq *commands)
2683 {
2684 __be16 error = SCTP_ERROR_NO_ERROR;
2685 struct sctp_chunk *chunk = arg;
2686 unsigned int len;
2687
2688 /* See if we have an error cause code in the chunk. */
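/* (A chunk header and an error cause header are 4 bytes each, so an
 * ABORT of at least 8 bytes carries at least one complete cause header
 * whose cause code can be reported upward.)
 */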
2689 len = ntohs(chunk->chunk_hdr->length);
2690 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2691 error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
2692
2693 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2694 /* ASSOC_FAILED will DELETE_TCB. */
2695 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
2696 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
2697 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
2698
2699 return SCTP_DISPOSITION_ABORT;
2700 }
2701
2702 /*
2703 * Process an ABORT. (COOKIE-WAIT state)
2704 *
2705 * See sctp_sf_do_9_1_abort() above.
2706 */
2707 enum sctp_disposition sctp_sf_cookie_wait_abort(
2708 struct net *net,
2709 const struct sctp_endpoint *ep,
2710 const struct sctp_association *asoc,
2711 const union sctp_subtype type,
2712 void *arg,
2713 struct sctp_cmd_seq *commands)
2714 {
2715 __be16 error = SCTP_ERROR_NO_ERROR;
2716 struct sctp_chunk *chunk = arg;
2717 unsigned int len;
2718
2719 if (!sctp_vtag_verify_either(chunk, asoc))
2720 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2721
2722 /* Make sure that the ABORT chunk has a valid length.
2723 * Since this is an ABORT chunk, we have to discard it
2724 * because of the following text:
2725 * RFC 2960, Section 3.3.7
2726 * If an endpoint receives an ABORT with a format error or for an
2727 * association that doesn't exist, it MUST silently discard it.
2728 * Because the length is "invalid", we can't really discard just
2729 * this chunk, as we do not know its true length. So, to be safe,
2730 * discard the whole packet.
2731 */
2732 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
2733 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2734
2735 /* See if we have an error cause code in the chunk. */
2736 len = ntohs(chunk->chunk_hdr->length);
2737 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2738 error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
2739
2740 return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
2741 chunk->transport);
2742 }
2743
2744 /*
2745 * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
2746 */
2747 enum sctp_disposition sctp_sf_cookie_wait_icmp_abort(
2748 struct net *net,
2749 const struct sctp_endpoint *ep,
2750 const struct sctp_association *asoc,
2751 const union sctp_subtype type,
2752 void *arg,
2753 struct sctp_cmd_seq *commands)
2754 {
2755 return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
2756 ENOPROTOOPT, asoc,
2757 (struct sctp_transport *)arg);
2758 }
2759
2760 /*
2761 * Process an ABORT. (COOKIE-ECHOED state)
2762 */
2763 enum sctp_disposition sctp_sf_cookie_echoed_abort(
2764 struct net *net,
2765 const struct sctp_endpoint *ep,
2766 const struct sctp_association *asoc,
2767 const union sctp_subtype type,
2768 void *arg,
2769 struct sctp_cmd_seq *commands)
2770 {
2771 /* There is a single T1 timer, so we should be able to use
2772 * the common function with the COOKIE-WAIT state.
2773 */
2774 return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
2775 }
2776
2777 /*
2778 * Stop T1 timer and abort association with "INIT failed".
2779 *
2780 * This is common code called by several sctp_sf_*_abort() functions above.
2781 */
2782 static enum sctp_disposition sctp_stop_t1_and_abort(
2783 struct net *net,
2784 struct sctp_cmd_seq *commands,
2785 __be16 error, int sk_err,
2786 const struct sctp_association *asoc,
2787 struct sctp_transport *transport)
2788 {
2789 pr_debug("%s: ABORT received (INIT)\n", __func__);
2790
2791 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2792 SCTP_STATE(SCTP_STATE_CLOSED));
2793 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
2794 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2795 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
2796 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
2797 /* CMD_INIT_FAILED will DELETE_TCB. */
2798 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
2799 SCTP_PERR(error));
2800
2801 return SCTP_DISPOSITION_ABORT;
2802 }
2803
2804 /*
2805 * sctp_sf_do_9_2_shut
2806 *
2807 * Section: 9.2
2808 * Upon the reception of the SHUTDOWN, the peer endpoint shall
2809 * - enter the SHUTDOWN-RECEIVED state,
2810 *
2811 * - stop accepting new data from its SCTP user
2812 *
2813 * - verify, by checking the Cumulative TSN Ack field of the chunk,
2814 * that all its outstanding DATA chunks have been received by the
2815 * SHUTDOWN sender.
2816 *
2817 * Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT
2818 * send a SHUTDOWN in response to a ULP request, and should discard
2819 * subsequent SHUTDOWN chunks.
2820 *
2821 * If there are still outstanding DATA chunks left, the SHUTDOWN
2822 * receiver shall continue to follow normal data transmission
2823 * procedures defined in Section 6 until all outstanding DATA chunks
2824 * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
2825 * new data from its SCTP user.
2826 *
2827 * Verification Tag: 8.5 Verification Tag [Normal verification]
2828 *
2829 * Inputs
2830 * (endpoint, asoc, chunk)
2831 *
2832 * Outputs
2833 * (asoc, reply_msg, msg_up, timers, counters)
2834 *
2835 * The return value is the disposition of the chunk.
2836 */
2837 enum sctp_disposition sctp_sf_do_9_2_shutdown(
2838 struct net *net,
2839 const struct sctp_endpoint *ep,
2840 const struct sctp_association *asoc,
2841 const union sctp_subtype type,
2842 void *arg,
2843 struct sctp_cmd_seq *commands)
2844 {
2845 enum sctp_disposition disposition;
2846 struct sctp_chunk *chunk = arg;
2847 struct sctp_shutdownhdr *sdh;
2848 struct sctp_ulpevent *ev;
2849 __u32 ctsn;
2850
2851 if (!sctp_vtag_verify(chunk, asoc))
2852 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2853
2854 /* Make sure that the SHUTDOWN chunk has a valid length. */
2855 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
2856 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2857 commands);
2858
2859 /* Convert the elaborate header. */
2860 sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
2861 skb_pull(chunk->skb, sizeof(*sdh));
2862 chunk->subh.shutdown_hdr = sdh;
2863 ctsn = ntohl(sdh->cum_tsn_ack);
2864
2865 if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
2866 pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
2867 asoc->ctsn_ack_point);
2868
2869 return SCTP_DISPOSITION_DISCARD;
2870 }
2871
2872 /* If the Cumulative TSN Ack is beyond the highest TSN currently
2873 * sent, terminate the association and respond to the sender
2874 * with an ABORT.
2875 */
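/* TSN_lt()/TSN_lte() implement RFC 1982 serial number arithmetic
 * (roughly, TSN_lt(a, b) is ((__s32)((a) - (b)) < 0)), so these
 * comparisons stay correct across the 32-bit TSN wrap: for instance
 * TSN_lt(0xfffffffe, 0x1) is true, since 0x1 is "ahead" of 0xfffffffe
 * after wrapping.
 */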
2876 if (!TSN_lt(ctsn, asoc->next_tsn))
2877 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
2878
2879 /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
2880 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
2881 * inform the application that it should cease sending data.
2882 */
2883 ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
2884 if (!ev) {
2885 disposition = SCTP_DISPOSITION_NOMEM;
2886 goto out;
2887 }
2888 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
2889
2890 /* Upon the reception of the SHUTDOWN, the peer endpoint shall
2891 * - enter the SHUTDOWN-RECEIVED state,
2892 * - stop accepting new data from its SCTP user
2893 *
2894 * [This is implicit in the new state.]
2895 */
2896 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2897 SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
2898 disposition = SCTP_DISPOSITION_CONSUME;
2899
2900 if (sctp_outq_is_empty(&asoc->outqueue)) {
2901 disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
2902 arg, commands);
2903 }
2904
2905 if (SCTP_DISPOSITION_NOMEM == disposition)
2906 goto out;
2907
2908 /* - verify, by checking the Cumulative TSN Ack field of the
2909 * chunk, that all its outstanding DATA chunks have been
2910 * received by the SHUTDOWN sender.
2911 */
2912 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
2913 SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
2914
2915 out:
2916 return disposition;
2917 }
2918
2919 /*
2920 * sctp_sf_do_9_2_shut_ctsn
2921 *
2922 * Once an endpoint has reached the SHUTDOWN-RECEIVED state,
2923 * it MUST NOT send a SHUTDOWN in response to a ULP request.
2924 * The Cumulative TSN Ack of the received SHUTDOWN chunk
2925 * MUST be processed.
2926 */
2927 enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
2928 struct net *net,
2929 const struct sctp_endpoint *ep,
2930 const struct sctp_association *asoc,
2931 const union sctp_subtype type,
2932 void *arg,
2933 struct sctp_cmd_seq *commands)
2934 {
2935 struct sctp_chunk *chunk = arg;
2936 struct sctp_shutdownhdr *sdh;
2937 __u32 ctsn;
2938
2939 if (!sctp_vtag_verify(chunk, asoc))
2940 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2941
2942 /* Make sure that the SHUTDOWN chunk has a valid length. */
2943 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
2944 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2945 commands);
2946
2947 sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
2948 ctsn = ntohl(sdh->cum_tsn_ack);
2949
2950 if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
2951 pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
2952 asoc->ctsn_ack_point);
2953
2954 return SCTP_DISPOSITION_DISCARD;
2955 }
2956
2957 /* If the Cumulative TSN Ack is beyond the highest TSN currently
2958 * sent, terminate the association and respond to the sender
2959 * with an ABORT.
2960 */
2961 if (!TSN_lt(ctsn, asoc->next_tsn))
2962 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
2963
2964 /* verify, by checking the Cumulative TSN Ack field of the
2965 * chunk, that all its outstanding DATA chunks have been
2966 * received by the SHUTDOWN sender.
2967 */
2968 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
2969 SCTP_BE32(sdh->cum_tsn_ack));
2970
2971 return SCTP_DISPOSITION_CONSUME;
2972 }
2973
2974 /* RFC 2960 9.2
2975 * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
2976 * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
2977 * transport addresses (either in the IP addresses or in the INIT chunk)
2978 * that belong to this association, it should discard the INIT chunk and
2979 * retransmit the SHUTDOWN ACK chunk.
2980 */
2981 static enum sctp_disposition
2982 __sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
2983 const struct sctp_association *asoc,
2984 const union sctp_subtype type, void *arg,
2985 struct sctp_cmd_seq *commands)
2986 {
2987 struct sctp_chunk *chunk = arg;
2988 struct sctp_chunk *reply;
2989
2990 /* Make sure that the chunk has a valid length */
2991 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
2992 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2993 commands);
2994
2995 /* Since we are not going to really process this INIT, there
2996 * is no point in verifying chunk boundaries. Just generate
2997 * the SHUTDOWN ACK.
2998 */
2999 reply = sctp_make_shutdown_ack(asoc, chunk);
3000 if (NULL == reply)
3001 goto nomem;
3002
3003 /* Set the transport for the SHUTDOWN ACK chunk and the timeout for
3004 * the T2-SHUTDOWN timer.
3005 */
3006 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
3007
3008 /* and restart the T2-shutdown timer. */
3009 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3010 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
3011
3012 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
3013
3014 return SCTP_DISPOSITION_CONSUME;
3015 nomem:
3016 return SCTP_DISPOSITION_NOMEM;
3017 }
3018
3019 enum sctp_disposition
3020 sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
3021 const struct sctp_association *asoc,
3022 const union sctp_subtype type, void *arg,
3023 struct sctp_cmd_seq *commands)
3024 {
3025 struct sctp_chunk *chunk = arg;
3026
3027 if (!chunk->singleton)
3028 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3029
3030 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
3031 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3032
3033 if (chunk->sctp_hdr->vtag != 0)
3034 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
3035
3036 return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
3037 }
3038
3039 /*
3040 * sctp_sf_do_ecn_cwr
3041 *
3042 * Section: Appendix A: Explicit Congestion Notification
3043 *
3044 * CWR:
3045 *
3046 * RFC 2481 details a specific bit for a sender to send in the header of
3047 * its next outbound TCP segment to indicate to its peer that it has
3048 * reduced its congestion window. This is termed the CWR bit. For
3049 * SCTP the same indication is made by including the CWR chunk.
3050 * This chunk contains one data element, i.e. the TSN number that
3051 * was sent in the ECNE chunk. This element represents the lowest
3052 * TSN number in the datagram that was originally marked with the
3053 * CE bit.
3054 *
3055 * Verification Tag: 8.5 Verification Tag [Normal verification]
3056 * Inputs
3057 * (endpoint, asoc, chunk)
3058 *
3059 * Outputs
3060 * (asoc, reply_msg, msg_up, timers, counters)
3061 *
3062 * The return value is the disposition of the chunk.
3063 */
3064 enum sctp_disposition sctp_sf_do_ecn_cwr(struct net *net,
3065 const struct sctp_endpoint *ep,
3066 const struct sctp_association *asoc,
3067 const union sctp_subtype type,
3068 void *arg,
3069 struct sctp_cmd_seq *commands)
3070 {
3071 struct sctp_chunk *chunk = arg;
3072 struct sctp_cwrhdr *cwr;
3073 u32 lowest_tsn;
3074
3075 if (!sctp_vtag_verify(chunk, asoc))
3076 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3077
3078 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk)))
3079 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3080 commands);
3081
3082 cwr = (struct sctp_cwrhdr *)chunk->skb->data;
3083 skb_pull(chunk->skb, sizeof(*cwr));
3084
3085 lowest_tsn = ntohl(cwr->lowest_tsn);
3086
3087 /* Does this CWR ack the last sent congestion notification? */
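/* For example, with asoc->last_ecne_tsn == 1000 and a CWR carrying
 * lowest_tsn == 1005, TSN_lte(1000, 1005) is true and we tell the
 * state machine to stop sending ECNE for that TSN.
 */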
3088 if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
3089 /* Stop sending ECNE. */
3090 sctp_add_cmd_sf(commands,
3091 SCTP_CMD_ECN_CWR,
3092 SCTP_U32(lowest_tsn));
3093 }
3094 return SCTP_DISPOSITION_CONSUME;
3095 }
3096
3097 /*
3098 * sctp_sf_do_ecne
3099 *
3100 * Section: Appendix A: Explicit Congestion Notification
3101 *
3102 * ECN-Echo
3103 *
3104 * RFC 2481 details a specific bit for a receiver to send back in its
3105 * TCP acknowledgements to notify the sender of the Congestion
3106 * Experienced (CE) bit having arrived from the network. For SCTP this
3107 * same indication is made by including the ECNE chunk. This chunk
3108 * contains one data element, i.e. the lowest TSN associated with the IP
3109 * datagram marked with the CE bit.....
3110 *
3111 * Verification Tag: 8.5 Verification Tag [Normal verification]
3112 * Inputs
3113 * (endpoint, asoc, chunk)
3114 *
3115 * Outputs
3116 * (asoc, reply_msg, msg_up, timers, counters)
3117 *
3118 * The return value is the disposition of the chunk.
3119 */
3120 enum sctp_disposition sctp_sf_do_ecne(struct net *net,
3121 const struct sctp_endpoint *ep,
3122 const struct sctp_association *asoc,
3123 const union sctp_subtype type,
3124 void *arg, struct sctp_cmd_seq *commands)
3125 {
3126 struct sctp_chunk *chunk = arg;
3127 struct sctp_ecnehdr *ecne;
3128
3129 if (!sctp_vtag_verify(chunk, asoc))
3130 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3131
3132 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk)))
3133 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3134 commands);
3135
3136 ecne = (struct sctp_ecnehdr *)chunk->skb->data;
3137 skb_pull(chunk->skb, sizeof(*ecne));
3138
3139 /* If this is a newer ECNE than the last CWR packet we sent out */
3140 sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
3141 SCTP_U32(ntohl(ecne->lowest_tsn)));
3142
3143 return SCTP_DISPOSITION_CONSUME;
3144 }
3145
3146 /*
3147 * Section: 6.2 Acknowledgement on Reception of DATA Chunks
3148 *
3149 * The SCTP endpoint MUST always acknowledge the reception of each valid
3150 * DATA chunk.
3151 *
3152 * The guidelines on delayed acknowledgement algorithm specified in
3153 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
3154 * acknowledgement SHOULD be generated for at least every second packet
3155 * (not every second DATA chunk) received, and SHOULD be generated within
3156 * 200 ms of the arrival of any unacknowledged DATA chunk. In some
3157 * situations it may be beneficial for an SCTP transmitter to be more
3158 * conservative than the algorithms detailed in this document allow.
3159 * However, an SCTP transmitter MUST NOT be more aggressive than the
3160 * following algorithms allow.
3161 *
3162 * An SCTP receiver MUST NOT generate more than one SACK for every
3163 * incoming packet, other than to update the offered window as the
3164 * receiving application consumes new data.
3165 *
3166 * Verification Tag: 8.5 Verification Tag [Normal verification]
3167 *
3168 * Inputs
3169 * (endpoint, asoc, chunk)
3170 *
3171 * Outputs
3172 * (asoc, reply_msg, msg_up, timers, counters)
3173 *
3174 * The return value is the disposition of the chunk.
3175 */
3176 enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
3177 const struct sctp_endpoint *ep,
3178 const struct sctp_association *asoc,
3179 const union sctp_subtype type,
3180 void *arg,
3181 struct sctp_cmd_seq *commands)
3182 {
3183 union sctp_arg force = SCTP_NOFORCE();
3184 struct sctp_chunk *chunk = arg;
3185 int error;
3186
3187 if (!sctp_vtag_verify(chunk, asoc)) {
3188 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3189 SCTP_NULL());
3190 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3191 }
3192
3193 if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
3194 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3195 commands);
3196
3197 error = sctp_eat_data(asoc, chunk, commands);
3198 switch (error) {
3199 case SCTP_IERROR_NO_ERROR:
3200 break;
3201 case SCTP_IERROR_HIGH_TSN:
3202 case SCTP_IERROR_BAD_STREAM:
3203 SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
3204 goto discard_noforce;
3205 case SCTP_IERROR_DUP_TSN:
3206 case SCTP_IERROR_IGNORE_TSN:
3207 SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
3208 goto discard_force;
3209 case SCTP_IERROR_NO_DATA:
3210 return SCTP_DISPOSITION_ABORT;
3211 case SCTP_IERROR_PROTO_VIOLATION:
3212 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
3213 (u8 *)chunk->subh.data_hdr,
3214 sctp_datahdr_len(&asoc->stream));
3215 default:
3216 BUG();
3217 }
3218
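/* The 'I' (SACK-IMMEDIATELY) bit from RFC 7053 asks the receiver to
 * send the corresponding SACK without delay, hence the switch from
 * SCTP_NOFORCE() to SCTP_FORCE() here.
 */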
3219 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
3220 force = SCTP_FORCE();
3221
3222 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
3223 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3224 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
3225 }
3226
3227 /* If this is the last chunk in a packet, we need to count it
3228 * toward SACK generation. Note that we need to SACK every
3229 * OTHER packet containing data chunks, EVEN IF WE DISCARD
3230 * THEM. We elect to NOT generate SACKs if the chunk fails
3231 * the verification tag test.
3232 *
3233 * RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
3234 *
3235 * The SCTP endpoint MUST always acknowledge the reception of
3236 * each valid DATA chunk.
3237 *
3238 * The guidelines on delayed acknowledgement algorithm
3239 * specified in Section 4.2 of [RFC2581] SHOULD be followed.
3240 * Specifically, an acknowledgement SHOULD be generated for at
3241 * least every second packet (not every second DATA chunk)
3242 * received, and SHOULD be generated within 200 ms of the
3243 * arrival of any unacknowledged DATA chunk. In some
3244 * situations it may be beneficial for an SCTP transmitter to
3245 * be more conservative than the algorithms detailed in this
3246 * document allow. However, an SCTP transmitter MUST NOT be
3247 * more aggressive than the following algorithms allow.
3248 */
3249 if (chunk->end_of_packet)
3250 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
3251
3252 return SCTP_DISPOSITION_CONSUME;
3253
3254 discard_force:
3255 /* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
3256 *
3257 * When a packet arrives with duplicate DATA chunk(s) and with
3258 * no new DATA chunk(s), the endpoint MUST immediately send a
3259 * SACK with no delay. If a packet arrives with duplicate
3260 * DATA chunk(s) bundled with new DATA chunks, the endpoint
3261 * MAY immediately send a SACK. Normally receipt of duplicate
3262 * DATA chunks will occur when the original SACK chunk was lost
3263 * and the peer's RTO has expired. The duplicate TSN number(s)
3264 * SHOULD be reported in the SACK as duplicate.
3265 */
3266 /* In our case, we act on the MAY SACK advice depending on whether
3267 * or not the last chunk is a duplicate.
3268 */
3269 if (chunk->end_of_packet)
3270 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
3271 return SCTP_DISPOSITION_DISCARD;
3272
3273 discard_noforce:
3274 if (chunk->end_of_packet)
3275 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
3276
3277 return SCTP_DISPOSITION_DISCARD;
3278 }
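
/* A minimal sketch (illustrative only, not used by the state machine) of
 * the SACK decision taken above; the helper name and boolean inputs are
 * hypothetical, and the real work is queued via SCTP_CMD_GEN_SACK with
 * SCTP_FORCE()/SCTP_NOFORCE():
 *
 *	enum sack_action { SACK_NONE, SACK_DELAYED, SACK_IMMEDIATE };
 *
 *	static enum sack_action sack_decision(bool end_of_packet,
 *					      bool sack_imm_flag,
 *					      bool trailing_duplicate)
 *	{
 *		if (!end_of_packet)
 *			return SACK_NONE;	// decide once per packet
 *		if (sack_imm_flag || trailing_duplicate)
 *			return SACK_IMMEDIATE;	// SCTP_FORCE()
 *		return SACK_DELAYED;		// SCTP_NOFORCE(), delayed-ack rules
 *	}
 */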
3279
3280 /*
3281 * sctp_sf_eat_data_fast_4_4
3282 *
3283 * Section: 4 (4)
3284 * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
3285 * DATA chunks without delay.
3286 *
3287 * Verification Tag: 8.5 Verification Tag [Normal verification]
3288 * Inputs
3289 * (endpoint, asoc, chunk)
3290 *
3291 * Outputs
3292 * (asoc, reply_msg, msg_up, timers, counters)
3293 *
3294 * The return value is the disposition of the chunk.
3295 */
3296 enum sctp_disposition sctp_sf_eat_data_fast_4_4(
3297 struct net *net,
3298 const struct sctp_endpoint *ep,
3299 const struct sctp_association *asoc,
3300 const union sctp_subtype type,
3301 void *arg,
3302 struct sctp_cmd_seq *commands)
3303 {
3304 struct sctp_chunk *chunk = arg;
3305 int error;
3306
3307 if (!sctp_vtag_verify(chunk, asoc)) {
3308 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3309 SCTP_NULL());
3310 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3311 }
3312
3313 if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
3314 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3315 commands);
3316
3317 error = sctp_eat_data(asoc, chunk, commands);
3318 switch (error) {
3319 case SCTP_IERROR_NO_ERROR:
3320 case SCTP_IERROR_HIGH_TSN:
3321 case SCTP_IERROR_DUP_TSN:
3322 case SCTP_IERROR_IGNORE_TSN:
3323 case SCTP_IERROR_BAD_STREAM:
3324 break;
3325 case SCTP_IERROR_NO_DATA:
3326 return SCTP_DISPOSITION_ABORT;
3327 case SCTP_IERROR_PROTO_VIOLATION:
3328 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
3329 (u8 *)chunk->subh.data_hdr,
3330 sctp_datahdr_len(&asoc->stream));
3331 default:
3332 BUG();
3333 }
3334
3335 /* Go ahead and force a SACK, since we are shutting down. */
3336
3337 /* Implementor's Guide.
3338 *
3339 * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
3340 * respond to each received packet containing one or more DATA chunk(s)
3341 * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
3342 */
3343 if (chunk->end_of_packet) {
3344 /* We must delay the chunk creation since the cumulative
3345 * TSN has not been updated yet.
3346 */
3347 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
3348 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
3349 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3350 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
3351 }
3352
3353 return SCTP_DISPOSITION_CONSUME;
3354 }
3355
3356 /*
3357 * Section: 6.2 Processing a Received SACK
3358 * D) Any time a SACK arrives, the endpoint performs the following:
3359 *
3360 * i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
3361 * then drop the SACK. Since Cumulative TSN Ack is monotonically
3362 * increasing, a SACK whose Cumulative TSN Ack is less than the
3363 * Cumulative TSN Ack Point indicates an out-of-order SACK.
3364 *
3365 * ii) Set rwnd equal to the newly received a_rwnd minus the number
3366 * of bytes still outstanding after processing the Cumulative TSN Ack
3367 * and the Gap Ack Blocks.
3368 *
3369 * iii) If the SACK is missing a TSN that was previously
3370 * acknowledged via a Gap Ack Block (e.g., the data receiver
3371 * reneged on the data), then mark the corresponding DATA chunk
3372 * as available for retransmit: Mark it as missing for fast
3373 * retransmit as described in Section 7.2.4 and if no retransmit
3374 * timer is running for the destination address to which the DATA
3375 * chunk was originally transmitted, then T3-rtx is started for
3376 * that destination address.
3377 *
3378 * Verification Tag: 8.5 Verification Tag [Normal verification]
3379 *
3380 * Inputs
3381 * (endpoint, asoc, chunk)
3382 *
3383 * Outputs
3384 * (asoc, reply_msg, msg_up, timers, counters)
3385 *
3386 * The return value is the disposition of the chunk.
3387 */
3388 enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
3389 const struct sctp_endpoint *ep,
3390 const struct sctp_association *asoc,
3391 const union sctp_subtype type,
3392 void *arg,
3393 struct sctp_cmd_seq *commands)
3394 {
3395 struct sctp_chunk *chunk = arg;
3396 struct sctp_sackhdr *sackh;
3397 __u32 ctsn;
3398
3399 if (!sctp_vtag_verify(chunk, asoc))
3400 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3401
3402 /* Make sure that the SACK chunk has a valid length. */
3403 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_sack_chunk)))
3404 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3405 commands);
3406
3407 /* Pull the SACK chunk from the data buffer */
3408 sackh = sctp_sm_pull_sack(chunk);
3409 /* Was this a bogus SACK? */
3410 if (!sackh)
3411 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3412 chunk->subh.sack_hdr = sackh;
3413 ctsn = ntohl(sackh->cum_tsn_ack);
3414
3415 /* If the Cumulative TSN Ack is beyond the max TSN currently
3416 * sent, terminate the association and respond to the
3417 * sender with an ABORT.
3418 */
3419 if (TSN_lte(asoc->next_tsn, ctsn))
3420 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
3421
3422 trace_sctp_probe(ep, asoc, chunk);
3423
3424 /* i) If Cumulative TSN Ack is less than the Cumulative TSN
3425 * Ack Point, then drop the SACK. Since Cumulative TSN
3426 * Ack is monotonically increasing, a SACK whose
3427 * Cumulative TSN Ack is less than the Cumulative TSN Ack
3428 * Point indicates an out-of-order SACK.
3429 */
3430 if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
3431 pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
3432 asoc->ctsn_ack_point);
3433
3434 return SCTP_DISPOSITION_DISCARD;
3435 }
3436
3437 /* Return this SACK for further processing. */
3438 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk));
3439
3440 /* Note: We do the rest of the work as part of the PROCESS_SACK
3441 * side effect.
3442 */
3443 return SCTP_DISPOSITION_CONSUME;
3444 }
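
/* A standalone sketch of the two TSN checks above, assuming TSN_lt()/
 * TSN_lte() follow RFC 2960 serial arithmetic (sign of the 32-bit
 * difference); the helper names are hypothetical:
 *
 *	static bool tsn_before(u32 a, u32 b)	// serial "a < b"
 *	{
 *		return (s32)(a - b) < 0;
 *	}
 *
 *	// Returns <0 for a protocol violation, 0 to drop, >0 to process.
 *	static int classify_sack(u32 ctsn, u32 next_tsn, u32 ctsn_ack_point)
 *	{
 *		if (!tsn_before(ctsn, next_tsn))
 *			return -1;	// acks data that was never sent
 *		if (tsn_before(ctsn, ctsn_ack_point))
 *			return 0;	// out-of-order (old) SACK
 *		return 1;		// hand off to SCTP_CMD_PROCESS_SACK
 *	}
 */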
3445
3446 /*
3447 * Generate an ABORT in response to a packet.
3448 *
3449 * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
3450 *
3451 * 8) The receiver should respond to the sender of the OOTB packet with
3452 * an ABORT. When sending the ABORT, the receiver of the OOTB packet
3453 * MUST fill in the Verification Tag field of the outbound packet
3454 * with the value found in the Verification Tag field of the OOTB
3455 * packet and set the T-bit in the Chunk Flags to indicate that the
3456 * Verification Tag is reflected. After sending this ABORT, the
3457 * receiver of the OOTB packet shall discard the OOTB packet and take
3458 * no further action.
3459 *
3460 * Verification Tag:
3461 *
3462 * The return value is the disposition of the chunk.
3463 */
3464 static enum sctp_disposition sctp_sf_tabort_8_4_8(
3465 struct net *net,
3466 const struct sctp_endpoint *ep,
3467 const struct sctp_association *asoc,
3468 const union sctp_subtype type,
3469 void *arg,
3470 struct sctp_cmd_seq *commands)
3471 {
3472 struct sctp_packet *packet = NULL;
3473 struct sctp_chunk *chunk = arg;
3474 struct sctp_chunk *abort;
3475
3476 packet = sctp_ootb_pkt_new(net, asoc, chunk);
3477 if (!packet)
3478 return SCTP_DISPOSITION_NOMEM;
3479
3480 /* Make an ABORT. The T bit will be set if the asoc
3481 * is NULL.
3482 */
3483 abort = sctp_make_abort(asoc, chunk, 0);
3484 if (!abort) {
3485 sctp_ootb_pkt_free(packet);
3486 return SCTP_DISPOSITION_NOMEM;
3487 }
3488
3489 /* Reflect vtag if T-Bit is set */
3490 if (sctp_test_T_bit(abort))
3491 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
3492
3493 /* Associate the skb with the owning sock for accounting. */
3494 abort->skb->sk = ep->base.sk;
3495
3496 sctp_packet_append_chunk(packet, abort);
3497
3498 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
3499
3500 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
3501
3502 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3503 return SCTP_DISPOSITION_CONSUME;
3504 }
3505
3506 /* Handling of SCTP Packets Containing an INIT Chunk Matching an
3507 * Existing Association when the UDP encap port is incorrect.
3508 *
3509 * From Section 4 at draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.
3510 */
3511 static enum sctp_disposition sctp_sf_new_encap_port(
3512 struct net *net,
3513 const struct sctp_endpoint *ep,
3514 const struct sctp_association *asoc,
3515 const union sctp_subtype type,
3516 void *arg,
3517 struct sctp_cmd_seq *commands)
3518 {
3519 struct sctp_packet *packet = NULL;
3520 struct sctp_chunk *chunk = arg;
3521 struct sctp_chunk *abort;
3522
3523 packet = sctp_ootb_pkt_new(net, asoc, chunk);
3524 if (!packet)
3525 return SCTP_DISPOSITION_NOMEM;
3526
3527 abort = sctp_make_new_encap_port(asoc, chunk);
3528 if (!abort) {
3529 sctp_ootb_pkt_free(packet);
3530 return SCTP_DISPOSITION_NOMEM;
3531 }
3532
3533 abort->skb->sk = ep->base.sk;
3534
3535 sctp_packet_append_chunk(packet, abort);
3536
3537 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3538 SCTP_PACKET(packet));
3539
3540 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
3541
3542 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3543 return SCTP_DISPOSITION_CONSUME;
3544 }
3545
3546 /*
3547 * Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR
3548 * event as ULP notification for each cause included in the chunk.
3549 *
3550 * API 5.3.1.3 - SCTP_REMOTE_ERROR
3551 *
3552 * The return value is the disposition of the chunk.
3553 */
3554 enum sctp_disposition sctp_sf_operr_notify(struct net *net,
3555 const struct sctp_endpoint *ep,
3556 const struct sctp_association *asoc,
3557 const union sctp_subtype type,
3558 void *arg,
3559 struct sctp_cmd_seq *commands)
3560 {
3561 struct sctp_chunk *chunk = arg;
3562 struct sctp_errhdr *err;
3563
3564 if (!sctp_vtag_verify(chunk, asoc))
3565 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3566
3567 /* Make sure that the ERROR chunk has a valid length. */
3568 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk)))
3569 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3570 commands);
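/* Walk the error causes only to validate them; the loop body is
 * intentionally empty, so when every cause is well formed err ends up
 * exactly at chunk_end, which is what the check below relies on.
 */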
3571 sctp_walk_errors(err, chunk->chunk_hdr);
3572 if ((void *)err != (void *)chunk->chunk_end)
3573 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3574 (void *)err, commands);
3575
3576 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3577 SCTP_CHUNK(chunk));
3578
3579 return SCTP_DISPOSITION_CONSUME;
3580 }
3581
3582 /*
3583 * Process an inbound SHUTDOWN ACK.
3584 *
3585 * From Section 9.2:
3586 * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
3587 * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its
3588 * peer, and remove all record of the association.
3589 *
3590 * The return value is the disposition.
3591 */
3592 enum sctp_disposition sctp_sf_do_9_2_final(struct net *net,
3593 const struct sctp_endpoint *ep,
3594 const struct sctp_association *asoc,
3595 const union sctp_subtype type,
3596 void *arg,
3597 struct sctp_cmd_seq *commands)
3598 {
3599 struct sctp_chunk *chunk = arg;
3600 struct sctp_chunk *reply;
3601 struct sctp_ulpevent *ev;
3602
3603 if (!sctp_vtag_verify(chunk, asoc))
3604 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3605
3606 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3607 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
3608 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3609 commands);
3610 /* 10.2 H) SHUTDOWN COMPLETE notification
3611 *
3612 * When SCTP completes the shutdown procedures (section 9.2) this
3613 * notification is passed to the upper layer.
3614 */
3615 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
3616 0, 0, 0, NULL, GFP_ATOMIC);
3617 if (!ev)
3618 goto nomem;
3619
3620 /* ...send a SHUTDOWN COMPLETE chunk to its peer, */
3621 reply = sctp_make_shutdown_complete(asoc, chunk);
3622 if (!reply)
3623 goto nomem_chunk;
3624
3625 /* Do all the commands now (after allocation), so that we
3626 * have consistent state if memory allocation fails
3627 */
3628 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
3629
3630 /* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
3631 * stop the T2-shutdown timer,
3632 */
3633 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3634 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
3635
3636 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3637 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
3638
3639 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
3640 SCTP_STATE(SCTP_STATE_CLOSED));
3641 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
3642 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
3643 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
3644
3645 /* ...and remove all record of the association. */
3646 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
3647 return SCTP_DISPOSITION_DELETE_TCB;
3648
3649 nomem_chunk:
3650 sctp_ulpevent_free(ev);
3651 nomem:
3652 return SCTP_DISPOSITION_NOMEM;
3653 }
3654
3655 /*
3656 * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
3657 *
3658 * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
3659 * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
3660 * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
3661 * packet must fill in the Verification Tag field of the outbound
3662 * packet with the Verification Tag received in the SHUTDOWN ACK and
3663 * set the T-bit in the Chunk Flags to indicate that the Verification
3664 * Tag is reflected.
3665 *
3666 * 8) The receiver should respond to the sender of the OOTB packet with
3667 * an ABORT. When sending the ABORT, the receiver of the OOTB packet
3668 * MUST fill in the Verification Tag field of the outbound packet
3669 * with the value found in the Verification Tag field of the OOTB
3670 * packet and set the T-bit in the Chunk Flags to indicate that the
3671 * Verification Tag is reflected. After sending this ABORT, the
3672 * receiver of the OOTB packet shall discard the OOTB packet and take
3673 * no further action.
3674 */
3675 enum sctp_disposition sctp_sf_ootb(struct net *net,
3676 const struct sctp_endpoint *ep,
3677 const struct sctp_association *asoc,
3678 const union sctp_subtype type,
3679 void *arg, struct sctp_cmd_seq *commands)
3680 {
3681 struct sctp_chunk *chunk = arg;
3682 struct sk_buff *skb = chunk->skb;
3683 struct sctp_chunkhdr *ch;
3684 struct sctp_errhdr *err;
3685 int ootb_cookie_ack = 0;
3686 int ootb_shut_ack = 0;
3687 __u8 *ch_end;
3688
3689 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
3690
3691 if (asoc && !sctp_vtag_verify(chunk, asoc))
3692 asoc = NULL;
3693
3694 ch = (struct sctp_chunkhdr *)chunk->chunk_hdr;
3695 do {
3696 /* Report a violation if the chunk is shorter than the minimal length */
3697 if (ntohs(ch->length) < sizeof(*ch))
3698 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3699 commands);
3700
3701 /* Report violation if chunk len overflows */
3702 ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
3703 if (ch_end > skb_tail_pointer(skb))
3704 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3705 commands);
3706
3707 /* Now that we know we at least have a chunk header,
3708 * do things that are type appropriate.
3709 */
3710 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3711 ootb_shut_ack = 1;
3712
3713 /* RFC 2960, Section 3.3.7
3714 * Moreover, under any circumstances, an endpoint that
3715 * receives an ABORT MUST NOT respond to that ABORT by
3716 * sending an ABORT of its own.
3717 */
3718 if (SCTP_CID_ABORT == ch->type)
3719 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3720
3721 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
3722 * or a COOKIE ACK the SCTP Packet should be silently
3723 * discarded.
3724 */
3725
3726 if (SCTP_CID_COOKIE_ACK == ch->type)
3727 ootb_cookie_ack = 1;
3728
3729 if (SCTP_CID_ERROR == ch->type) {
3730 sctp_walk_errors(err, ch) {
3731 if (SCTP_ERROR_STALE_COOKIE == err->cause) {
3732 ootb_cookie_ack = 1;
3733 break;
3734 }
3735 }
3736 }
3737
3738 ch = (struct sctp_chunkhdr *)ch_end;
3739 } while (ch_end < skb_tail_pointer(skb));
3740
3741 if (ootb_shut_ack)
3742 return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
3743 else if (ootb_cookie_ack)
3744 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3745 else
3746 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
3747 }
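
/* A worked example of the bundled-chunk walk above: chunk boundaries are
 * advanced with 4-byte padding, so a chunk whose Length field is 7
 * occupies SCTP_PAD4(7) == 8 bytes. A minimal standalone version of the
 * walk (hypothetical helper, same validity checks):
 *
 *	static int count_bundled_chunks(const u8 *data, const u8 *tail)
 *	{
 *		const struct sctp_chunkhdr *ch = (const void *)data;
 *		int n = 0;
 *
 *		while ((const u8 *)ch + sizeof(*ch) <= tail) {
 *			u16 len = ntohs(ch->length);
 *
 *			if (len < sizeof(*ch) ||
 *			    (const u8 *)ch + SCTP_PAD4(len) > tail)
 *				return -EINVAL;	// malformed or overflowing chunk
 *			n++;
 *			ch = (const void *)((const u8 *)ch + SCTP_PAD4(len));
 *		}
 *		return n;
 *	}
 */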
3748
3749 /*
3750 * Handle an "Out of the blue" SHUTDOWN ACK.
3751 *
3752 * Section: 8.4 5, sctpimpguide 2.41.
3753 *
3754 * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
3755 * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
3756 * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
3757 * packet must fill in the Verification Tag field of the outbound
3758 * packet with the Verification Tag received in the SHUTDOWN ACK and
3759 * set the T-bit in the Chunk Flags to indicate that the Verification
3760 * Tag is reflected.
3761 *
3762 * Inputs
3763 * (endpoint, asoc, type, arg, commands)
3764 *
3765 * Outputs
3766 * (enum sctp_disposition)
3767 *
3768 * The return value is the disposition of the chunk.
3769 */
3770 static enum sctp_disposition sctp_sf_shut_8_4_5(
3771 struct net *net,
3772 const struct sctp_endpoint *ep,
3773 const struct sctp_association *asoc,
3774 const union sctp_subtype type,
3775 void *arg,
3776 struct sctp_cmd_seq *commands)
3777 {
3778 struct sctp_packet *packet = NULL;
3779 struct sctp_chunk *chunk = arg;
3780 struct sctp_chunk *shut;
3781
3782 packet = sctp_ootb_pkt_new(net, asoc, chunk);
3783 if (!packet)
3784 return SCTP_DISPOSITION_NOMEM;
3785
3786 /* Make a SHUTDOWN_COMPLETE.
3787 * The T bit will be set if the asoc is NULL.
3788 */
3789 shut = sctp_make_shutdown_complete(asoc, chunk);
3790 if (!shut) {
3791 sctp_ootb_pkt_free(packet);
3792 return SCTP_DISPOSITION_NOMEM;
3793 }
3794
3795 /* Reflect vtag if T-Bit is set */
3796 if (sctp_test_T_bit(shut))
3797 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
3798
3799 /* Associate the skb with the owning sock for accounting. */
3800 shut->skb->sk = ep->base.sk;
3801
3802 sctp_packet_append_chunk(packet, shut);
3803
3804 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3805 SCTP_PACKET(packet));
3806
3807 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
3808
3809 /* We need to discard the rest of the packet to prevent
3810 * potential bombing attacks from additional bundled chunks.
3811 * This is documented in the SCTP Threats I-D.
3812 */
3813 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3814 }
3815
3816 /*
3817 * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
3818 *
3819 * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
3820 * If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
3821 * procedures in section 8.4 SHOULD be followed, in other words it
3822 * should be treated as an Out Of The Blue packet.
3823 * [This means that we do NOT check the Verification Tag on these
3824 * chunks. --piggy ]
3825 *
3826 */
3827 enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
3828 const struct sctp_endpoint *ep,
3829 const struct sctp_association *asoc,
3830 const union sctp_subtype type,
3831 void *arg,
3832 struct sctp_cmd_seq *commands)
3833 {
3834 struct sctp_chunk *chunk = arg;
3835
3836 if (!sctp_vtag_verify(chunk, asoc))
3837 asoc = NULL;
3838
3839 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3840 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
3841 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3842 commands);
3843
3844 /* Although we do have an association in this case, it corresponds
3845 * to a restarted association. So the packet is treated as an OOTB
3846 * packet and the state function that handles OOTB SHUTDOWN_ACK is
3847 * called with a NULL association.
3848 */
3849 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
3850
3851 return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
3852 }
3853
3854 /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
3855 enum sctp_disposition sctp_sf_do_asconf(struct net *net,
3856 const struct sctp_endpoint *ep,
3857 const struct sctp_association *asoc,
3858 const union sctp_subtype type,
3859 void *arg,
3860 struct sctp_cmd_seq *commands)
3861 {
3862 struct sctp_paramhdr *err_param = NULL;
3863 struct sctp_chunk *asconf_ack = NULL;
3864 struct sctp_chunk *chunk = arg;
3865 struct sctp_addiphdr *hdr;
3866 __u32 serial;
3867
3868 if (!sctp_vtag_verify(chunk, asoc)) {
3869 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3870 SCTP_NULL());
3871 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3872 }
3873
3874 /* Make sure that the ASCONF ADDIP chunk has a valid length. */
3875 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
3876 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3877 commands);
3878
3879 /* ADD-IP: Section 4.1.1
3880 * This chunk MUST be sent in an authenticated way by using
3881 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
3882 * is received unauthenticated it MUST be silently discarded as
3883 * described in [I-D.ietf-tsvwg-sctp-auth].
3884 */
3885 if (!asoc->peer.asconf_capable ||
3886 (!net->sctp.addip_noauth && !chunk->auth))
3887 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3888
3889 hdr = (struct sctp_addiphdr *)chunk->skb->data;
3890 serial = ntohl(hdr->serial);
3891
3892 /* Verify the ASCONF chunk before processing it. */
3893 if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
3894 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3895 (void *)err_param, commands);
3896
3897 /* ADDIP 5.2 E1) Compare the value of the serial number to the value
3898 * the endpoint stored in a new association variable
3899 * 'Peer-Serial-Number'.
3900 */
3901 if (serial == asoc->peer.addip_serial + 1) {
3902 /* If this is the first instance of ASCONF in the packet,
3903 * we can clean our old ASCONF-ACKs.
3904 */
3905 if (!chunk->has_asconf)
3906 sctp_assoc_clean_asconf_ack_cache(asoc);
3907
3908 /* ADDIP 5.2 E4) When the Sequence Number matches the next one
3909 * expected, process the ASCONF as described below and after
3910 * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
3911 * the response packet and cache a copy of it (in the event it
3912 * later needs to be retransmitted).
3913 *
3914 * Essentially, do V1-V5.
3915 */
3916 asconf_ack = sctp_process_asconf((struct sctp_association *)
3917 asoc, chunk);
3918 if (!asconf_ack)
3919 return SCTP_DISPOSITION_NOMEM;
3920 } else if (serial < asoc->peer.addip_serial + 1) {
3921 /* ADDIP 5.2 E2)
3922 * If the value found in the Sequence Number is less than the
3923 * ('Peer- Sequence-Number' + 1), simply skip to the next
3924 * ASCONF, and include in the outbound response packet
3925 * any previously cached ASCONF-ACK response that was
3926 * sent and saved that matches the Sequence Number of the
3927 * ASCONF. Note: It is possible that no cached ASCONF-ACK
3928 * Chunk exists. This will occur when an older ASCONF
3929 * arrives out of order. In such a case, the receiver
3930 * should skip the ASCONF Chunk and not include ASCONF-ACK
3931 * Chunk for that chunk.
3932 */
3933 asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
3934 if (!asconf_ack)
3935 return SCTP_DISPOSITION_DISCARD;
3936
3937 /* Reset the transport so that we select the correct one
3938 * this time around. This is to make sure that we don't
3939 * accidentally use a stale transport that's been removed.
3940 */
3941 asconf_ack->transport = NULL;
3942 } else {
3943 /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
3944 * it must be either a stale packet or from an attacker.
3945 */
3946 return SCTP_DISPOSITION_DISCARD;
3947 }
3948
3949 /* ADDIP 5.2 E6) The destination address of the SCTP packet
3950 * containing the ASCONF-ACK Chunks MUST be the source address of
3951 * the SCTP packet that held the ASCONF Chunks.
3952 *
3953 * To do this properly, we'll set the destination address of the chunk
3954 * and, at transmit time, try to look up the transport to use.
3955 * Since ASCONFs may be bundled, the correct transport may not be
3956 * created until we process the entire packet, thus this workaround.
3957 */
3958 asconf_ack->dest = chunk->source;
3959 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
3960 if (asoc->new_transport) {
3961 sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
3962 ((struct sctp_association *)asoc)->new_transport = NULL;
3963 }
3964
3965 return SCTP_DISPOSITION_CONSUME;
3966 }
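
/* The serial-number handling above reduces to a three-way decision against
 * 'Peer-Serial-Number' + 1, mirroring the comparisons in the code (the
 * helper and enum names are hypothetical):
 *
 *	enum asconf_verdict { ASCONF_PROCESS, ASCONF_REPLAY_ACK, ASCONF_DROP };
 *
 *	static enum asconf_verdict classify_asconf(u32 serial, u32 peer_serial)
 *	{
 *		if (serial == peer_serial + 1)
 *			return ASCONF_PROCESS;	  // E4: the expected serial
 *		if (serial < peer_serial + 1)
 *			return ASCONF_REPLAY_ACK; // E2: resend a cached ASCONF-ACK
 *		return ASCONF_DROP;		  // E5: stale or forged
 *	}
 */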
3967
3968 static enum sctp_disposition sctp_send_next_asconf(
3969 struct net *net,
3970 const struct sctp_endpoint *ep,
3971 struct sctp_association *asoc,
3972 const union sctp_subtype type,
3973 struct sctp_cmd_seq *commands)
3974 {
3975 struct sctp_chunk *asconf;
3976 struct list_head *entry;
3977
3978 if (list_empty(&asoc->addip_chunk_list))
3979 return SCTP_DISPOSITION_CONSUME;
3980
3981 entry = asoc->addip_chunk_list.next;
3982 asconf = list_entry(entry, struct sctp_chunk, list);
3983
3984 list_del_init(entry);
3985 sctp_chunk_hold(asconf);
3986 asoc->addip_last_asconf = asconf;
3987
3988 return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
3989 }
3990
3991 /*
3992 * ADDIP Section 4.3 General rules for address manipulation
3993 * When building TLV parameters for the ASCONF Chunk that will add or
3994 * delete IP addresses the D0 to D13 rules should be applied:
3995 */
3996 enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
3997 const struct sctp_endpoint *ep,
3998 const struct sctp_association *asoc,
3999 const union sctp_subtype type,
4000 void *arg,
4001 struct sctp_cmd_seq *commands)
4002 {
4003 struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
4004 struct sctp_paramhdr *err_param = NULL;
4005 struct sctp_chunk *asconf_ack = arg;
4006 struct sctp_addiphdr *addip_hdr;
4007 __u32 sent_serial, rcvd_serial;
4008 struct sctp_chunk *abort;
4009
4010 if (!sctp_vtag_verify(asconf_ack, asoc)) {
4011 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4012 SCTP_NULL());
4013 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4014 }
4015
4016 /* Make sure that the ADDIP chunk has a valid length. */
4017 if (!sctp_chunk_length_valid(asconf_ack,
4018 sizeof(struct sctp_addip_chunk)))
4019 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4020 commands);
4021
4022 /* ADD-IP, Section 4.1.2:
4023 * This chunk MUST be sent in an authenticated way by using
4024 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
4025 * is received unauthenticated it MUST be silently discarded as
4026 * described in [I-D.ietf-tsvwg-sctp-auth].
4027 */
4028 if (!asoc->peer.asconf_capable ||
4029 (!net->sctp.addip_noauth && !asconf_ack->auth))
4030 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4031
4032 addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
4033 rcvd_serial = ntohl(addip_hdr->serial);
4034
4035 /* Verify the ASCONF-ACK chunk before processing it. */
4036 if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
4037 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
4038 (void *)err_param, commands);
4039
4040 if (last_asconf) {
4041 addip_hdr = (struct sctp_addiphdr *)last_asconf->subh.addip_hdr;
4042 sent_serial = ntohl(addip_hdr->serial);
4043 } else {
4044 sent_serial = asoc->addip_serial - 1;
4045 }
4046
4047 /* D0) If an endpoint receives an ASCONF-ACK that is greater than or
4048 * equal to the next serial number to be used but no ASCONF chunk is
4049 * outstanding the endpoint MUST ABORT the association. Note that a
4050 * sequence number is greater than if it is no more than 2^^31-1
4051 * larger than the current sequence number (using serial arithmetic).
4052 */
4053 if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
4054 !(asoc->addip_last_asconf)) {
4055 abort = sctp_make_abort(asoc, asconf_ack,
4056 sizeof(struct sctp_errhdr));
4057 if (abort) {
4058 sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
4059 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4060 SCTP_CHUNK(abort));
4061 }
4062 /* We are going to ABORT, so we might as well stop
4063 * processing the rest of the chunks in the packet.
4064 */
4065 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4066 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
4067 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
4068 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4069 SCTP_ERROR(ECONNABORTED));
4070 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4071 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
4072 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4073 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4074 return SCTP_DISPOSITION_ABORT;
4075 }
4076
4077 if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
4078 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4079 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
4080
4081 if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
4082 asconf_ack))
4083 return sctp_send_next_asconf(net, ep,
4084 (struct sctp_association *)asoc,
4085 type, commands);
4086
4087 abort = sctp_make_abort(asoc, asconf_ack,
4088 sizeof(struct sctp_errhdr));
4089 if (abort) {
4090 sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
4091 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4092 SCTP_CHUNK(abort));
4093 }
4094 /* We are going to ABORT, so we might as well stop
4095 * processing the rest of the chunks in the packet.
4096 */
4097 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
4098 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4099 SCTP_ERROR(ECONNABORTED));
4100 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4101 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
4102 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4103 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4104 return SCTP_DISPOSITION_ABORT;
4105 }
4106
4107 return SCTP_DISPOSITION_DISCARD;
4108 }
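
/* "Serial arithmetic" in the D0 rule above means comparison modulo 2^32:
 * a serial is considered greater if it is at most 2^31 - 1 ahead of the
 * other value. For example, 0x00000002 is "greater than or equal to"
 * 0xfffffffe, since it is only four steps ahead after the wrap. A minimal
 * comparison following that rule (an assumption about the semantics of
 * ADDIP_SERIAL_gte(), not its kernel definition):
 *
 *	static bool serial_gte(u32 a, u32 b)
 *	{
 *		return (u32)(a - b) < (1U << 31);	// equal, or ahead by < 2^31
 *	}
 */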
4109
4110 /* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk. */
4111 enum sctp_disposition sctp_sf_do_reconf(struct net *net,
4112 const struct sctp_endpoint *ep,
4113 const struct sctp_association *asoc,
4114 const union sctp_subtype type,
4115 void *arg,
4116 struct sctp_cmd_seq *commands)
4117 {
4118 struct sctp_paramhdr *err_param = NULL;
4119 struct sctp_chunk *chunk = arg;
4120 struct sctp_reconf_chunk *hdr;
4121 union sctp_params param;
4122
4123 if (!sctp_vtag_verify(chunk, asoc)) {
4124 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4125 SCTP_NULL());
4126 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4127 }
4128
4129 /* Make sure that the RECONF chunk has a valid length. */
4130 if (!sctp_chunk_length_valid(chunk, sizeof(*hdr)))
4131 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4132 commands);
4133
4134 if (!sctp_verify_reconf(asoc, chunk, &err_param))
4135 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
4136 (void *)err_param, commands);
4137
4138 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
4139 sctp_walk_params(param, hdr, params) {
4140 struct sctp_chunk *reply = NULL;
4141 struct sctp_ulpevent *ev = NULL;
4142
4143 if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST)
4144 reply = sctp_process_strreset_outreq(
4145 (struct sctp_association *)asoc, param, &ev);
4146 else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
4147 reply = sctp_process_strreset_inreq(
4148 (struct sctp_association *)asoc, param, &ev);
4149 else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
4150 reply = sctp_process_strreset_tsnreq(
4151 (struct sctp_association *)asoc, param, &ev);
4152 else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
4153 reply = sctp_process_strreset_addstrm_out(
4154 (struct sctp_association *)asoc, param, &ev);
4155 else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
4156 reply = sctp_process_strreset_addstrm_in(
4157 (struct sctp_association *)asoc, param, &ev);
4158 else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
4159 reply = sctp_process_strreset_resp(
4160 (struct sctp_association *)asoc, param, &ev);
4161
4162 if (ev)
4163 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
4164 SCTP_ULPEVENT(ev));
4165
4166 if (reply)
4167 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4168 SCTP_CHUNK(reply));
4169 }
4170
4171 return SCTP_DISPOSITION_CONSUME;
4172 }
4173
4174 /*
4175 * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
4176 *
4177 * When a FORWARD TSN chunk arrives, the data receiver MUST first update
4178 * its cumulative TSN point to the value carried in the FORWARD TSN
4179 * chunk, and then MUST further advance its cumulative TSN point locally
4180 * if possible.
4181 * After the above processing, the data receiver MUST stop reporting any
4182 * missing TSNs earlier than or equal to the new cumulative TSN point.
4183 *
4184 * Verification Tag: 8.5 Verification Tag [Normal verification]
4185 *
4186 * The return value is the disposition of the chunk.
4187 */
4188 enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
4189 const struct sctp_endpoint *ep,
4190 const struct sctp_association *asoc,
4191 const union sctp_subtype type,
4192 void *arg,
4193 struct sctp_cmd_seq *commands)
4194 {
4195 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
4196 struct sctp_chunk *chunk = arg;
4197 __u16 len;
4198 __u32 tsn;
4199
4200 if (!sctp_vtag_verify(chunk, asoc)) {
4201 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4202 SCTP_NULL());
4203 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4204 }
4205
4206 if (!asoc->peer.prsctp_capable)
4207 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
4208
4209 /* Make sure that the FORWARD_TSN chunk has valid length. */
4210 if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
4211 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4212 commands);
4213
4214 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
4215 chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
4216 len = ntohs(chunk->chunk_hdr->length);
4217 len -= sizeof(struct sctp_chunkhdr);
4218 skb_pull(chunk->skb, len);
4219
4220 tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
4221 pr_debug("%s: TSN 0x%x\n", __func__, tsn);
4222
4223 /* The TSN is too high--silently discard the chunk and count on it
4224 * getting retransmitted later.
4225 */
4226 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
4227 goto discard_noforce;
4228
4229 if (!asoc->stream.si->validate_ftsn(chunk))
4230 goto discard_noforce;
4231
4232 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
4233 if (len > sctp_ftsnhdr_len(&asoc->stream))
4234 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
4235 SCTP_CHUNK(chunk));
4236
4237 /* Count this as receiving DATA. */
4238 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
4239 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
4240 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
4241 }
4242
4243 /* FIXME: For now send a SACK, but DATA processing may
4244 * send another.
4245 */
4246 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
4247
4248 return SCTP_DISPOSITION_CONSUME;
4249
4250 discard_noforce:
4251 return SCTP_DISPOSITION_DISCARD;
4252 }
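
/* A worked example of the rule above: with the cumulative TSN point at 100
 * and TSNs 102 and 103 already received out of order, a FORWARD TSN
 * carrying new_cum_tsn 101 first moves the cumulative TSN point to 101,
 * and the local advance then carries it on to 103; TSN 101 is no longer
 * reported as missing. (Illustrative numbers only.)
 */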
4253
4254 enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
4255 struct net *net,
4256 const struct sctp_endpoint *ep,
4257 const struct sctp_association *asoc,
4258 const union sctp_subtype type,
4259 void *arg,
4260 struct sctp_cmd_seq *commands)
4261 {
4262 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
4263 struct sctp_chunk *chunk = arg;
4264 __u16 len;
4265 __u32 tsn;
4266
4267 if (!sctp_vtag_verify(chunk, asoc)) {
4268 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4269 SCTP_NULL());
4270 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4271 }
4272
4273 if (!asoc->peer.prsctp_capable)
4274 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
4275
4276 /* Make sure that the FORWARD_TSN chunk has a valid length. */
4277 if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
4278 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4279 commands);
4280
4281 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
4282 chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
4283 len = ntohs(chunk->chunk_hdr->length);
4284 len -= sizeof(struct sctp_chunkhdr);
4285 skb_pull(chunk->skb, len);
4286
4287 tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
4288 pr_debug("%s: TSN 0x%x\n", __func__, tsn);
4289
4290 /* The TSN is too high--silently discard the chunk and count on it
4291 * getting retransmitted later.
4292 */
4293 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
4294 goto gen_shutdown;
4295
4296 if (!asoc->stream.si->validate_ftsn(chunk))
4297 goto gen_shutdown;
4298
4299 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
4300 if (len > sctp_ftsnhdr_len(&asoc->stream))
4301 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
4302 SCTP_CHUNK(chunk));
4303
4304 /* Go ahead and force a SACK, since we are shutting down. */
4305 gen_shutdown:
4306 /* Implementor's Guide.
4307 *
4308 * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
4309 * respond to each received packet containing one or more DATA chunk(s)
4310 * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
4311 */
4312 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
4313 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
4314 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
4315 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
4316
4317 return SCTP_DISPOSITION_CONSUME;
4318 }
4319
4320 /*
4321 * SCTP-AUTH Section 6.3 Receiving authenticated chunks
4322 *
4323 * The receiver MUST use the HMAC algorithm indicated in the HMAC
4324 * Identifier field. If this algorithm was not specified by the
4325 * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
4326 * during association setup, the AUTH chunk and all chunks after it MUST
4327 * be discarded and an ERROR chunk SHOULD be sent with the error cause
4328 * defined in Section 4.1.
4329 *
4330 * If an endpoint with no shared key receives a Shared Key Identifier
4331 * other than 0, it MUST silently discard all authenticated chunks. If
4332 * the endpoint has at least one endpoint pair shared key for the peer,
4333 * it MUST use the key specified by the Shared Key Identifier if a
4334 * key has been configured for that Shared Key Identifier. If no
4335 * endpoint pair shared key has been configured for that Shared Key
4336 * Identifier, all authenticated chunks MUST be silently discarded.
4337 *
4338 * Verification Tag: 8.5 Verification Tag [Normal verification]
4339 *
4340 * The return value is the disposition of the chunk.
4341 */
4342 static enum sctp_ierror sctp_sf_authenticate(
4343 const struct sctp_association *asoc,
4344 struct sctp_chunk *chunk)
4345 {
4346 struct sctp_shared_key *sh_key = NULL;
4347 struct sctp_authhdr *auth_hdr;
4348 __u8 *save_digest, *digest;
4349 struct sctp_hmac *hmac;
4350 unsigned int sig_len;
4351 __u16 key_id;
4352
4353 /* Pull in the auth header, so we can do some more verification */
4354 auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
4355 chunk->subh.auth_hdr = auth_hdr;
4356 skb_pull(chunk->skb, sizeof(*auth_hdr));
4357
4358 /* Make sure that we support the HMAC algorithm from the auth
4359 * chunk.
4360 */
4361 if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
4362 return SCTP_IERROR_AUTH_BAD_HMAC;
4363
4364 /* Make sure that the provided shared key identifier has been
4365 * configured
4366 */
4367 key_id = ntohs(auth_hdr->shkey_id);
4368 if (key_id != asoc->active_key_id) {
4369 sh_key = sctp_auth_get_shkey(asoc, key_id);
4370 if (!sh_key)
4371 return SCTP_IERROR_AUTH_BAD_KEYID;
4372 }
4373
4374 /* Make sure that the length of the signature matches what
4375 * we expect.
4376 */
4377 sig_len = ntohs(chunk->chunk_hdr->length) -
4378 sizeof(struct sctp_auth_chunk);
4379 hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
4380 if (sig_len != hmac->hmac_len)
4381 return SCTP_IERROR_PROTO_VIOLATION;
4382
4383 /* Now that we've done validation checks, we can compute and
4384 * verify the hmac. The steps involved are:
4385 * 1. Save the digest from the chunk.
4386 * 2. Zero out the digest in the chunk.
4387 * 3. Compute the new digest
4388 * 4. Compare saved and new digests.
4389 */
4390 digest = auth_hdr->hmac;
4391 skb_pull(chunk->skb, sig_len);
4392
4393 save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
4394 if (!save_digest)
4395 goto nomem;
4396
4397 memset(digest, 0, sig_len);
4398
4399 sctp_auth_calculate_hmac(asoc, chunk->skb,
4400 (struct sctp_auth_chunk *)chunk->chunk_hdr,
4401 sh_key, GFP_ATOMIC);
4402
4403 /* Discard the packet if the digests do not match */
4404 if (memcmp(save_digest, digest, sig_len)) {
4405 kfree(save_digest);
4406 return SCTP_IERROR_BAD_SIG;
4407 }
4408
4409 kfree(save_digest);
4410 chunk->auth = 1;
4411
4412 return SCTP_IERROR_NO_ERROR;
4413 nomem:
4414 return SCTP_IERROR_NOMEM;
4415 }
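
/* A minimal sketch of the save/zero/recompute/compare pattern used above;
 * the helper and its callback are hypothetical, and the real code works
 * directly on the chunk via sctp_auth_calculate_hmac():
 *
 *	static bool hmac_matches(u8 *digest, unsigned int len,
 *				 void (*recompute)(u8 *out, unsigned int len))
 *	{
 *		u8 *saved = kmemdup(digest, len, GFP_ATOMIC);
 *		bool ok;
 *
 *		if (!saved)
 *			return false;		// treat allocation failure as a mismatch
 *		memset(digest, 0, len);		// the digest field is zeroed on send
 *		recompute(digest, len);		// recompute over the zeroed chunk
 *		ok = !memcmp(saved, digest, len);
 *		kfree(saved);
 *		return ok;
 *	}
 */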
4416
4417 enum sctp_disposition sctp_sf_eat_auth(struct net *net,
4418 const struct sctp_endpoint *ep,
4419 const struct sctp_association *asoc,
4420 const union sctp_subtype type,
4421 void *arg, struct sctp_cmd_seq *commands)
4422 {
4423 struct sctp_chunk *chunk = arg;
4424 struct sctp_authhdr *auth_hdr;
4425 struct sctp_chunk *err_chunk;
4426 enum sctp_ierror error;
4427
4428 /* Make sure that the peer is AUTH capable */
4429 if (!asoc->peer.auth_capable)
4430 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
4431
4432 if (!sctp_vtag_verify(chunk, asoc)) {
4433 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4434 SCTP_NULL());
4435 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4436 }
4437
4438 /* Make sure that the AUTH chunk has valid length. */
4439 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
4440 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4441 commands);
4442
4443 auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
4444 error = sctp_sf_authenticate(asoc, chunk);
4445 switch (error) {
4446 case SCTP_IERROR_AUTH_BAD_HMAC:
4447 /* Generate the ERROR chunk and discard the rest
4448 * of the packet
4449 */
4450 err_chunk = sctp_make_op_error(asoc, chunk,
4451 SCTP_ERROR_UNSUP_HMAC,
4452 &auth_hdr->hmac_id,
4453 sizeof(__u16), 0);
4454 if (err_chunk) {
4455 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4456 SCTP_CHUNK(err_chunk));
4457 }
4458 fallthrough;
4459 case SCTP_IERROR_AUTH_BAD_KEYID:
4460 case SCTP_IERROR_BAD_SIG:
4461 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4462
4463 case SCTP_IERROR_PROTO_VIOLATION:
4464 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4465 commands);
4466
4467 case SCTP_IERROR_NOMEM:
4468 return SCTP_DISPOSITION_NOMEM;
4469
4470 default: /* Prevent gcc warnings */
4471 break;
4472 }
4473
4474 if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
4475 struct sctp_ulpevent *ev;
4476
4477 ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id),
4478 SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
4479
4480 if (!ev)
4481 return SCTP_DISPOSITION_NOMEM;
4482
4483 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
4484 SCTP_ULPEVENT(ev));
4485 }
4486
4487 return SCTP_DISPOSITION_CONSUME;
4488 }
4489
4490 /*
4491 * Process an unknown chunk.
4492 *
4493 * Section: 3.2. Also, 2.1 in the implementor's guide.
4494 *
4495 * Chunk Types are encoded such that the highest-order two bits specify
4496 * the action that must be taken if the processing endpoint does not
4497 * recognize the Chunk Type.
4498 *
4499 * 00 - Stop processing this SCTP packet and discard it, do not process
4500 * any further chunks within it.
4501 *
4502 * 01 - Stop processing this SCTP packet and discard it, do not process
4503 * any further chunks within it, and report the unrecognized
4504 * chunk in an 'Unrecognized Chunk Type'.
4505 *
4506 * 10 - Skip this chunk and continue processing.
4507 *
4508 * 11 - Skip this chunk and continue processing, but report in an ERROR
4509 * Chunk using the 'Unrecognized Chunk Type' cause of error.
4510 *
4511 * The return value is the disposition of the chunk.
4512 */
4513 enum sctp_disposition sctp_sf_unk_chunk(struct net *net,
4514 const struct sctp_endpoint *ep,
4515 const struct sctp_association *asoc,
4516 const union sctp_subtype type,
4517 void *arg,
4518 struct sctp_cmd_seq *commands)
4519 {
4520 struct sctp_chunk *unk_chunk = arg;
4521 struct sctp_chunk *err_chunk;
4522 struct sctp_chunkhdr *hdr;
4523
4524 pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk);
4525
4526 if (!sctp_vtag_verify(unk_chunk, asoc))
4527 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4528
4529 /* Make sure that the chunk has a valid length.
4530 * Since we don't know the chunk type, we use a general
4531 * chunkhdr structure to make a comparison.
4532 */
4533 if (!sctp_chunk_length_valid(unk_chunk, sizeof(*hdr)))
4534 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4535 commands);
4536
4537 switch (type.chunk & SCTP_CID_ACTION_MASK) {
4538 case SCTP_CID_ACTION_DISCARD:
4539 /* Discard the packet. */
4540 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4541 case SCTP_CID_ACTION_DISCARD_ERR:
4542 /* Generate an ERROR chunk as response. */
4543 hdr = unk_chunk->chunk_hdr;
4544 err_chunk = sctp_make_op_error(asoc, unk_chunk,
4545 SCTP_ERROR_UNKNOWN_CHUNK, hdr,
4546 SCTP_PAD4(ntohs(hdr->length)),
4547 0);
4548 if (err_chunk) {
4549 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4550 SCTP_CHUNK(err_chunk));
4551 }
4552
4553 /* Discard the packet. */
4554 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4555 return SCTP_DISPOSITION_CONSUME;
4556 case SCTP_CID_ACTION_SKIP:
4557 /* Skip the chunk. */
4558 return SCTP_DISPOSITION_DISCARD;
4559 case SCTP_CID_ACTION_SKIP_ERR:
4560 /* Generate an ERROR chunk as response. */
4561 hdr = unk_chunk->chunk_hdr;
4562 err_chunk = sctp_make_op_error(asoc, unk_chunk,
4563 SCTP_ERROR_UNKNOWN_CHUNK, hdr,
4564 SCTP_PAD4(ntohs(hdr->length)),
4565 0);
4566 if (err_chunk) {
4567 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4568 SCTP_CHUNK(err_chunk));
4569 }
4570 /* Skip the chunk. */
4571 return SCTP_DISPOSITION_CONSUME;
4572 default:
4573 break;
4574 }
4575
4576 return SCTP_DISPOSITION_DISCARD;
4577 }
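
/* The switch above keys on the two high-order bits of the chunk type, as
 * described in the Section 3.2 text; e.g. an unrecognized type 0xC0 (top
 * bits 11) is skipped but reported in an ERROR chunk, while type 0x0F
 * (top bits 00) causes the whole packet to be discarded. A standalone
 * sketch of the same mapping (hypothetical helper name):
 *
 *	static const char *unk_chunk_action(u8 type)
 *	{
 *		switch (type & 0xc0) {	// the two high-order bits
 *		case 0x00: return "discard the packet";
 *		case 0x40: return "discard the packet, report in ERROR";
 *		case 0x80: return "skip the chunk";
 *		default:   return "skip the chunk, report in ERROR";
 *		}
 *	}
 */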
4578
4579 /*
4580 * Discard the chunk.
4581 *
4582 * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
4583 * [Too numerous to mention...]
4584 * Verification Tag: No verification needed.
4585 * Inputs
4586 * (endpoint, asoc, chunk)
4587 *
4588 * Outputs
4589 * (asoc, reply_msg, msg_up, timers, counters)
4590 *
4591 * The return value is the disposition of the chunk.
4592 */
4593 enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
4594 const struct sctp_endpoint *ep,
4595 const struct sctp_association *asoc,
4596 const union sctp_subtype type,
4597 void *arg,
4598 struct sctp_cmd_seq *commands)
4599 {
4600 struct sctp_chunk *chunk = arg;
4601
4602 if (asoc && !sctp_vtag_verify(chunk, asoc))
4603 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4604
4605 /* Make sure that the chunk has a valid length.
4606 * Since we don't know the chunk type, we use a general
4607 * chunkhdr structure to make a comparison.
4608 */
4609 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
4610 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4611 commands);
4612
4613 pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk);
4614
4615 return SCTP_DISPOSITION_DISCARD;
4616 }
4617
4618 /*
4619 * Discard the whole packet.
4620 *
4621 * Section: 8.4 2)
4622 *
4623 * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
4624 * silently discard the OOTB packet and take no further action.
4625 *
4626 * Verification Tag: No verification necessary
4627 *
4628 * Inputs
4629 * (endpoint, asoc, chunk)
4630 *
4631 * Outputs
4632 * (asoc, reply_msg, msg_up, timers, counters)
4633 *
4634 * The return value is the disposition of the chunk.
4635 */
4636 enum sctp_disposition sctp_sf_pdiscard(struct net *net,
4637 const struct sctp_endpoint *ep,
4638 const struct sctp_association *asoc,
4639 const union sctp_subtype type,
4640 void *arg, struct sctp_cmd_seq *commands)
4641 {
4642 SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
4643 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
4644
4645 return SCTP_DISPOSITION_CONSUME;
4646 }
4647
4648
4649 /*
4650 * The other end is violating protocol.
4651 *
4652 * Section: Not specified
4653 * Verification Tag: Not specified
4654 * Inputs
4655 * (endpoint, asoc, chunk)
4656 *
4657 * Outputs
4658 * (asoc, reply_msg, msg_up, timers, counters)
4659 *
4660 * We simply tag the chunk as a violation. The state machine will log
4661 * the violation and continue.
4662 */
4663 enum sctp_disposition sctp_sf_violation(struct net *net,
4664 const struct sctp_endpoint *ep,
4665 const struct sctp_association *asoc,
4666 const union sctp_subtype type,
4667 void *arg,
4668 struct sctp_cmd_seq *commands)
4669 {
4670 struct sctp_chunk *chunk = arg;
4671
4672 if (!sctp_vtag_verify(chunk, asoc))
4673 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4674
4675 /* Make sure that the chunk has a valid length. */
4676 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
4677 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4678 commands);
4679
4680 return SCTP_DISPOSITION_VIOLATION;
4681 }
4682
4683 /*
4684 * Common function to handle a protocol violation.
4685 */
4686 static enum sctp_disposition sctp_sf_abort_violation(
4687 struct net *net,
4688 const struct sctp_endpoint *ep,
4689 const struct sctp_association *asoc,
4690 void *arg,
4691 struct sctp_cmd_seq *commands,
4692 const __u8 *payload,
4693 const size_t paylen)
4694 {
4695 struct sctp_packet *packet = NULL;
4696 struct sctp_chunk *chunk = arg;
4697 struct sctp_chunk *abort = NULL;
4698
4699 /* SCTP-AUTH, Section 6.3:
4700 * It should be noted that if the receiver wants to tear
4701 * down an association in an authenticated way only, the
4702 * handling of malformed packets should not result in
4703 * tearing down the association.
4704 *
4705 * This means that if we only want to abort associations
4706 * in an authenticated way (i.e., AUTH+ABORT), then we
4707 * can't destroy this association just because the packet
4708 * was malformed.
4709 */
4710 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
4711 goto discard;
4712
4713 /* Make the abort chunk. */
4714 abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
4715 if (!abort)
4716 goto nomem;
4717
4718 if (asoc) {
4719 /* Treat INIT-ACK as a special case during COOKIE-WAIT. */
4720 if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
4721 !asoc->peer.i.init_tag) {
4722 struct sctp_initack_chunk *initack;
4723
4724 initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
4725 if (!sctp_chunk_length_valid(chunk, sizeof(*initack)))
4726 abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
4727 else {
4728 unsigned int inittag;
4729
4730 inittag = ntohl(initack->init_hdr.init_tag);
4731 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
4732 SCTP_U32(inittag));
4733 }
4734 }
4735
4736 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4737 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4738
4739 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
4740 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4741 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4742 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4743 SCTP_ERROR(ECONNREFUSED));
4744 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
4745 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4746 } else {
4747 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4748 SCTP_ERROR(ECONNABORTED));
4749 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4750 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4751 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4752 }
4753 } else {
4754 packet = sctp_ootb_pkt_new(net, asoc, chunk);
4755
4756 if (!packet)
4757 goto nomem_pkt;
4758
4759 if (sctp_test_T_bit(abort))
4760 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
4761
4762 abort->skb->sk = ep->base.sk;
4763
4764 sctp_packet_append_chunk(packet, abort);
4765
4766 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
4767 SCTP_PACKET(packet));
4768
4769 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4770 }
4771
4772 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4773
4774 discard:
4775 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
4776 return SCTP_DISPOSITION_ABORT;
4777
4778 nomem_pkt:
4779 sctp_chunk_free(abort);
4780 nomem:
4781 return SCTP_DISPOSITION_NOMEM;
4782 }
4783
4784 /*
4785 * Handle a protocol violation when the chunk length is invalid.
4786 * "Invalid" length is identified as smaller than the minimal length a
4787 * given chunk can be. For example, a SACK chunk has invalid length
4788 * if its length is set to be smaller than the size of struct sctp_sack_chunk.
4789 *
4790 * We inform the other end by sending an ABORT with a Protocol Violation
4791 * error code.
4792 *
4793 * Section: Not specified
4794 * Verification Tag: Nothing to do
4795 * Inputs
4796 * (endpoint, asoc, chunk)
4797 *
4798 * Outputs
4799 * (reply_msg, msg_up, counters)
4800 *
4801 * Generate an ABORT chunk and terminate the association.
4802 */
4803 static enum sctp_disposition sctp_sf_violation_chunklen(
4804 struct net *net,
4805 const struct sctp_endpoint *ep,
4806 const struct sctp_association *asoc,
4807 const union sctp_subtype type,
4808 void *arg,
4809 struct sctp_cmd_seq *commands)
4810 {
4811 static const char err_str[] = "The following chunk had invalid length:";
4812
4813 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4814 sizeof(err_str));
4815 }
4816
4817 /*
4818 * Handle a protocol violation when the parameter length is invalid.
4819 * If the length is smaller than the minimum length of a given parameter,
4820 * or accumulated length in multi parameters exceeds the end of the chunk,
4821 * the length is considered as invalid.
4822 */
4823 static enum sctp_disposition sctp_sf_violation_paramlen(
4824 struct net *net,
4825 const struct sctp_endpoint *ep,
4826 const struct sctp_association *asoc,
4827 const union sctp_subtype type,
4828 void *arg, void *ext,
4829 struct sctp_cmd_seq *commands)
4830 {
4831 struct sctp_paramhdr *param = ext;
4832 struct sctp_chunk *abort = NULL;
4833 struct sctp_chunk *chunk = arg;
4834
4835 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
4836 goto discard;
4837
4838 /* Make the abort chunk. */
4839 abort = sctp_make_violation_paramlen(asoc, chunk, param);
4840 if (!abort)
4841 goto nomem;
4842
4843 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4844 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4845
4846 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4847 SCTP_ERROR(ECONNABORTED));
4848 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4849 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4850 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4851 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4852
4853 discard:
4854 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
4855 return SCTP_DISPOSITION_ABORT;
4856 nomem:
4857 return SCTP_DISPOSITION_NOMEM;
4858 }
4859
4860 /* Handle a protocol violation when the peer is trying to advance the
4861 * cumulative tsn ack to a point beyond the max tsn currently sent.
4862 *
4863 * We inform the other end by sending an ABORT with a Protocol Violation
4864 * error code.
4865 */
4866 static enum sctp_disposition sctp_sf_violation_ctsn(
4867 struct net *net,
4868 const struct sctp_endpoint *ep,
4869 const struct sctp_association *asoc,
4870 const union sctp_subtype type,
4871 void *arg,
4872 struct sctp_cmd_seq *commands)
4873 {
4874 static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
4875
4876 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4877 sizeof(err_str));
4878 }
4879
4880 /* Handle a protocol violation due to invalid chunk bundling. For example,
4881 * when we have an association and we receive bundled INIT-ACK, or
4882 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
4883 * statement from the specs. Additionally, there might be an attacker
4884 * on the path and we may not want to continue this communication.
4885 */
4886 static enum sctp_disposition sctp_sf_violation_chunk(
4887 struct net *net,
4888 const struct sctp_endpoint *ep,
4889 const struct sctp_association *asoc,
4890 const union sctp_subtype type,
4891 void *arg,
4892 struct sctp_cmd_seq *commands)
4893 {
4894 static const char err_str[] = "The following chunk violates protocol:";
4895
4896 if (!asoc)
4897 return sctp_sf_violation(net, ep, asoc, type, arg, commands);
4898
4899 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4900 sizeof(err_str));
4901 }
4902 /***************************************************************************
4903 * These are the state functions for handling primitive (Section 10) events.
4904 ***************************************************************************/
4905 /*
4906 * sctp_sf_do_prm_asoc
4907 *
4908 * Section: 10.1 ULP-to-SCTP
4909 * B) Associate
4910 *
4911 * Format: ASSOCIATE(local SCTP instance name, destination transport addr,
4912 * outbound stream count)
4913 * -> association id [,destination transport addr list] [,outbound stream
4914 * count]
4915 *
4916 * This primitive allows the upper layer to initiate an association to a
4917 * specific peer endpoint.
4918 *
4919 * The peer endpoint shall be specified by one of the transport addresses
4920 * which defines the endpoint (see Section 1.4). If the local SCTP
4921 * instance has not been initialized, the ASSOCIATE is considered an
4922 * error.
4923 * [This is not relevant for the kernel implementation since we do all
4924 * initialization at boot time. If we hadn't initialized we wouldn't
4925 * get anywhere near this code.]
4926 *
4927 * An association id, which is a local handle to the SCTP association,
4928 * will be returned on successful establishment of the association. If
4929 * SCTP is not able to open an SCTP association with the peer endpoint,
4930 * an error is returned.
4931 * [In the kernel implementation, the struct sctp_association needs to
4932 * be created BEFORE causing this primitive to run.]
4933 *
4934 * Other association parameters may be returned, including the
4935 * complete destination transport addresses of the peer as well as the
4936 * outbound stream count of the local endpoint. One of the transport
4937 * address from the returned destination addresses will be selected by
4938 * the local endpoint as default primary path for sending SCTP packets
4939 * to this peer. The returned "destination transport addr list" can
4940 * be used by the ULP to change the default primary path or to force
4941 * sending a packet to a specific transport address. [All of this
4942 * stuff happens when the INIT ACK arrives. This is a NON-BLOCKING
4943 * function.]
4944 *
4945 * Mandatory attributes:
4946 *
4947 * o local SCTP instance name - obtained from the INITIALIZE operation.
4948 * [This is the argument asoc.]
4949 * o destination transport addr - specified as one of the transport
4950 * addresses of the peer endpoint with which the association is to be
4951 * established.
4952 * [This is asoc->peer.active_path.]
4953 * o outbound stream count - the number of outbound streams the ULP
4954 * would like to open towards this peer endpoint.
4955 * [BUG: This is not currently implemented.]
4956 * Optional attributes:
4957 *
4958 * None.
4959 *
4960 * The return value is a disposition.
4961 */
4962 enum sctp_disposition sctp_sf_do_prm_asoc(struct net *net,
4963 const struct sctp_endpoint *ep,
4964 const struct sctp_association *asoc,
4965 const union sctp_subtype type,
4966 void *arg,
4967 struct sctp_cmd_seq *commands)
4968 {
4969 struct sctp_association *my_asoc;
4970 struct sctp_chunk *repl;
4971
4972 /* The comment below says that we enter COOKIE-WAIT AFTER
4973 * sending the INIT, but that doesn't actually work in our
4974 * implementation...
4975 */
4976 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4977 SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
4978
4979 /* RFC 2960 5.1 Normal Establishment of an Association
4980 *
4981 * A) "A" first sends an INIT chunk to "Z". In the INIT, "A"
4982 * must provide its Verification Tag (Tag_A) in the Initiate
4983 * Tag field. Tag_A SHOULD be a random number in the range of
4984 * 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
4985 */
4986
4987 repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
4988 if (!repl)
4989 goto nomem;
4990
4991 /* Choose transport for INIT. */
4992 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
4993 SCTP_CHUNK(repl));
4994
4995 /* Cast away the const modifier, as we want to just
4996 * rerun it through as a side effect.
4997 */
4998 my_asoc = (struct sctp_association *)asoc;
4999 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
5000
5001 /* After sending the INIT, "A" starts the T1-init timer and
5002 * enters the COOKIE-WAIT state.
5003 */
5004 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
5005 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
5006 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
5007 return SCTP_DISPOSITION_CONSUME;
5008
5009 nomem:
5010 return SCTP_DISPOSITION_NOMEM;
5011 }
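/* Illustrative sketch (not part of the state machine): from user space this
 * primitive is normally reached through connect() on an SCTP socket, or
 * sctp_connectx() from lksctp-tools when the peer is multi-homed.  The
 * descriptor and peer address below are made-up placeholders:
 *
 *	int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *
 *	connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 */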
5012
5013 /*
5014 * Process the SEND primitive.
5015 *
5016 * Section: 10.1 ULP-to-SCTP
5017 * E) Send
5018 *
5019 * Format: SEND(association id, buffer address, byte count [,context]
5020 * [,stream id] [,life time] [,destination transport address]
5021 * [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
5022 * -> result
5023 *
5024 * This is the main method to send user data via SCTP.
5025 *
5026 * Mandatory attributes:
5027 *
5028 * o association id - local handle to the SCTP association
5029 *
5030 * o buffer address - the location where the user message to be
5031 * transmitted is stored;
5032 *
5033 * o byte count - The size of the user data in number of bytes;
5034 *
5035 * Optional attributes:
5036 *
5037 * o context - an optional 32 bit integer that will be carried in the
5038 * sending failure notification to the ULP if the transportation of
5039 * this User Message fails.
5040 *
5041 * o stream id - to indicate which stream to send the data on. If not
5042 * specified, stream 0 will be used.
5043 *
5044 * o life time - specifies the life time of the user data. The user data
5045 * will not be sent by SCTP after the life time expires. This
5046 * parameter can be used to avoid efforts to transmit stale
5047 * user messages. SCTP notifies the ULP if the data cannot be
5048 * initiated to transport (i.e. sent to the destination via SCTP's
5049 * send primitive) within the life time variable. However, the
5050 * user data will be transmitted if SCTP has attempted to transmit a
5051 * chunk before the life time expired.
5052 *
5053 * o destination transport address - specified as one of the destination
5054 * transport addresses of the peer endpoint to which this packet
5055 * should be sent. Whenever possible, SCTP should use this destination
5056 * transport address for sending the packets, instead of the current
5057 * primary path.
5058 *
5059 * o unorder flag - this flag, if present, indicates that the user
5060 * would like the data delivered in an unordered fashion to the peer
5061 * (i.e., the U flag is set to 1 on all DATA chunks carrying this
5062 * message).
5063 *
5064 * o no-bundle flag - instructs SCTP not to bundle this user data with
5065 * other outbound DATA chunks. SCTP MAY still bundle even when
5066 * this flag is present, when faced with network congestion.
5067 *
5068 * o payload protocol-id - A 32 bit unsigned integer that is to be
5069 * passed to the peer indicating the type of payload protocol data
5070 * being transmitted. This value is passed as opaque data by SCTP.
5071 *
5072 * The return value is the disposition.
5073 */
5074 enum sctp_disposition sctp_sf_do_prm_send(struct net *net,
5075 const struct sctp_endpoint *ep,
5076 const struct sctp_association *asoc,
5077 const union sctp_subtype type,
5078 void *arg,
5079 struct sctp_cmd_seq *commands)
5080 {
5081 struct sctp_datamsg *msg = arg;
5082
5083 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
5084 return SCTP_DISPOSITION_CONSUME;
5085 }
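/* Illustrative sketch (not part of the state machine): the optional SEND
 * attributes described above are normally supplied from user space through
 * the RFC 6458/lksctp-tools sockets API, e.g. sctp_sendmsg().  The socket,
 * buffer and peer address below are made-up placeholders:
 *
 *	sctp_sendmsg(sd, buf, len,
 *		     (struct sockaddr *)&peer, sizeof(peer),
 *		     htonl(42),		// payload protocol-id (opaque)
 *		     SCTP_UNORDERED,	// unorder flag
 *		     1,			// stream id
 *		     5000,		// life time in milliseconds
 *		     0);		// context
 */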
5086
5087 /*
5088 * Process the SHUTDOWN primitive.
5089 *
5090 * Section: 10.1:
5091 * C) Shutdown
5092 *
5093 * Format: SHUTDOWN(association id)
5094 * -> result
5095 *
5096 * Gracefully closes an association. Any locally queued user data
5097 * will be delivered to the peer. The association will be terminated only
5098 * after the peer acknowledges all the SCTP packets sent. A success code
5099 * will be returned on successful termination of the association. If
5100 * attempting to terminate the association results in a failure, an error
5101 * code shall be returned.
5102 *
5103 * Mandatory attributes:
5104 *
5105 * o association id - local handle to the SCTP association
5106 *
5107 * Optional attributes:
5108 *
5109 * None.
5110 *
5111 * The return value is the disposition.
5112 */
5113 enum sctp_disposition sctp_sf_do_9_2_prm_shutdown(
5114 struct net *net,
5115 const struct sctp_endpoint *ep,
5116 const struct sctp_association *asoc,
5117 const union sctp_subtype type,
5118 void *arg,
5119 struct sctp_cmd_seq *commands)
5120 {
5121 enum sctp_disposition disposition;
5122
5123 /* From 9.2 Shutdown of an Association
5124 * Upon receipt of the SHUTDOWN primitive from its upper
5125 * layer, the endpoint enters SHUTDOWN-PENDING state and
5126 * remains there until all outstanding data has been
5127 * acknowledged by its peer. The endpoint accepts no new data
5128 * from its upper layer, but retransmits data to the far end
5129 * if necessary to fill gaps.
5130 */
5131 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5132 SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
5133
5134 disposition = SCTP_DISPOSITION_CONSUME;
5135 if (sctp_outq_is_empty(&asoc->outqueue)) {
5136 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
5137 arg, commands);
5138 }
5139
5140 return disposition;
5141 }
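/* Illustrative sketch (not part of the state machine): a graceful shutdown
 * is normally requested from user space by close() on a one-to-one style
 * socket, or by a send carrying the SCTP_EOF flag on a one-to-many style
 * socket.  The descriptor below is a made-up placeholder:
 *
 *	close(sd);	// queued data is still delivered before termination
 */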
5142
5143 /*
5144 * Process the ABORT primitive.
5145 *
5146 * Section: 10.1:
5147 * C) Abort
5148 *
5149 * Format: Abort(association id [, cause code])
5150 * -> result
5151 *
5152 * Ungracefully closes an association. Any locally queued user data
5153 * will be discarded and an ABORT chunk is sent to the peer. A success code
5154 * will be returned on successful abortion of the association. If
5155 * attempting to abort the association results in a failure, an error
5156 * code shall be returned.
5157 *
5158 * Mandatory attributes:
5159 *
5160 * o association id - local handle to the SCTP association
5161 *
5162 * Optional attributes:
5163 *
5164 * o cause code - reason of the abort to be passed to the peer
5165 *
5166 *
5167 *
5168 * The return value is the disposition.
5169 */
5170 enum sctp_disposition sctp_sf_do_9_1_prm_abort(
5171 struct net *net,
5172 const struct sctp_endpoint *ep,
5173 const struct sctp_association *asoc,
5174 const union sctp_subtype type,
5175 void *arg,
5176 struct sctp_cmd_seq *commands)
5177 {
5178 /* From 9.1 Abort of an Association
5179 * Upon receipt of the ABORT primitive from its upper
5180 * layer, the endpoint enters the CLOSED state and
5181 * discards all its outstanding and queued data. The
5182 * endpoint accepts no new data from its upper layer,
5183 * and an ABORT chunk is sent to the peer to tear the
5184 * association down.
5185 */
5186 struct sctp_chunk *abort = arg;
5187
5188 if (abort)
5189 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
5190
5191 /* Even if we can't send the ABORT due to low memory, delete the
5192 * TCB. This is a departure from our typical NOMEM handling.
5193 */
5194
5195 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5196 SCTP_ERROR(ECONNABORTED));
5197 /* Delete the established association. */
5198 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5199 SCTP_PERR(SCTP_ERROR_USER_ABORT));
5200
5201 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5202 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5203
5204 return SCTP_DISPOSITION_ABORT;
5205 }
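/* Illustrative sketch (not part of the state machine): user space normally
 * reaches the ABORT primitive either by a send carrying the SCTP_ABORT
 * flag, or by closing the socket with SO_LINGER set to a zero linger time.
 * The descriptor below is a made-up placeholder:
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(sd);	// aborts the association instead of shutting it down
 */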
5206
5207 /* We tried an illegal operation on an association which is closed. */
5208 enum sctp_disposition sctp_sf_error_closed(struct net *net,
5209 const struct sctp_endpoint *ep,
5210 const struct sctp_association *asoc,
5211 const union sctp_subtype type,
5212 void *arg,
5213 struct sctp_cmd_seq *commands)
5214 {
5215 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
5216 return SCTP_DISPOSITION_CONSUME;
5217 }
5218
5219 /* We tried an illegal operation on an association which is shutting
5220 * down.
5221 */
5222 enum sctp_disposition sctp_sf_error_shutdown(
5223 struct net *net,
5224 const struct sctp_endpoint *ep,
5225 const struct sctp_association *asoc,
5226 const union sctp_subtype type,
5227 void *arg,
5228 struct sctp_cmd_seq *commands)
5229 {
5230 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
5231 SCTP_ERROR(-ESHUTDOWN));
5232 return SCTP_DISPOSITION_CONSUME;
5233 }
5234
5235 /*
5236 * sctp_cookie_wait_prm_shutdown
5237 *
5238 * Section: 4 Note: 2
5239 * Verification Tag:
5240 * Inputs
5241 * (endpoint, asoc)
5242 *
5243 * The RFC does not explicitly address this issue, but this is the route
5244 * through the state table when someone issues a shutdown while in COOKIE_WAIT state.
5245 *
5246 * Outputs
5247 * (timers)
5248 */
5249 enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown(
5250 struct net *net,
5251 const struct sctp_endpoint *ep,
5252 const struct sctp_association *asoc,
5253 const union sctp_subtype type,
5254 void *arg,
5255 struct sctp_cmd_seq *commands)
5256 {
5257 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5258 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
5259
5260 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5261 SCTP_STATE(SCTP_STATE_CLOSED));
5262
5263 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
5264
5265 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
5266
5267 return SCTP_DISPOSITION_DELETE_TCB;
5268 }
5269
5270 /*
5271 * sctp_cookie_echoed_prm_shutdown
5272 *
5273 * Section: 4 Note: 2
5274 * Verification Tag:
5275 * Inputs
5276 * (endpoint, asoc)
5277 *
5278 * The RFC does not explicitly address this issue, but this is the route
5279 * through the state table when someone issues a shutdown while in COOKIE_ECHOED state.
5280 *
5281 * Outputs
5282 * (timers)
5283 */
5284 enum sctp_disposition sctp_sf_cookie_echoed_prm_shutdown(
5285 struct net *net,
5286 const struct sctp_endpoint *ep,
5287 const struct sctp_association *asoc,
5288 const union sctp_subtype type,
5289 void *arg,
5290 struct sctp_cmd_seq *commands)
5291 {
5292 /* There is a single T1 timer, so we should be able to use the
5293 * common function shared with the COOKIE-WAIT state.
5294 */
5295 return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
5296 }
5297
5298 /*
5299 * sctp_sf_cookie_wait_prm_abort
5300 *
5301 * Section: 4 Note: 2
5302 * Verification Tag:
5303 * Inputs
5304 * (endpoint, asoc)
5305 *
5306 * The RFC does not explicitly address this issue, but this is the route
5307 * through the state table when someone issues an abort while in COOKIE_WAIT state.
5308 *
5309 * Outputs
5310 * (timers)
5311 */
5312 enum sctp_disposition sctp_sf_cookie_wait_prm_abort(
5313 struct net *net,
5314 const struct sctp_endpoint *ep,
5315 const struct sctp_association *asoc,
5316 const union sctp_subtype type,
5317 void *arg,
5318 struct sctp_cmd_seq *commands)
5319 {
5320 struct sctp_chunk *abort = arg;
5321
5322 /* Stop T1-init timer */
5323 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5324 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
5325
5326 if (abort)
5327 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
5328
5329 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5330 SCTP_STATE(SCTP_STATE_CLOSED));
5331
5332 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5333
5334 /* Even if we can't send the ABORT due to low memory, delete the
5335 * TCB. This is a departure from our typical NOMEM handling.
5336 */
5337
5338 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5339 SCTP_ERROR(ECONNREFUSED));
5340 /* Delete the established association. */
5341 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
5342 SCTP_PERR(SCTP_ERROR_USER_ABORT));
5343
5344 return SCTP_DISPOSITION_ABORT;
5345 }
5346
5347 /*
5348 * sctp_sf_cookie_echoed_prm_abort
5349 *
5350 * Section: 4 Note: 3
5351 * Verification Tag:
5352 * Inputs
5353 * (endpoint, asoc)
5354 *
5355 * The RFC does not explicitly address this issue, but this is the route
5356 * through the state table when someone issues an abort while in COOKIE_ECHOED state.
5357 *
5358 * Outputs
5359 * (timers)
5360 */
5361 enum sctp_disposition sctp_sf_cookie_echoed_prm_abort(
5362 struct net *net,
5363 const struct sctp_endpoint *ep,
5364 const struct sctp_association *asoc,
5365 const union sctp_subtype type,
5366 void *arg,
5367 struct sctp_cmd_seq *commands)
5368 {
5369 /* There is a single T1 timer, so we should be able to use the
5370 * common function shared with the COOKIE-WAIT state.
5371 */
5372 return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
5373 }
5374
5375 /*
5376 * sctp_sf_shutdown_pending_prm_abort
5377 *
5378 * Inputs
5379 * (endpoint, asoc)
5380 *
5381 * The RFC does not explicitly address this issue, but this is the route
5382 * through the state table when someone issues an abort while in SHUTDOWN-PENDING state.
5383 *
5384 * Outputs
5385 * (timers)
5386 */
5387 enum sctp_disposition sctp_sf_shutdown_pending_prm_abort(
5388 struct net *net,
5389 const struct sctp_endpoint *ep,
5390 const struct sctp_association *asoc,
5391 const union sctp_subtype type,
5392 void *arg,
5393 struct sctp_cmd_seq *commands)
5394 {
5395 /* Stop the T5-shutdown guard timer. */
5396 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5397 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5398
5399 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
5400 }
5401
5402 /*
5403 * sctp_sf_shutdown_sent_prm_abort
5404 *
5405 * Inputs
5406 * (endpoint, asoc)
5407 *
5408 * The RFC does not explicitly address this issue, but this is the route
5409 * through the state table when someone issues an abort while in SHUTDOWN-SENT state.
5410 *
5411 * Outputs
5412 * (timers)
5413 */
5414 enum sctp_disposition sctp_sf_shutdown_sent_prm_abort(
5415 struct net *net,
5416 const struct sctp_endpoint *ep,
5417 const struct sctp_association *asoc,
5418 const union sctp_subtype type,
5419 void *arg,
5420 struct sctp_cmd_seq *commands)
5421 {
5422 /* Stop the T2-shutdown timer. */
5423 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5424 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
5425
5426 /* Stop the T5-shutdown guard timer. */
5427 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5428 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5429
5430 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
5431 }
5432
5433 /*
5434 * sctp_sf_shutdown_ack_sent_prm_abort
5435 *
5436 * Inputs
5437 * (endpoint, asoc)
5438 *
5439 * The RFC does not explicitly address this issue, but this is the route
5440 * through the state table when someone issues an abort while in SHUTDOWN-ACK-SENT state.
5441 *
5442 * Outputs
5443 * (timers)
5444 */
5445 enum sctp_disposition sctp_sf_shutdown_ack_sent_prm_abort(
5446 struct net *net,
5447 const struct sctp_endpoint *ep,
5448 const struct sctp_association *asoc,
5449 const union sctp_subtype type,
5450 void *arg,
5451 struct sctp_cmd_seq *commands)
5452 {
5453 /* It is the same T2 timer, so we should be able to use the
5454 * common function shared with the SHUTDOWN-SENT state.
5455 */
5456 return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
5457 }
5458
5459 /*
5460 * Process the REQUESTHEARTBEAT primitive
5461 *
5462 * 10.1 ULP-to-SCTP
5463 * J) Request Heartbeat
5464 *
5465 * Format: REQUESTHEARTBEAT(association id, destination transport address)
5466 *
5467 * -> result
5468 *
5469 * Instructs the local endpoint to perform a HeartBeat on the specified
5470 * destination transport address of the given association. The returned
5471 * result should indicate whether the transmission of the HEARTBEAT
5472 * chunk to the destination address is successful.
5473 *
5474 * Mandatory attributes:
5475 *
5476 * o association id - local handle to the SCTP association
5477 *
5478 * o destination transport address - the transport address of the
5479 * association on which a heartbeat should be issued.
5480 */
5481 enum sctp_disposition sctp_sf_do_prm_requestheartbeat(
5482 struct net *net,
5483 const struct sctp_endpoint *ep,
5484 const struct sctp_association *asoc,
5485 const union sctp_subtype type,
5486 void *arg,
5487 struct sctp_cmd_seq *commands)
5488 {
5489 if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
5490 (struct sctp_transport *)arg, commands))
5491 return SCTP_DISPOSITION_NOMEM;
5492
5493 /*
5494 * RFC 2960 (bis), section 8.3
5495 *
5496 * D) Request an on-demand HEARTBEAT on a specific destination
5497 * transport address of a given association.
5498 *
5499 * The endpoint should increment the respective error counter of
5500 * the destination transport address each time a HEARTBEAT is sent
5501 * to that address and not acknowledged within one RTO.
5502 *
5503 */
5504 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
5505 SCTP_TRANSPORT(arg));
5506 return SCTP_DISPOSITION_CONSUME;
5507 }
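/* Illustrative sketch (not part of the state machine): this primitive is
 * normally triggered from user space with the SPP_HB_DEMAND flag of the
 * SCTP_PEER_ADDR_PARAMS socket option (RFC 6458, 8.1.12).  The descriptor,
 * association id and peer address below are made-up placeholders:
 *
 *	struct sctp_paddrparams pp = { 0 };
 *
 *	pp.spp_assoc_id = assoc_id;
 *	memcpy(&pp.spp_address, &peer, sizeof(peer));
 *	pp.spp_flags = SPP_HB_DEMAND;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));
 */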
5508
5509 /*
5510 * ADDIP Section 4.1 ASCONF Chunk Procedures
5511 * When an endpoint has an ASCONF signaled change to be sent to the
5512 * remote endpoint it should do A1 to A9
5513 */
5514 enum sctp_disposition sctp_sf_do_prm_asconf(struct net *net,
5515 const struct sctp_endpoint *ep,
5516 const struct sctp_association *asoc,
5517 const union sctp_subtype type,
5518 void *arg,
5519 struct sctp_cmd_seq *commands)
5520 {
5521 struct sctp_chunk *chunk = arg;
5522
5523 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
5524 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
5525 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
5526 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
5527 return SCTP_DISPOSITION_CONSUME;
5528 }
5529
5530 /* RE-CONFIG Section 5.1 RECONF Chunk Procedures */
5531 enum sctp_disposition sctp_sf_do_prm_reconf(struct net *net,
5532 const struct sctp_endpoint *ep,
5533 const struct sctp_association *asoc,
5534 const union sctp_subtype type,
5535 void *arg,
5536 struct sctp_cmd_seq *commands)
5537 {
5538 struct sctp_chunk *chunk = arg;
5539
5540 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
5541 return SCTP_DISPOSITION_CONSUME;
5542 }
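/* Illustrative sketch (not part of the state machine): one common way this
 * primitive is triggered from user space is a stream reset request via the
 * SCTP_RESET_STREAMS socket option (RFC 6525 socket API), provided stream
 * reconfiguration is enabled.  The descriptor and association id below are
 * made-up placeholders; srs_number_streams == 0 requests all streams:
 *
 *	struct sctp_reset_streams srs = { 0 };
 *
 *	srs.srs_assoc_id = assoc_id;
 *	srs.srs_flags = SCTP_STREAM_RESET_OUTGOING;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RESET_STREAMS, &srs, sizeof(srs));
 */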
5543
5544 /*
5545 * Ignore the primitive event
5546 *
5547 * The return value is the disposition of the primitive.
5548 */
5549 enum sctp_disposition sctp_sf_ignore_primitive(
5550 struct net *net,
5551 const struct sctp_endpoint *ep,
5552 const struct sctp_association *asoc,
5553 const union sctp_subtype type,
5554 void *arg,
5555 struct sctp_cmd_seq *commands)
5556 {
5557 pr_debug("%s: primitive type:%d is ignored\n", __func__,
5558 type.primitive);
5559
5560 return SCTP_DISPOSITION_DISCARD;
5561 }
5562
5563 /***************************************************************************
5564 * These are the state functions for the OTHER events.
5565 ***************************************************************************/
5566
5567 /*
5568 * When the SCTP stack has no more user data to send or retransmit, this
5569 * notification is given to the user. Also, at the time when a user app
5570 * subscribes to this event, if there is no data to be sent or
5571 * retransmitted, the stack will immediately send up this notification.
5572 */
5573 enum sctp_disposition sctp_sf_do_no_pending_tsn(
5574 struct net *net,
5575 const struct sctp_endpoint *ep,
5576 const struct sctp_association *asoc,
5577 const union sctp_subtype type,
5578 void *arg,
5579 struct sctp_cmd_seq *commands)
5580 {
5581 struct sctp_ulpevent *event;
5582
5583 event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
5584 if (!event)
5585 return SCTP_DISPOSITION_NOMEM;
5586
5587 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
5588
5589 return SCTP_DISPOSITION_CONSUME;
5590 }
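/* Illustrative sketch (not part of the state machine): user space sees this
 * as an SCTP_SENDER_DRY_EVENT notification after subscribing through the
 * SCTP_EVENTS socket option.  The descriptor below is a made-up placeholder:
 *
 *	struct sctp_event_subscribe ev = { 0 };
 *
 *	ev.sctp_sender_dry_event = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 */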
5591
5592 /*
5593 * Start the shutdown negotiation.
5594 *
5595 * From Section 9.2:
5596 * Once all its outstanding data has been acknowledged, the endpoint
5597 * shall send a SHUTDOWN chunk to its peer including in the Cumulative
5598 * TSN Ack field the last sequential TSN it has received from the peer.
5599 * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
5600 * state. If the timer expires, the endpoint must re-send the SHUTDOWN
5601 * with the updated last sequential TSN received from its peer.
5602 *
5603 * The return value is the disposition.
5604 */
5605 enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
5606 struct net *net,
5607 const struct sctp_endpoint *ep,
5608 const struct sctp_association *asoc,
5609 const union sctp_subtype type,
5610 void *arg,
5611 struct sctp_cmd_seq *commands)
5612 {
5613 struct sctp_chunk *reply;
5614
5615 /* Once all its outstanding data has been acknowledged, the
5616 * endpoint shall send a SHUTDOWN chunk to its peer including
5617 * in the Cumulative TSN Ack field the last sequential TSN it
5618 * has received from the peer.
5619 */
5620 reply = sctp_make_shutdown(asoc, arg);
5621 if (!reply)
5622 goto nomem;
5623
5624 /* Set the transport for the SHUTDOWN chunk and the timeout for the
5625 * T2-shutdown timer.
5626 */
5627 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
5628
5629 /* It shall then start the T2-shutdown timer */
5630 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
5631 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
5632
5633 /* RFC 4960 Section 9.2
5634 * The sender of the SHUTDOWN MAY also start an overall guard timer
5635 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
5636 */
5637 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5638 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5639
5640 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5641 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5642 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5643
5644 /* and enter the SHUTDOWN-SENT state. */
5645 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5646 SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
5647
5648 /* sctp-implguide 2.10 Issues with Heartbeating and failover
5649 *
5650 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
5651 * or SHUTDOWN-ACK.
5652 */
5653 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
5654
5655 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
5656
5657 return SCTP_DISPOSITION_CONSUME;
5658
5659 nomem:
5660 return SCTP_DISPOSITION_NOMEM;
5661 }
5662
5663 /*
5664 * Generate a SHUTDOWN ACK now that everything is SACK'd.
5665 *
5666 * From Section 9.2:
5667 *
5668 * If it has no more outstanding DATA chunks, the SHUTDOWN receiver
5669 * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
5670 * entering the SHUTDOWN-ACK-SENT state. If the timer expires, the
5671 * endpoint must re-send the SHUTDOWN ACK.
5672 *
5673 * The return value is the disposition.
5674 */
5675 enum sctp_disposition sctp_sf_do_9_2_shutdown_ack(
5676 struct net *net,
5677 const struct sctp_endpoint *ep,
5678 const struct sctp_association *asoc,
5679 const union sctp_subtype type,
5680 void *arg,
5681 struct sctp_cmd_seq *commands)
5682 {
5683 struct sctp_chunk *chunk = arg;
5684 struct sctp_chunk *reply;
5685
5686 /* There are 2 ways of getting here:
5687 * 1) called in response to a SHUTDOWN chunk
5688 * 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
5689 *
5690 * For the case (2), the arg parameter is set to NULL. We need
5691 * to check that we have a chunk before accessing its fields.
5692 */
5693 if (chunk) {
5694 if (!sctp_vtag_verify(chunk, asoc))
5695 return sctp_sf_pdiscard(net, ep, asoc, type, arg,
5696 commands);
5697
5698 /* Make sure that the SHUTDOWN chunk has a valid length. */
5699 if (!sctp_chunk_length_valid(
5700 chunk, sizeof(struct sctp_shutdown_chunk)))
5701 return sctp_sf_violation_chunklen(net, ep, asoc, type,
5702 arg, commands);
5703 }
5704
5705 /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
5706 * shall send a SHUTDOWN ACK ...
5707 */
5708 reply = sctp_make_shutdown_ack(asoc, chunk);
5709 if (!reply)
5710 goto nomem;
5711
5712 /* Set the transport for the SHUTDOWN ACK chunk and the timeout for
5713 * the T2-shutdown timer.
5714 */
5715 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
5716
5717 /* and start/restart a T2-shutdown timer of its own, */
5718 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5719 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
5720
5721 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5722 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5723 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5724
5725 /* Enter the SHUTDOWN-ACK-SENT state. */
5726 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5727 SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));
5728
5729 /* sctp-implguide 2.10 Issues with Heartbeating and failover
5730 *
5731 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
5732 * or SHUTDOWN-ACK.
5733 */
5734 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
5735
5736 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
5737
5738 return SCTP_DISPOSITION_CONSUME;
5739
5740 nomem:
5741 return SCTP_DISPOSITION_NOMEM;
5742 }
5743
5744 /*
5745 * Ignore the event defined as other
5746 *
5747 * The return value is the disposition of the event.
5748 */
5749 enum sctp_disposition sctp_sf_ignore_other(struct net *net,
5750 const struct sctp_endpoint *ep,
5751 const struct sctp_association *asoc,
5752 const union sctp_subtype type,
5753 void *arg,
5754 struct sctp_cmd_seq *commands)
5755 {
5756 pr_debug("%s: the event other type:%d is ignored\n",
5757 __func__, type.other);
5758
5759 return SCTP_DISPOSITION_DISCARD;
5760 }
5761
5762 /************************************************************
5763 * These are the state functions for handling timeout events.
5764 ************************************************************/
5765
5766 /*
5767 * RTX Timeout
5768 *
5769 * Section: 6.3.3 Handle T3-rtx Expiration
5770 *
5771 * Whenever the retransmission timer T3-rtx expires for a destination
5772 * address, do the following:
5773 * [See below]
5774 *
5775 * The return value is the disposition of the chunk.
5776 */
5777 enum sctp_disposition sctp_sf_do_6_3_3_rtx(struct net *net,
5778 const struct sctp_endpoint *ep,
5779 const struct sctp_association *asoc,
5780 const union sctp_subtype type,
5781 void *arg,
5782 struct sctp_cmd_seq *commands)
5783 {
5784 struct sctp_transport *transport = arg;
5785
5786 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
5787
5788 if (asoc->overall_error_count >= asoc->max_retrans) {
5789 if (asoc->peer.zero_window_announced &&
5790 asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
5791 /*
5792 * We are here likely because the receiver had its rwnd
5793 * closed for a while and we have not been able to
5794 * transmit the locally queued data within the maximum
5795 * retransmission attempts limit. Start the T5
5796 * shutdown guard timer to give the receiver one last
5797 * chance and some additional time to recover before
5798 * aborting.
5799 */
5800 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
5801 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5802 } else {
5803 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5804 SCTP_ERROR(ETIMEDOUT));
5805 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
5806 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5807 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5808 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5809 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5810 return SCTP_DISPOSITION_DELETE_TCB;
5811 }
5812 }
5813
5814 /* E1) For the destination address for which the timer
5815 * expires, adjust its ssthresh with rules defined in Section
5816 * 7.2.3 and set the cwnd <- MTU.
5817 */
5818
5819 /* E2) For the destination address for which the timer
5820 * expires, set RTO <- RTO * 2 ("back off the timer"). The
5821 * maximum value discussed in rule C7 above (RTO.max) may be
5822 * used to provide an upper bound to this doubling operation.
5823 */
5824
5825 /* E3) Determine how many of the earliest (i.e., lowest TSN)
5826 * outstanding DATA chunks for the address for which the
5827 * T3-rtx has expired will fit into a single packet, subject
5828 * to the MTU constraint for the path corresponding to the
5829 * destination transport address to which the retransmission
5830 * is being sent (this may be different from the address for
5831 * which the timer expires [see Section 6.4]). Call this
5832 * value K. Bundle and retransmit those K DATA chunks in a
5833 * single packet to the destination endpoint.
5834 *
5835 * Note: Any DATA chunks that were sent to the address for
5836 * which the T3-rtx timer expired but did not fit in one MTU
5837 * (rule E3 above), should be marked for retransmission and
5838 * sent as soon as cwnd allows (normally when a SACK arrives).
5839 */
5840
5841 /* Do some failure management (Section 8.2). */
5842 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
5843
5844 /* NB: Rules E4 and F1 are implicit in R1. */
5845 sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
5846
5847 return SCTP_DISPOSITION_CONSUME;
5848 }
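/* Illustrative sketch (not part of the state machine): asoc->max_retrans
 * checked above is the 'Association.Max.Retrans' protocol parameter.  It
 * can typically be tuned per association from user space, for example via
 * the SCTP_ASSOCINFO socket option; descriptor and ids are placeholders:
 *
 *	struct sctp_assocparams ap = { 0 };
 *
 *	ap.sasoc_assoc_id = assoc_id;
 *	ap.sasoc_asocmaxrxt = 10;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));
 */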
5849
5850 /*
5851 * Generate delayed SACK on timeout
5852 *
5853 * Section: 6.2 Acknowledgement on Reception of DATA Chunks
5854 *
5855 * The guidelines on delayed acknowledgement algorithm specified in
5856 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
5857 * acknowledgement SHOULD be generated for at least every second packet
5858 * (not every second DATA chunk) received, and SHOULD be generated
5859 * within 200 ms of the arrival of any unacknowledged DATA chunk. In
5860 * some situations it may be beneficial for an SCTP transmitter to be
5861 * more conservative than the algorithms detailed in this document
5862 * allow. However, an SCTP transmitter MUST NOT be more aggressive than
5863 * the following algorithms allow.
5864 */
5865 enum sctp_disposition sctp_sf_do_6_2_sack(struct net *net,
5866 const struct sctp_endpoint *ep,
5867 const struct sctp_association *asoc,
5868 const union sctp_subtype type,
5869 void *arg,
5870 struct sctp_cmd_seq *commands)
5871 {
5872 SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
5873 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
5874 return SCTP_DISPOSITION_CONSUME;
5875 }
5876
5877 /*
5878 * sctp_sf_t1_init_timer_expire
5879 *
5880 * Section: 4 Note: 2
5881 * Verification Tag:
5882 * Inputs
5883 * (endpoint, asoc)
5884 *
5885 * RFC 2960 Section 4 Notes
5886 * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
5887 * and re-start the T1-init timer without changing state. This MUST
5888 * be repeated up to 'Max.Init.Retransmits' times. After that, the
5889 * endpoint MUST abort the initialization process and report the
5890 * error to SCTP user.
5891 *
5892 * Outputs
5893 * (timers, events)
5894 *
5895 */
5896 enum sctp_disposition sctp_sf_t1_init_timer_expire(
5897 struct net *net,
5898 const struct sctp_endpoint *ep,
5899 const struct sctp_association *asoc,
5900 const union sctp_subtype type,
5901 void *arg,
5902 struct sctp_cmd_seq *commands)
5903 {
5904 int attempts = asoc->init_err_counter + 1;
5905 struct sctp_chunk *repl = NULL;
5906 struct sctp_bind_addr *bp;
5907
5908 pr_debug("%s: timer T1 expired (INIT)\n", __func__);
5909
5910 SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
5911
5912 if (attempts <= asoc->max_init_attempts) {
5913 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
5914 repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
5915 if (!repl)
5916 return SCTP_DISPOSITION_NOMEM;
5917
5918 /* Choose transport for INIT. */
5919 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
5920 SCTP_CHUNK(repl));
5921
5922 /* Issue a side effect to do the needed accounting. */
5923 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
5924 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
5925
5926 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
5927 } else {
5928 pr_debug("%s: giving up on INIT, attempts:%d "
5929 "max_init_attempts:%d\n", __func__, attempts,
5930 asoc->max_init_attempts);
5931
5932 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5933 SCTP_ERROR(ETIMEDOUT));
5934 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
5935 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5936 return SCTP_DISPOSITION_DELETE_TCB;
5937 }
5938
5939 return SCTP_DISPOSITION_CONSUME;
5940 }
5941
5942 /*
5943 * sctp_sf_t1_cookie_timer_expire
5944 *
5945 * Section: 4 Note: 3
5946 * Verification Tag:
5947 * Inputs
5948 * (endpoint, asoc)
5949 *
5950 * RFC 2960 Section 4 Notes
5951 * 3) If the T1-cookie timer expires, the endpoint MUST retransmit
5952 * COOKIE ECHO and re-start the T1-cookie timer without changing
5953 * state. This MUST be repeated up to 'Max.Init.Retransmits' times.
5954 * After that, the endpoint MUST abort the initialization process and
5955 * report the error to SCTP user.
5956 *
5957 * Outputs
5958 * (timers, events)
5959 *
5960 */
5961 enum sctp_disposition sctp_sf_t1_cookie_timer_expire(
5962 struct net *net,
5963 const struct sctp_endpoint *ep,
5964 const struct sctp_association *asoc,
5965 const union sctp_subtype type,
5966 void *arg,
5967 struct sctp_cmd_seq *commands)
5968 {
5969 int attempts = asoc->init_err_counter + 1;
5970 struct sctp_chunk *repl = NULL;
5971
5972 pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__);
5973
5974 SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
5975
5976 if (attempts <= asoc->max_init_attempts) {
5977 repl = sctp_make_cookie_echo(asoc, NULL);
5978 if (!repl)
5979 return SCTP_DISPOSITION_NOMEM;
5980
5981 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
5982 SCTP_CHUNK(repl));
5983 /* Issue a side effect to do the needed accounting. */
5984 sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART,
5985 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
5986
5987 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
5988 } else {
5989 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5990 SCTP_ERROR(ETIMEDOUT));
5991 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
5992 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5993 return SCTP_DISPOSITION_DELETE_TCB;
5994 }
5995
5996 return SCTP_DISPOSITION_CONSUME;
5997 }
5998
5999 /* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
6000 * with the updated last sequential TSN received from its peer.
6001 *
6002 * An endpoint should limit the number of retransmission of the
6003 * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
6004 * If this threshold is exceeded the endpoint should destroy the TCB and
6005 * MUST report the peer endpoint unreachable to the upper layer (and
6006 * thus the association enters the CLOSED state). The reception of any
6007 * packet from its peer (i.e. as the peer sends all of its queued DATA
6008 * chunks) should clear the endpoint's retransmission count and restart
6009 * the T2-Shutdown timer, giving its peer ample opportunity to transmit
6010 * all of its queued DATA chunks that have not yet been sent.
6011 */
6012 enum sctp_disposition sctp_sf_t2_timer_expire(
6013 struct net *net,
6014 const struct sctp_endpoint *ep,
6015 const struct sctp_association *asoc,
6016 const union sctp_subtype type,
6017 void *arg,
6018 struct sctp_cmd_seq *commands)
6019 {
6020 struct sctp_chunk *reply = NULL;
6021
6022 pr_debug("%s: timer T2 expired\n", __func__);
6023
6024 SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
6025
6026 ((struct sctp_association *)asoc)->shutdown_retries++;
6027
6028 if (asoc->overall_error_count >= asoc->max_retrans) {
6029 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
6030 SCTP_ERROR(ETIMEDOUT));
6031 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
6032 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
6033 SCTP_PERR(SCTP_ERROR_NO_ERROR));
6034 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
6035 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
6036 return SCTP_DISPOSITION_DELETE_TCB;
6037 }
6038
6039 switch (asoc->state) {
6040 case SCTP_STATE_SHUTDOWN_SENT:
6041 reply = sctp_make_shutdown(asoc, NULL);
6042 break;
6043
6044 case SCTP_STATE_SHUTDOWN_ACK_SENT:
6045 reply = sctp_make_shutdown_ack(asoc, NULL);
6046 break;
6047
6048 default:
6049 BUG();
6050 break;
6051 }
6052
6053 if (!reply)
6054 goto nomem;
6055
6056 /* Do some failure management (Section 8.2).
6057 * If the transport the SHUTDOWN was last sent to has been removed,
6058 * don't do failure management.
6059 */
6060 if (asoc->shutdown_last_sent_to)
6061 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
6062 SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
6063
6064 /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
6065 * the T2-shutdown timer.
6066 */
6067 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
6068
6069 /* Restart the T2-shutdown timer. */
6070 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
6071 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
6072 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
6073 return SCTP_DISPOSITION_CONSUME;
6074
6075 nomem:
6076 return SCTP_DISPOSITION_NOMEM;
6077 }
6078
6079 /*
6080 * ADDIP Section 4.1 ASCONF Chunk Procedures
6081 * If the T4 RTO timer expires the endpoint should do B1 to B5
6082 */
6083 enum sctp_disposition sctp_sf_t4_timer_expire(
6084 struct net *net,
6085 const struct sctp_endpoint *ep,
6086 const struct sctp_association *asoc,
6087 const union sctp_subtype type,
6088 void *arg,
6089 struct sctp_cmd_seq *commands)
6090 {
6091 struct sctp_chunk *chunk = asoc->addip_last_asconf;
6092 struct sctp_transport *transport = chunk->transport;
6093
6094 SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
6095
6096 /* ADDIP 4.1 B1) Increment the error counters and perform path failure
6097 * detection on the appropriate destination address as defined in
6098 * RFC2960 [5] section 8.1 and 8.2.
6099 */
6100 if (transport)
6101 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
6102 SCTP_TRANSPORT(transport));
6103
6104 /* Reconfig T4 timer and transport. */
6105 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
6106
6107 /* ADDIP 4.1 B2) Increment the association error counters and perform
6108 * endpoint failure detection on the association as defined in
6109 * RFC2960 [5] section 8.1 and 8.2.
6110 * association error counter is incremented in SCTP_CMD_STRIKE.
6111 */
6112 if (asoc->overall_error_count >= asoc->max_retrans) {
6113 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
6114 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
6115 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
6116 SCTP_ERROR(ETIMEDOUT));
6117 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
6118 SCTP_PERR(SCTP_ERROR_NO_ERROR));
6119 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
6120 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
6121 return SCTP_DISPOSITION_ABORT;
6122 }
6123
6124 /* ADDIP 4.1 B3) Back-off the destination address RTO value to which
6125 * the ASCONF chunk was sent by doubling the RTO timer value.
6126 * This is done in SCTP_CMD_STRIKE.
6127 */
6128
6129 /* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
6130 * choose an alternate destination address (please refer to RFC2960
6131 * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
6132 * chunk, it MUST be the same (including its serial number) as the last
6133 * ASCONF sent.
6134 */
6135 sctp_chunk_hold(asoc->addip_last_asconf);
6136 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
6137 SCTP_CHUNK(asoc->addip_last_asconf));
6138
6139 /* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different
6140 * destination is selected, then the RTO used will be that of the new
6141 * destination address.
6142 */
6143 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
6144 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
6145
6146 return SCTP_DISPOSITION_CONSUME;
6147 }
6148
6149 /* sctpimpguide-05 Section 2.12.2
6150 * The sender of the SHUTDOWN MAY also start an overall guard timer
6151 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
6152 * At the expiration of this timer the sender SHOULD abort the association
6153 * by sending an ABORT chunk.
6154 */
6155 enum sctp_disposition sctp_sf_t5_timer_expire(
6156 struct net *net,
6157 const struct sctp_endpoint *ep,
6158 const struct sctp_association *asoc,
6159 const union sctp_subtype type,
6160 void *arg,
6161 struct sctp_cmd_seq *commands)
6162 {
6163 struct sctp_chunk *reply = NULL;
6164
6165 pr_debug("%s: timer T5 expired\n", __func__);
6166
6167 SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
6168
6169 reply = sctp_make_abort(asoc, NULL, 0);
6170 if (!reply)
6171 goto nomem;
6172
6173 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
6174 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
6175 SCTP_ERROR(ETIMEDOUT));
6176 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
6177 SCTP_PERR(SCTP_ERROR_NO_ERROR));
6178
6179 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
6180 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
6181
6182 return SCTP_DISPOSITION_DELETE_TCB;
6183 nomem:
6184 return SCTP_DISPOSITION_NOMEM;
6185 }
6186
6187 /* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires,
6188 * the association is automatically closed by starting the shutdown process.
6189 * The work that needs to be done is the same as when SHUTDOWN is initiated by
6190 * the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
6191 */
6192 enum sctp_disposition sctp_sf_autoclose_timer_expire(
6193 struct net *net,
6194 const struct sctp_endpoint *ep,
6195 const struct sctp_association *asoc,
6196 const union sctp_subtype type,
6197 void *arg,
6198 struct sctp_cmd_seq *commands)
6199 {
6200 enum sctp_disposition disposition;
6201
6202 SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
6203
6204 /* From 9.2 Shutdown of an Association
6205 * Upon receipt of the SHUTDOWN primitive from its upper
6206 * layer, the endpoint enters SHUTDOWN-PENDING state and
6207 * remains there until all outstanding data has been
6208 * acknowledged by its peer. The endpoint accepts no new data
6209 * from its upper layer, but retransmits data to the far end
6210 * if necessary to fill gaps.
6211 */
6212 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
6213 SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
6214
6215 disposition = SCTP_DISPOSITION_CONSUME;
6216 if (sctp_outq_is_empty(&asoc->outqueue)) {
6217 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
6218 NULL, commands);
6219 }
6220
6221 return disposition;
6222 }
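/* Illustrative sketch (not part of the state machine): the autoclose timer
 * handled above is armed from user space with the SCTP_AUTOCLOSE socket
 * option on a one-to-many style socket; the value is in seconds and 0
 * disables it.  The descriptor below is a made-up placeholder:
 *
 *	int secs = 30;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */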
6223
6224 /*****************************************************************************
6225 * These are sa state functions which could apply to all types of events.
6226 ****************************************************************************/
6227
6228 /*
6229 * This table entry is not implemented.
6230 *
6231 * Inputs
6232 * (endpoint, asoc, chunk)
6233 *
6234 * The return value is the disposition of the chunk.
6235 */
6236 enum sctp_disposition sctp_sf_not_impl(struct net *net,
6237 const struct sctp_endpoint *ep,
6238 const struct sctp_association *asoc,
6239 const union sctp_subtype type,
6240 void *arg, struct sctp_cmd_seq *commands)
6241 {
6242 return SCTP_DISPOSITION_NOT_IMPL;
6243 }
6244
6245 /*
6246 * This table entry represents a bug.
6247 *
6248 * Inputs
6249 * (endpoint, asoc, chunk)
6250 *
6251 * The return value is the disposition of the chunk.
6252 */
6253 enum sctp_disposition sctp_sf_bug(struct net *net,
6254 const struct sctp_endpoint *ep,
6255 const struct sctp_association *asoc,
6256 const union sctp_subtype type,
6257 void *arg, struct sctp_cmd_seq *commands)
6258 {
6259 return SCTP_DISPOSITION_BUG;
6260 }
6261
6262 /*
6263 * This table entry represents the firing of a timer in the wrong state.
6264 * Since timer deletion cannot be guaranteed a timer 'may' end up firing
6265 * when the association is in the wrong state. This event should
6266 * be ignored, so as to prevent any rearming of the timer.
6267 *
6268 * Inputs
6269 * (endpoint, asoc, chunk)
6270 *
6271 * The return value is the disposition of the chunk.
6272 */
6273 enum sctp_disposition sctp_sf_timer_ignore(struct net *net,
6274 const struct sctp_endpoint *ep,
6275 const struct sctp_association *asoc,
6276 const union sctp_subtype type,
6277 void *arg,
6278 struct sctp_cmd_seq *commands)
6279 {
6280 pr_debug("%s: timer %d ignored\n", __func__, type.chunk);
6281
6282 return SCTP_DISPOSITION_CONSUME;
6283 }
6284
6285 /********************************************************************
6286 * 2nd Level Abstractions
6287 ********************************************************************/
6288
6289 /* Pull the SACK chunk based on the SACK header. */
6290 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
6291 {
6292 struct sctp_sackhdr *sack;
6293 __u16 num_dup_tsns;
6294 unsigned int len;
6295 __u16 num_blocks;
6296
6297 /* Protect ourselves from reading too far into
6298 * the skb from a bogus sender.
6299 */
6300 sack = (struct sctp_sackhdr *) chunk->skb->data;
6301
6302 num_blocks = ntohs(sack->num_gap_ack_blocks);
6303 num_dup_tsns = ntohs(sack->num_dup_tsns);
6304 len = sizeof(struct sctp_sackhdr);
6305 len += (num_blocks + num_dup_tsns) * sizeof(__u32);
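/* Each gap ack block carries two 16-bit offsets and each duplicate
 * TSN is a single 32-bit value, so both contribute sizeof(__u32)
 * bytes per entry on top of the fixed SACK header.
 */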
6306 if (len > chunk->skb->len)
6307 return NULL;
6308
6309 skb_pull(chunk->skb, len);
6310
6311 return sack;
6312 }
6313
6314 /* Create an ABORT packet to be sent as a response, with the specified
6315 * error causes.
6316 */
6317 static struct sctp_packet *sctp_abort_pkt_new(
6318 struct net *net,
6319 const struct sctp_endpoint *ep,
6320 const struct sctp_association *asoc,
6321 struct sctp_chunk *chunk,
6322 const void *payload, size_t paylen)
6323 {
6324 struct sctp_packet *packet;
6325 struct sctp_chunk *abort;
6326
6327 packet = sctp_ootb_pkt_new(net, asoc, chunk);
6328
6329 if (packet) {
6330 /* Make an ABORT.
6331 * The T bit will be set if the asoc is NULL.
6332 */
6333 abort = sctp_make_abort(asoc, chunk, paylen);
6334 if (!abort) {
6335 sctp_ootb_pkt_free(packet);
6336 return NULL;
6337 }
6338
6339 /* Reflect vtag if T-Bit is set */
6340 if (sctp_test_T_bit(abort))
6341 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
6342
6343 /* Add specified error causes, i.e., payload, to the
6344 * end of the chunk.
6345 */
6346 sctp_addto_chunk(abort, paylen, payload);
6347
6348 /* Set the skb to the owning sock for accounting. */
6349 abort->skb->sk = ep->base.sk;
6350
6351 sctp_packet_append_chunk(packet, abort);
6352
6353 }
6354
6355 return packet;
6356 }
6357
6358 /* Allocate a packet for responding in the OOTB conditions. */
6359 static struct sctp_packet *sctp_ootb_pkt_new(
6360 struct net *net,
6361 const struct sctp_association *asoc,
6362 const struct sctp_chunk *chunk)
6363 {
6364 struct sctp_transport *transport;
6365 struct sctp_packet *packet;
6366 __u16 sport, dport;
6367 __u32 vtag;
6368
6369 /* Get the source and destination port from the inbound packet. */
6370 sport = ntohs(chunk->sctp_hdr->dest);
6371 dport = ntohs(chunk->sctp_hdr->source);
6372
6373 /* The V-tag is going to be the same as in the inbound packet if no
6374 * association exists; otherwise, use the peer's vtag.
6375 */
6376 if (asoc) {
6377 /* Special case INIT and INIT-ACK as there is no peer vtag
6378 * yet.
6379 */
6380 switch (chunk->chunk_hdr->type) {
6381 case SCTP_CID_INIT:
6382 case SCTP_CID_INIT_ACK:
6383 {
6384 struct sctp_initack_chunk *initack;
6385
6386 initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
6387 vtag = ntohl(initack->init_hdr.init_tag);
6388 break;
6389 }
6390 default:
6391 vtag = asoc->peer.i.init_tag;
6392 break;
6393 }
6394 } else {
6395 /* Special case the INIT and stale COOKIE_ECHO as there is no
6396 * vtag yet.
6397 */
6398 switch (chunk->chunk_hdr->type) {
6399 case SCTP_CID_INIT:
6400 {
6401 struct sctp_init_chunk *init;
6402
6403 init = (struct sctp_init_chunk *)chunk->chunk_hdr;
6404 vtag = ntohl(init->init_hdr.init_tag);
6405 break;
6406 }
6407 default:
6408 vtag = ntohl(chunk->sctp_hdr->vtag);
6409 break;
6410 }
6411 }
6412
6413 /* Make a transport for the bucket, Eliza... */
6414 transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
6415 if (!transport)
6416 goto nomem;
6417
6418 transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
6419
6420 /* Cache a route for the transport with the chunk's destination as
6421 * the source address.
6422 */
6423 sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
6424 sctp_sk(net->sctp.ctl_sock));
6425
6426 packet = &transport->packet;
6427 sctp_packet_init(packet, transport, sport, dport);
6428 sctp_packet_config(packet, vtag, 0);
6429
6430 return packet;
6431
6432 nomem:
6433 return NULL;
6434 }
6435
6436 /* Free the packet allocated earlier for responding in the OOTB condition. */
6437 void sctp_ootb_pkt_free(struct sctp_packet *packet)
6438 {
6439 sctp_transport_free(packet->transport);
6440 }
6441
6442 /* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */
6443 static void sctp_send_stale_cookie_err(struct net *net,
6444 const struct sctp_endpoint *ep,
6445 const struct sctp_association *asoc,
6446 const struct sctp_chunk *chunk,
6447 struct sctp_cmd_seq *commands,
6448 struct sctp_chunk *err_chunk)
6449 {
6450 struct sctp_packet *packet;
6451
6452 if (err_chunk) {
6453 packet = sctp_ootb_pkt_new(net, asoc, chunk);
6454 if (packet) {
6455 struct sctp_signed_cookie *cookie;
6456
6457 /* Override the OOTB vtag from the cookie. */
6458 cookie = chunk->subh.cookie_hdr;
6459 packet->vtag = cookie->c.peer_vtag;
6460
6461 /* Set the skb to the owning sock for accounting. */
6462 err_chunk->skb->sk = ep->base.sk;
6463 sctp_packet_append_chunk(packet, err_chunk);
6464 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
6465 SCTP_PACKET(packet));
6466 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
6467 } else
6468 sctp_chunk_free (err_chunk);
6469 }
6470 }
6471
6472
6473 /* Process a data chunk */
6474 static int sctp_eat_data(const struct sctp_association *asoc,
6475 struct sctp_chunk *chunk,
6476 struct sctp_cmd_seq *commands)
6477 {
6478 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
6479 struct sock *sk = asoc->base.sk;
6480 struct net *net = sock_net(sk);
6481 struct sctp_datahdr *data_hdr;
6482 struct sctp_chunk *err;
6483 enum sctp_verb deliver;
6484 size_t datalen;
6485 __u32 tsn;
6486 int tmp;
6487
6488 data_hdr = (struct sctp_datahdr *)chunk->skb->data;
6489 chunk->subh.data_hdr = data_hdr;
6490 skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
6491
6492 tsn = ntohl(data_hdr->tsn);
6493 pr_debug("%s: TSN 0x%x\n", __func__, tsn);
6494
6495 /* ASSERT: Now skb->data is really the user data. */
6496
6497 /* Process ECN based congestion.
6498 *
6499 * Since the chunk structure is reused for all chunks within
6500 * a packet, we use ecn_ce_done to track if we've already
6501 * done CE processing for this packet.
6502 *
6503 * We need to do ECN processing even if we plan to discard the
6504 * chunk later.
6505 */
6506
6507 if (asoc->peer.ecn_capable && !chunk->ecn_ce_done) {
6508 struct sctp_af *af = SCTP_INPUT_CB(chunk->skb)->af;
6509 chunk->ecn_ce_done = 1;
6510
6511 if (af->is_ce(sctp_gso_headskb(chunk->skb))) {
6512 /* Do real work as side effect. */
6513 sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
6514 SCTP_U32(tsn));
6515 }
6516 }
6517
6518 tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
6519 if (tmp < 0) {
6520 /* The TSN is too high--silently discard the chunk and
6521 * count on it getting retransmitted later.
6522 */
6523 if (chunk->asoc)
6524 chunk->asoc->stats.outofseqtsns++;
6525 return SCTP_IERROR_HIGH_TSN;
6526 } else if (tmp > 0) {
6527 /* This is a duplicate. Record it. */
6528 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
6529 return SCTP_IERROR_DUP_TSN;
6530 }
6531
6532 /* This is a new TSN. */
6533
6534 /* Discard if there is no room in the receive window.
6535 * Actually, allow a little bit of overflow (up to a MTU).
6536 */
6537 datalen = ntohs(chunk->chunk_hdr->length);
6538 datalen -= sctp_datachk_len(&asoc->stream);
6539
6540 deliver = SCTP_CMD_CHUNK_ULP;
6541
6542 /* Think about partial delivery. */
6543 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
6544
6545 /* Even if we don't accept this chunk there is
6546 * memory pressure.
6547 */
6548 sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
6549 }
6550
6551 /* Spill over rwnd a little bit. Note: While allowed, this spill over
6552 * seems a bit troublesome in that frag_point varies based on
6553 * PMTU. In cases such as loopback, this might be a rather
6554 * large spill over.
6555 */
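/* Spelled out, the test below only fires for a chunk that has not already
 * been accepted, and only when the receive window is exhausted, we are
 * already over the window, or the payload exceeds rwnd plus the one
 * frag_point of slack allowed above.
 */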
6556 if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
6557 (datalen > asoc->rwnd + asoc->frag_point))) {
6558
6559 /* If this is the next TSN, consider reneging to make
6560 * room. Note: Playing nice with a confused sender. A
6561 * malicious sender can still eat up all our buffer
6562 * space and in the future we may want to detect and
6563 * do more drastic reneging.
6564 */
6565 if (sctp_tsnmap_has_gap(map) &&
6566 (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
6567 pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);
6568 deliver = SCTP_CMD_RENEGE;
6569 } else {
6570 pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",
6571 __func__, tsn, datalen, asoc->rwnd);
6572
6573 return SCTP_IERROR_IGNORE_TSN;
6574 }
6575 }
6576
6577 /*
6578 * Also try to renege to limit our memory usage in the event that
6579 * we are under memory pressure.
6580 * If we can't renege, don't worry about it: the sk_rmem_schedule()
6581 * check in sctp_ulpevent_make_rcvmsg() will drop the frame if we grow
6582 * our memory usage too much.
6583 */
6584 if (sk_under_memory_pressure(sk)) {
6585 if (sctp_tsnmap_has_gap(map) &&
6586 (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
6587 pr_debug("%s: under pressure, reneging for tsn:%u\n",
6588 __func__, tsn);
6589 deliver = SCTP_CMD_RENEGE;
6590 } else {
6591 sk_mem_reclaim(sk);
6592 }
6593 }
6594
6595 /*
6596 * Section 3.3.10.9 No User Data (9)
6597 *
6598 * Cause of error
6599 * ---------------
6600 * No User Data: This error cause is returned to the originator of a
6601 * DATA chunk if a received DATA chunk has no user data.
6602 */
6603 if (unlikely(datalen == 0)) {
6604 err = sctp_make_abort_no_data(asoc, chunk, tsn);
6605 if (err) {
6606 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
6607 SCTP_CHUNK(err));
6608 }
6609 /* We are going to ABORT, so we might as well stop
6610 * processing the rest of the chunks in the packet.
6611 */
6612 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
6613 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
6614 SCTP_ERROR(ECONNABORTED));
6615 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
6616 SCTP_PERR(SCTP_ERROR_NO_DATA));
6617 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
6618 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
6619 return SCTP_IERROR_NO_DATA;
6620 }
6621
6622 chunk->data_accepted = 1;
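/* Remember that this chunk has passed the acceptance checks; the rwnd
 * overflow test above is skipped for chunks already marked accepted.
 */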
6623
6624 /* Note: Some chunks may get overcounted (if we later drop them) or
6625 * counted twice (if we renege and the chunk arrives again).
6626 */
6627 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
6628 SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
6629 if (chunk->asoc)
6630 chunk->asoc->stats.iuodchunks++;
6631 } else {
6632 SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
6633 if (chunk->asoc)
6634 chunk->asoc->stats.iodchunks++;
6635 }
6636
6637 /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
6638 *
6639 * If an endpoint receives a DATA chunk with an invalid stream
6640 * identifier, it shall acknowledge the reception of the DATA chunk
6641 * following the normal procedure, immediately send an ERROR chunk
6642 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
6643 * and discard the DATA chunk.
6644 */
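/* asoc->stream.incnt is the number of inbound streams agreed on at
 * association setup, so any stream id at or above it is invalid here.
 */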
6645 if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
6646 /* Mark tsn as received even though we drop it */
6647 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
6648
6649 err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
6650 &data_hdr->stream,
6651 sizeof(data_hdr->stream),
6652 sizeof(u16));
6653 if (err)
6654 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
6655 SCTP_CHUNK(err));
6656 return SCTP_IERROR_BAD_STREAM;
6657 }
6658
6659 /* Check to see if the SSN is possible for this TSN.
6660 * The biggest gap we can record is 4K wide. Since SSNs wrap
6661 * at an unsigned short, there is no way that an SSN can
6662 * wrap for a valid TSN. We can simply check if the current
6663 * SSN is smaller than the next expected one. If it is, it wrapped
6664 * and is invalid.
6665 */
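/* asoc->stream.si is the set of stream operations selected for this
 * association, so validate_data() ends up in the DATA or I-DATA specific
 * SSN/MID check depending on whether interleaving was negotiated.
 */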
6666 if (!asoc->stream.si->validate_data(chunk))
6667 return SCTP_IERROR_PROTO_VIOLATION;
6668
6669 /* Send the data up to the user. Note: Schedule the
6670 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
6671 * chunk needs the updated rwnd.
6672 */
6673 sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
6674
6675 return SCTP_IERROR_NO_ERROR;
6676 }