1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
7 * This file is part of the SCTP kernel implementation
9 * This file contains sctp stream manipulation primitives and helpers.
11 * This SCTP implementation is free software;
12 * you can redistribute it and/or modify it under the terms of
13 * the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This SCTP implementation is distributed in the hope that it
18 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19 * ************************
20 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21 * See the GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with GNU CC; see the file COPYING. If not, see
25 * <http://www.gnu.org/licenses/>.
27 * Please send any bug reports or fixes you make to the
29 * lksctp developers <linux-sctp@vger.kernel.org>
31 * Written or modified by:
32 * Xin Long <lucien.xin@gmail.com>
35 #include <linux/list.h>
36 #include <net/sctp/sctp.h>
37 #include <net/sctp/sm.h>
38 #include <net/sctp/stream_sched.h>
/* fa_alloc(): allocate a flex_array of elem_count elements of elem_size,
 * then preallocate all element pages via flex_array_prealloc() so later
 * gets/puts cannot fail; on prealloc failure the array is freed again.
 * NOTE(review): this extraction is lossy -- the gfp_t parameter, the
 * allocation-failure checks and the return statement are missing here;
 * compare against upstream net/sctp/stream.c before editing.
 */
40 static struct flex_array
*fa_alloc(size_t elem_size
, size_t elem_count
,
43 struct flex_array
*result
;
46 result
= flex_array_alloc(elem_size
, elem_count
, gfp
);
48 err
= flex_array_prealloc(result
, 0, elem_count
, gfp
);
50 flex_array_free(result
);
/* fa_free(): release a flex_array previously obtained from fa_alloc().
 * NOTE(review): the body (presumably a NULL check plus flex_array_free())
 * is missing from this extraction -- TODO confirm against upstream.
 */
58 static void fa_free(struct flex_array
*fa
)
/* fa_copy(): copy elements from the @from flex_array into @fa starting
 * at @index, using flex_array_get()/flex_array_put() pairs.  The @count
 * parameter presumably bounds a loop whose header is missing from this
 * lossy extraction -- TODO confirm against upstream net/sctp/stream.c.
 */
64 static void fa_copy(struct flex_array
*fa
, struct flex_array
*from
,
65 size_t index
, size_t count
)
70 elem
= flex_array_get(from
, index
);
71 flex_array_put(fa
, index
, elem
, 0);
/* fa_zero(): memset() elements of @fa to zero starting at @index, each
 * element being fa->element_size bytes.  As with fa_copy(), the loop
 * over @count is missing from this lossy extraction -- TODO confirm.
 */
76 static void fa_zero(struct flex_array
*fa
, size_t index
, size_t count
)
81 elem
= flex_array_get(fa
, index
);
82 memset(elem
, 0, fa
->element_size
);
/* fa_index(): linear search for the element whose storage address equals
 * @elem, comparing against flex_array_get() results.  Presumably returns
 * the matching index, or @count when not found (the loop header and
 * return statements are missing from this lossy extraction -- TODO
 * confirm against upstream).
 */
87 static size_t fa_index(struct flex_array
*fa
, void *elem
, size_t count
)
92 if (elem
== flex_array_get(fa
, index
))
/* sctp_stream_outq_migrate(): walk the association's outqueue and handle
 * chunks affected by a stream-count change: chunks on streams >= the new
 * outcnt are dequeued via sctp_sched_dequeue_common() and failed with
 * SCTP_ERROR_INV_STRM (adjusting sent_cnt_removable for PR-SCTP priority
 * chunks); then the per-stream ->ext buffers are moved from @stream into
 * @new (so sctp_stream_update() can later swap ->out), and ext buffers of
 * streams beyond the new outcnt are kfree()d.
 * NOTE(review): this extraction is lossy -- the opening brace, the
 * sid-vs-outcnt comparison and several closing braces are missing;
 * compare with upstream net/sctp/stream.c before editing.
 */
100 /* Migrates chunks from stream queues to new stream queues if needed,
101 * but not across associations. Also, removes those chunks to streams
102 * higher than the new max.
104 static void sctp_stream_outq_migrate(struct sctp_stream
*stream
,
105 struct sctp_stream
*new, __u16 outcnt
)
107 struct sctp_association
*asoc
;
108 struct sctp_chunk
*ch
, *temp
;
109 struct sctp_outq
*outq
;
112 asoc
= container_of(stream
, struct sctp_association
, stream
);
113 outq
= &asoc
->outqueue
;
115 list_for_each_entry_safe(ch
, temp
, &outq
->out_chunk_list
, list
) {
116 __u16 sid
= sctp_chunk_stream_no(ch
);
121 sctp_sched_dequeue_common(outq
, ch
);
122 /* No need to call dequeue_done here because
123 * the chunks are not scheduled by now.
126 /* Mark as failed send. */
127 sctp_chunk_fail(ch
, (__force __u32
)SCTP_ERROR_INV_STRM
);
128 if (asoc
->peer
.prsctp_capable
&&
129 SCTP_PR_PRIO_ENABLED(ch
->sinfo
.sinfo_flags
))
130 asoc
->sent_cnt_removable
--;
136 /* Here we actually move the old ext stuff into the new
137 * buffer, because we want to keep it. Then
138 * sctp_stream_update will swap ->out pointers.
140 for (i
= 0; i
< outcnt
; i
++) {
141 kfree(SCTP_SO(new, i
)->ext
);
142 SCTP_SO(new, i
)->ext
= SCTP_SO(stream
, i
)->ext
;
143 SCTP_SO(stream
, i
)->ext
= NULL
;
147 for (i
= outcnt
; i
< stream
->outcnt
; i
++)
148 kfree(SCTP_SO(stream
, i
)->ext
);
/* sctp_stream_alloc_out(): (re)allocate the stream->out flex_array for
 * @outcnt sctp_stream_out elements.  Existing entries are copied over
 * (bounded by min(outcnt, stream->outcnt)), stream->out_curr is re-aimed
 * at its element in the new array via fa_index()/flex_array_get()
 * (BUG_ON if it cannot be found), the old array is freed, and any newly
 * grown tail is zeroed with fa_zero().
 * NOTE(review): lossy extraction -- the gfp parameter, the !out failure
 * return, assignment of stream->out and the final return are missing.
 */
151 static int sctp_stream_alloc_out(struct sctp_stream
*stream
, __u16 outcnt
,
154 struct flex_array
*out
;
155 size_t elem_size
= sizeof(struct sctp_stream_out
);
157 out
= fa_alloc(elem_size
, outcnt
, gfp
);
162 fa_copy(out
, stream
->out
, 0, min(outcnt
, stream
->outcnt
));
163 if (stream
->out_curr
) {
164 size_t index
= fa_index(stream
->out
, stream
->out_curr
,
167 BUG_ON(index
== stream
->outcnt
);
168 stream
->out_curr
= flex_array_get(out
, index
);
170 fa_free(stream
->out
);
173 if (outcnt
> stream
->outcnt
)
174 fa_zero(out
, stream
->outcnt
, (outcnt
- stream
->outcnt
));
/* sctp_stream_alloc_in(): (re)allocate the stream->in flex_array for
 * @incnt sctp_stream_in elements, copying the existing entries (bounded
 * by min(incnt, stream->incnt)) and zeroing any grown tail.
 * NOTE(review): lossy extraction -- the gfp parameter, failure return,
 * freeing of the old array, assignment of stream->in and the final
 * return are missing; compare with upstream before editing.
 */
181 static int sctp_stream_alloc_in(struct sctp_stream
*stream
, __u16 incnt
,
184 struct flex_array
*in
;
185 size_t elem_size
= sizeof(struct sctp_stream_in
);
187 in
= fa_alloc(elem_size
, incnt
, gfp
);
192 fa_copy(in
, stream
->in
, 0, min(incnt
, stream
->incnt
));
196 if (incnt
> stream
->incnt
)
197 fa_zero(in
, stream
->incnt
, (incnt
- stream
->incnt
));
/* sctp_stream_init(): size the stream structure to @outcnt outgoing and
 * @incnt incoming streams.  When outcnt changes, queued chunks for
 * streams that will disappear are filtered out (unsched_all ->
 * sctp_stream_outq_migrate(NULL) -> sched_all), the out array is
 * reallocated and every outgoing stream is marked SCTP_STREAM_OPEN.
 * Also initializes the stream interleave ops and allocates the in array
 * (freeing stream->out on failure of that allocation).
 * NOTE(review): lossy extraction -- the gfp parameter, early-exit gotos,
 * ret checks, the "in" handling around line 234 and the final return
 * are missing; compare with upstream net/sctp/stream.c.
 */
204 int sctp_stream_init(struct sctp_stream
*stream
, __u16 outcnt
, __u16 incnt
,
207 struct sctp_sched_ops
*sched
= sctp_sched_ops_from_stream(stream
);
212 /* Initial stream->out size may be very big, so free it and alloc
213 * a new one with new outcnt to save memory if needed.
215 if (outcnt
== stream
->outcnt
)
218 /* Filter out chunks queued on streams that won't exist anymore */
219 sched
->unsched_all(stream
);
220 sctp_stream_outq_migrate(stream
, NULL
, outcnt
);
221 sched
->sched_all(stream
);
223 ret
= sctp_stream_alloc_out(stream
, outcnt
, gfp
);
227 stream
->outcnt
= outcnt
;
228 for (i
= 0; i
< stream
->outcnt
; i
++)
229 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
234 sctp_stream_interleave_init(stream
);
238 ret
= sctp_stream_alloc_in(stream
, incnt
, gfp
);
241 fa_free(stream
->out
);
247 stream
->incnt
= incnt
;
/* sctp_stream_init_ext(): lazily allocate the per-stream extension
 * (sctp_stream_out_ext) for outgoing stream @sid with kzalloc(), attach
 * it to SCTP_SO(stream, sid)->ext, and initialize the scheduler state
 * for that sid via sctp_sched_init_sid().
 * NOTE(review): lossy extraction -- the kzalloc() failure check
 * (-ENOMEM return) is missing here.
 */
253 int sctp_stream_init_ext(struct sctp_stream
*stream
, __u16 sid
)
255 struct sctp_stream_out_ext
*soute
;
257 soute
= kzalloc(sizeof(*soute
), GFP_KERNEL
);
260 SCTP_SO(stream
, sid
)->ext
= soute
;
262 return sctp_sched_init_sid(stream
, sid
, GFP_KERNEL
);
/* sctp_stream_free(): tear down a stream structure -- kfree() every
 * outgoing stream's ->ext and release the out flex_array.  The scheduler
 * ops pointer is fetched; presumably sched->free() is invoked in lines
 * missing from this extraction, and the in array freed too -- TODO
 * confirm against upstream.
 */
265 void sctp_stream_free(struct sctp_stream
*stream
)
267 struct sctp_sched_ops
*sched
= sctp_sched_ops_from_stream(stream
);
271 for (i
= 0; i
< stream
->outcnt
; i
++)
272 kfree(SCTP_SO(stream
, i
)->ext
);
273 fa_free(stream
->out
);
/* sctp_stream_clear(): reset per-stream message-id state -- zero ->mid
 * and ->mid_uo for every outgoing stream and ->mid for every incoming
 * stream.  (Mangled extraction: loop braces are partially missing.)
 */
277 void sctp_stream_clear(struct sctp_stream
*stream
)
281 for (i
= 0; i
< stream
->outcnt
; i
++) {
282 SCTP_SO(stream
, i
)->mid
= 0;
283 SCTP_SO(stream
, i
)->mid_uo
= 0;
286 for (i
= 0; i
< stream
->incnt
; i
++)
287 SCTP_SI(stream
, i
)->mid
= 0;
/* sctp_stream_update(): replace @stream's contents with @new's after a
 * successful stream reconfiguration: unschedule everything, migrate
 * queued chunks (and ->ext buffers) into @new, free the old arrays, then
 * take over new->out/in and the counts, and reschedule.  Ordering
 * matters: sctp_stream_outq_migrate() must run before sctp_stream_free()
 * so the ext buffers survive the swap (see comment in the migrate
 * helper).  NOTE(review): lossy extraction -- sched->free()/sched_init
 * calls around lines 297/302 appear to be missing.
 */
290 void sctp_stream_update(struct sctp_stream
*stream
, struct sctp_stream
*new)
292 struct sctp_sched_ops
*sched
= sctp_sched_ops_from_stream(stream
);
294 sched
->unsched_all(stream
);
295 sctp_stream_outq_migrate(stream
, new, new->outcnt
);
296 sctp_stream_free(stream
);
298 stream
->out
= new->out
;
299 stream
->in
= new->in
;
300 stream
->outcnt
= new->outcnt
;
301 stream
->incnt
= new->incnt
;
303 sched
->sched_all(stream
);
/* sctp_send_reconf(): hand a RECONF chunk to the SCTP state machine via
 * sctp_primitive_RECONF() for the chunk's association; on failure the
 * chunk is freed with sctp_chunk_free().
 * NOTE(review): lossy extraction -- the retval check guarding the free
 * and the final return are missing from this view.
 */
311 static int sctp_send_reconf(struct sctp_association
*asoc
,
312 struct sctp_chunk
*chunk
)
314 struct net
*net
= sock_net(asoc
->base
.sk
);
317 retval
= sctp_primitive_RECONF(net
, asoc
, chunk
);
319 sctp_chunk_free(chunk
);
/* sctp_stream_outq_is_empty(): return whether no data is pending on any
 * of the @str_nums streams listed in @str_list (network byte order).
 * A non-empty association outqueue short-circuits via out_qlen; then
 * each listed sid's ext->outq list is checked for pending chunks.
 * NOTE(review): lossy extraction -- the "return true/false" statements
 * and some braces are missing from this view.
 */
324 static bool sctp_stream_outq_is_empty(struct sctp_stream
*stream
,
325 __u16 str_nums
, __be16
*str_list
)
327 struct sctp_association
*asoc
;
330 asoc
= container_of(stream
, struct sctp_association
, stream
);
331 if (!asoc
->outqueue
.out_qlen
)
337 for (i
= 0; i
< str_nums
; i
++) {
338 __u16 sid
= ntohs(str_list
[i
]);
340 if (SCTP_SO(stream
, sid
)->ext
&&
341 !list_empty(&SCTP_SO(stream
, sid
)->ext
->outq
))
/* sctp_send_reset_streams(): implement the SCTP_RESET_STREAMS sockopt
 * (RFC 6525 outgoing/incoming SSN reset request).  Validates that the
 * peer is reconf-capable and the feature is enabled (-ENOPROTOOPT
 * otherwise) and that no reconf is outstanding (-EINPROGRESS).  Computes
 * the out/in directions from srs_flags, validates every listed sid
 * against outcnt/incnt, bounds the built parameter length against
 * SCTP_MAX_CHUNK_LEN, converts the sid list to network byte order into
 * a kcalloc'd copy, refuses an outgoing reset while listed streams still
 * have queued data, builds the request with sctp_make_strreset_req(),
 * marks affected outgoing streams closed, sends via sctp_send_reconf()
 * (reopening the streams on failure), and finally records
 * strreset_outstanding = out + in.
 * NOTE(review): heavily lossy extraction -- gotos/labels, kfree of
 * nstr_list, several state constants and the final return are missing;
 * treat upstream net/sctp/stream.c as authoritative.
 */
348 int sctp_send_reset_streams(struct sctp_association
*asoc
,
349 struct sctp_reset_streams
*params
)
351 struct sctp_stream
*stream
= &asoc
->stream
;
352 __u16 i
, str_nums
, *str_list
;
353 struct sctp_chunk
*chunk
;
354 int retval
= -EINVAL
;
358 if (!asoc
->peer
.reconf_capable
||
359 !(asoc
->strreset_enable
& SCTP_ENABLE_RESET_STREAM_REQ
)) {
360 retval
= -ENOPROTOOPT
;
364 if (asoc
->strreset_outstanding
) {
365 retval
= -EINPROGRESS
;
369 out
= params
->srs_flags
& SCTP_STREAM_RESET_OUTGOING
;
370 in
= params
->srs_flags
& SCTP_STREAM_RESET_INCOMING
;
374 str_nums
= params
->srs_number_streams
;
375 str_list
= params
->srs_stream_list
;
380 for (i
= 0; i
< str_nums
; i
++)
381 if (str_list
[i
] >= stream
->outcnt
)
384 param_len
= str_nums
* sizeof(__u16
) +
385 sizeof(struct sctp_strreset_outreq
);
389 for (i
= 0; i
< str_nums
; i
++)
390 if (str_list
[i
] >= stream
->incnt
)
393 param_len
+= str_nums
* sizeof(__u16
) +
394 sizeof(struct sctp_strreset_inreq
);
397 if (param_len
> SCTP_MAX_CHUNK_LEN
-
398 sizeof(struct sctp_reconf_chunk
))
402 nstr_list
= kcalloc(str_nums
, sizeof(__be16
), GFP_KERNEL
);
408 for (i
= 0; i
< str_nums
; i
++)
409 nstr_list
[i
] = htons(str_list
[i
]);
411 if (out
&& !sctp_stream_outq_is_empty(stream
, str_nums
, nstr_list
)) {
416 chunk
= sctp_make_strreset_req(asoc
, str_nums
, nstr_list
, out
, in
);
427 for (i
= 0; i
< str_nums
; i
++)
428 SCTP_SO(stream
, str_list
[i
])->state
=
431 for (i
= 0; i
< stream
->outcnt
; i
++)
432 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_CLOSED
;
435 asoc
->strreset_chunk
= chunk
;
436 sctp_chunk_hold(asoc
->strreset_chunk
);
438 retval
= sctp_send_reconf(asoc
, chunk
);
440 sctp_chunk_put(asoc
->strreset_chunk
);
441 asoc
->strreset_chunk
= NULL
;
446 for (i
= 0; i
< str_nums
; i
++)
447 SCTP_SO(stream
, str_list
[i
])->state
=
450 for (i
= 0; i
< stream
->outcnt
; i
++)
451 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
456 asoc
->strreset_outstanding
= out
+ in
;
/* sctp_send_reset_assoc(): implement SCTP_RESET_ASSOC (RFC 6525 SSN/TSN
 * reset request).  Requires a reconf-capable peer with the assoc-reset
 * feature enabled, no outstanding reconf, and an empty outqueue.  Builds
 * an SSN/TSN reset request, closes all outgoing streams to block further
 * data transmission until completion, sends it via sctp_send_reconf()
 * (reopening streams and dropping the chunk reference on failure), and
 * records strreset_outstanding = 1.
 * NOTE(review): lossy extraction -- error-code returns (-ENOPROTOOPT,
 * -EINPROGRESS, -ENOMEM), retval checks and the final return are
 * missing from this view.
 */
462 int sctp_send_reset_assoc(struct sctp_association
*asoc
)
464 struct sctp_stream
*stream
= &asoc
->stream
;
465 struct sctp_chunk
*chunk
= NULL
;
469 if (!asoc
->peer
.reconf_capable
||
470 !(asoc
->strreset_enable
& SCTP_ENABLE_RESET_ASSOC_REQ
))
473 if (asoc
->strreset_outstanding
)
476 if (!sctp_outq_is_empty(&asoc
->outqueue
))
479 chunk
= sctp_make_strreset_tsnreq(asoc
);
483 /* Block further xmit of data until this request is completed */
484 for (i
= 0; i
< stream
->outcnt
; i
++)
485 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_CLOSED
;
487 asoc
->strreset_chunk
= chunk
;
488 sctp_chunk_hold(asoc
->strreset_chunk
);
490 retval
= sctp_send_reconf(asoc
, chunk
);
492 sctp_chunk_put(asoc
->strreset_chunk
);
493 asoc
->strreset_chunk
= NULL
;
495 for (i
= 0; i
< stream
->outcnt
; i
++)
496 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
501 asoc
->strreset_outstanding
= 1;
/* sctp_send_add_streams(): implement SCTP_ADD_STREAMS (RFC 6525 add
 * outgoing/incoming streams request).  Checks reconf capability and the
 * change-assoc feature (-ENOPROTOOPT), no outstanding reconf
 * (-EINPROGRESS), and that the grown outcnt/incnt stay within
 * SCTP_MAX_STREAM.  Grows the local out array ahead of time, builds the
 * add-streams request chunk, sends it via sctp_send_reconf() (dropping
 * the held reference on failure), commits the new outcnt and records
 * strreset_outstanding = !!out + !!in (one count per direction
 * requested).
 * NOTE(review): lossy extraction -- gotos/labels, the !out && !in
 * validation, retval checks and the final return are missing.
 */
506 int sctp_send_add_streams(struct sctp_association
*asoc
,
507 struct sctp_add_streams
*params
)
509 struct sctp_stream
*stream
= &asoc
->stream
;
510 struct sctp_chunk
*chunk
= NULL
;
515 if (!asoc
->peer
.reconf_capable
||
516 !(asoc
->strreset_enable
& SCTP_ENABLE_CHANGE_ASSOC_REQ
)) {
517 retval
= -ENOPROTOOPT
;
521 if (asoc
->strreset_outstanding
) {
522 retval
= -EINPROGRESS
;
526 out
= params
->sas_outstrms
;
527 in
= params
->sas_instrms
;
528 outcnt
= stream
->outcnt
+ out
;
529 incnt
= stream
->incnt
+ in
;
530 if (outcnt
> SCTP_MAX_STREAM
|| incnt
> SCTP_MAX_STREAM
||
537 retval
= sctp_stream_alloc_out(stream
, outcnt
, GFP_KERNEL
);
542 chunk
= sctp_make_strreset_addstrm(asoc
, out
, in
);
548 asoc
->strreset_chunk
= chunk
;
549 sctp_chunk_hold(asoc
->strreset_chunk
);
551 retval
= sctp_send_reconf(asoc
, chunk
);
553 sctp_chunk_put(asoc
->strreset_chunk
);
554 asoc
->strreset_chunk
= NULL
;
558 stream
->outcnt
= outcnt
;
560 asoc
->strreset_outstanding
= !!out
+ !!in
;
/* sctp_chunk_lookup_strreset_param(): scan the parameters of the locally
 * outstanding strreset_chunk for one matching @resp_seq and/or the given
 * param @type (either filter may be 0 = wildcard).  All reconf params
 * share sctp_strreset_tsnreq's leading layout, so it is used generically
 * to read request_seq.  Presumably returns the matching paramhdr or NULL
 * (the return statements and the NULL strreset_chunk guard are missing
 * from this lossy extraction -- TODO confirm against upstream).
 */
566 static struct sctp_paramhdr
*sctp_chunk_lookup_strreset_param(
567 struct sctp_association
*asoc
, __be32 resp_seq
,
570 struct sctp_chunk
*chunk
= asoc
->strreset_chunk
;
571 struct sctp_reconf_chunk
*hdr
;
572 union sctp_params param
;
577 hdr
= (struct sctp_reconf_chunk
*)chunk
->chunk_hdr
;
578 sctp_walk_params(param
, hdr
, params
) {
579 /* sctp_strreset_tsnreq is actually the basic structure
580 * of all stream reconf params, so it's safe to use it
581 * to access request_seq.
583 struct sctp_strreset_tsnreq
*req
= param
.v
;
585 if ((!resp_seq
|| req
->request_seq
== resp_seq
) &&
586 (!type
|| type
== req
->param_hdr
.type
))
/* sctp_update_strreset_result(): remember the results of the two most
 * recently processed reconf requests -- shift slot [0] into [1] and
 * store the new @result in [0] -- so retransmitted requests with an
 * older sequence number can be answered with the cached result.
 */
593 static void sctp_update_strreset_result(struct sctp_association
*asoc
,
596 asoc
->strreset_result
[1] = asoc
->strreset_result
[0];
597 asoc
->strreset_result
[0] = result
;
/* sctp_process_strreset_outreq(): handle a peer's Outgoing SSN Reset
 * Request (RFC 6525 sect. 5.2.2) -- which resets OUR incoming streams.
 * Flow: defer (IN_PROGRESS) if send_reset_at_tsn is beyond our
 * cumulative TSN; validate request_seq against strreset_inseq (bad
 * seqno, or replay answered from the cached strreset_result); bump
 * inseq; deny if the local reset-stream feature is off (checked after
 * the inseq bump -- see inline comment); validate listed sids against
 * incnt; if this answers our own outstanding IN request, resolve it
 * (decrement outstanding, bump outseq, stop the reconf timer and drop
 * the chunk when nothing remains outstanding); zero the incoming ->mid
 * of the listed (or all) streams; report PERFORMED and emit a
 * SCTP_STREAM_RESET_INCOMING_SSN ulpevent; cache the result and reply
 * with a strreset response chunk.
 * NOTE(review): lossy extraction -- goto labels, several braces and the
 * declarations of request_seq/nums/i are missing from this view.
 */
600 struct sctp_chunk
*sctp_process_strreset_outreq(
601 struct sctp_association
*asoc
,
602 union sctp_params param
,
603 struct sctp_ulpevent
**evp
)
605 struct sctp_strreset_outreq
*outreq
= param
.v
;
606 struct sctp_stream
*stream
= &asoc
->stream
;
607 __u32 result
= SCTP_STRRESET_DENIED
;
608 __be16
*str_p
= NULL
;
612 request_seq
= ntohl(outreq
->request_seq
);
614 if (ntohl(outreq
->send_reset_at_tsn
) >
615 sctp_tsnmap_get_ctsn(&asoc
->peer
.tsn_map
)) {
616 result
= SCTP_STRRESET_IN_PROGRESS
;
620 if (TSN_lt(asoc
->strreset_inseq
, request_seq
) ||
621 TSN_lt(request_seq
, asoc
->strreset_inseq
- 2)) {
622 result
= SCTP_STRRESET_ERR_BAD_SEQNO
;
624 } else if (TSN_lt(request_seq
, asoc
->strreset_inseq
)) {
625 i
= asoc
->strreset_inseq
- request_seq
- 1;
626 result
= asoc
->strreset_result
[i
];
629 asoc
->strreset_inseq
++;
631 /* Check strreset_enable after inseq inc, as sender cannot tell
632 * the peer doesn't enable strreset after receiving response with
633 * result denied, as well as to keep consistent with bsd.
635 if (!(asoc
->strreset_enable
& SCTP_ENABLE_RESET_STREAM_REQ
))
638 nums
= (ntohs(param
.p
->length
) - sizeof(*outreq
)) / sizeof(__u16
);
639 str_p
= outreq
->list_of_streams
;
640 for (i
= 0; i
< nums
; i
++) {
641 if (ntohs(str_p
[i
]) >= stream
->incnt
) {
642 result
= SCTP_STRRESET_ERR_WRONG_SSN
;
647 if (asoc
->strreset_chunk
) {
648 if (!sctp_chunk_lookup_strreset_param(
649 asoc
, outreq
->response_seq
,
650 SCTP_PARAM_RESET_IN_REQUEST
)) {
651 /* same process with outstanding isn't 0 */
652 result
= SCTP_STRRESET_ERR_IN_PROGRESS
;
656 asoc
->strreset_outstanding
--;
657 asoc
->strreset_outseq
++;
659 if (!asoc
->strreset_outstanding
) {
660 struct sctp_transport
*t
;
662 t
= asoc
->strreset_chunk
->transport
;
663 if (del_timer(&t
->reconf_timer
))
664 sctp_transport_put(t
);
666 sctp_chunk_put(asoc
->strreset_chunk
);
667 asoc
->strreset_chunk
= NULL
;
672 for (i
= 0; i
< nums
; i
++)
673 SCTP_SI(stream
, ntohs(str_p
[i
]))->mid
= 0;
675 for (i
= 0; i
< stream
->incnt
; i
++)
676 SCTP_SI(stream
, i
)->mid
= 0;
678 result
= SCTP_STRRESET_PERFORMED
;
680 *evp
= sctp_ulpevent_make_stream_reset_event(asoc
,
681 SCTP_STREAM_RESET_INCOMING_SSN
, nums
, str_p
, GFP_ATOMIC
);
684 sctp_update_strreset_result(asoc
, result
);
686 return sctp_make_strreset_resp(asoc
, result
, request_seq
);
/* sctp_process_strreset_inreq(): handle a peer's Incoming SSN Reset
 * Request (RFC 6525 sect. 5.2.3) -- the peer asks us to reset OUR
 * outgoing streams, which we do by issuing our own outgoing reset
 * request back.  Flow: validate request_seq against strreset_inseq
 * (bad seqno / cached-result replay); bump inseq; deny if the feature
 * is disabled; refuse (ERR_IN_PROGRESS) while another reconf is
 * outstanding; validate listed sids against outcnt; answer IN_PROGRESS
 * (and un-bump inseq) if those streams still have queued data; build
 * an outgoing strreset request for the listed streams, close them,
 * record it as the outstanding chunk, and mark PERFORMED.  The cached
 * result is updated and a response chunk is returned.
 * NOTE(review): lossy extraction -- goto labels, state constants after
 * "=", and the declarations of request_seq/nums/str_p/i are missing.
 */
689 struct sctp_chunk
*sctp_process_strreset_inreq(
690 struct sctp_association
*asoc
,
691 union sctp_params param
,
692 struct sctp_ulpevent
**evp
)
694 struct sctp_strreset_inreq
*inreq
= param
.v
;
695 struct sctp_stream
*stream
= &asoc
->stream
;
696 __u32 result
= SCTP_STRRESET_DENIED
;
697 struct sctp_chunk
*chunk
= NULL
;
702 request_seq
= ntohl(inreq
->request_seq
);
703 if (TSN_lt(asoc
->strreset_inseq
, request_seq
) ||
704 TSN_lt(request_seq
, asoc
->strreset_inseq
- 2)) {
705 result
= SCTP_STRRESET_ERR_BAD_SEQNO
;
707 } else if (TSN_lt(request_seq
, asoc
->strreset_inseq
)) {
708 i
= asoc
->strreset_inseq
- request_seq
- 1;
709 result
= asoc
->strreset_result
[i
];
710 if (result
== SCTP_STRRESET_PERFORMED
)
714 asoc
->strreset_inseq
++;
716 if (!(asoc
->strreset_enable
& SCTP_ENABLE_RESET_STREAM_REQ
))
719 if (asoc
->strreset_outstanding
) {
720 result
= SCTP_STRRESET_ERR_IN_PROGRESS
;
724 nums
= (ntohs(param
.p
->length
) - sizeof(*inreq
)) / sizeof(__u16
);
725 str_p
= inreq
->list_of_streams
;
726 for (i
= 0; i
< nums
; i
++) {
727 if (ntohs(str_p
[i
]) >= stream
->outcnt
) {
728 result
= SCTP_STRRESET_ERR_WRONG_SSN
;
733 if (!sctp_stream_outq_is_empty(stream
, nums
, str_p
)) {
734 result
= SCTP_STRRESET_IN_PROGRESS
;
735 asoc
->strreset_inseq
--;
739 chunk
= sctp_make_strreset_req(asoc
, nums
, str_p
, 1, 0);
744 for (i
= 0; i
< nums
; i
++)
745 SCTP_SO(stream
, ntohs(str_p
[i
]))->state
=
748 for (i
= 0; i
< stream
->outcnt
; i
++)
749 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_CLOSED
;
751 asoc
->strreset_chunk
= chunk
;
752 asoc
->strreset_outstanding
= 1;
753 sctp_chunk_hold(asoc
->strreset_chunk
);
755 result
= SCTP_STRRESET_PERFORMED
;
758 sctp_update_strreset_result(asoc
, result
);
761 chunk
= sctp_make_strreset_resp(asoc
, result
, request_seq
);
/* sctp_process_strreset_tsnreq(): handle a peer's SSN/TSN Reset Request
 * (RFC 6525 sect. 5.2.4, receiver-side rules G1-G5, which are quoted in
 * the inline comments below).  Flow: validate request_seq (bad seqno /
 * cached-result replay, recomputing the TSNs for a replayed PERFORMED);
 * answer IN_PROGRESS while our outqueue is non-empty; bump inseq; deny
 * if the assoc-reset feature is off or another reconf is outstanding.
 * Then: G4 flush the reassembly/ordering queues via report_ftsn of the
 * max TSN seen; G1 pick the peer's next TSN as ctsn + 2^31 and re-init
 * the tsnmap; G3 free the outqueue; G2 advance our ack points to
 * next_tsn - 1; G5 zero all outgoing mid/mid_uo and incoming mid;
 * mark PERFORMED, emit an assoc-reset ulpevent, cache the result and
 * return a TSN-reset response carrying the negotiated TSNs.
 * NOTE(review): lossy extraction -- goto labels, the declarations of
 * request_seq/i, several braces and the response arguments after line
 * 859 are missing from this view.
 */
766 struct sctp_chunk
*sctp_process_strreset_tsnreq(
767 struct sctp_association
*asoc
,
768 union sctp_params param
,
769 struct sctp_ulpevent
**evp
)
771 __u32 init_tsn
= 0, next_tsn
= 0, max_tsn_seen
;
772 struct sctp_strreset_tsnreq
*tsnreq
= param
.v
;
773 struct sctp_stream
*stream
= &asoc
->stream
;
774 __u32 result
= SCTP_STRRESET_DENIED
;
778 request_seq
= ntohl(tsnreq
->request_seq
);
779 if (TSN_lt(asoc
->strreset_inseq
, request_seq
) ||
780 TSN_lt(request_seq
, asoc
->strreset_inseq
- 2)) {
781 result
= SCTP_STRRESET_ERR_BAD_SEQNO
;
783 } else if (TSN_lt(request_seq
, asoc
->strreset_inseq
)) {
784 i
= asoc
->strreset_inseq
- request_seq
- 1;
785 result
= asoc
->strreset_result
[i
];
786 if (result
== SCTP_STRRESET_PERFORMED
) {
787 next_tsn
= asoc
->ctsn_ack_point
+ 1;
789 sctp_tsnmap_get_ctsn(&asoc
->peer
.tsn_map
) + 1;
794 if (!sctp_outq_is_empty(&asoc
->outqueue
)) {
795 result
= SCTP_STRRESET_IN_PROGRESS
;
799 asoc
->strreset_inseq
++;
801 if (!(asoc
->strreset_enable
& SCTP_ENABLE_RESET_ASSOC_REQ
))
804 if (asoc
->strreset_outstanding
) {
805 result
= SCTP_STRRESET_ERR_IN_PROGRESS
;
809 /* G4: The same processing as though a FWD-TSN chunk (as defined in
810 * [RFC3758]) with all streams affected and a new cumulative TSN
811 * ACK of the Receiver's Next TSN minus 1 were received MUST be
814 max_tsn_seen
= sctp_tsnmap_get_max_tsn_seen(&asoc
->peer
.tsn_map
);
815 asoc
->stream
.si
->report_ftsn(&asoc
->ulpq
, max_tsn_seen
);
817 /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
818 * TSN that the peer should use to send the next DATA chunk. The
819 * value SHOULD be the smallest TSN not acknowledged by the
820 * receiver of the request plus 2^31.
822 init_tsn
= sctp_tsnmap_get_ctsn(&asoc
->peer
.tsn_map
) + (1 << 31);
823 sctp_tsnmap_init(&asoc
->peer
.tsn_map
, SCTP_TSN_MAP_INITIAL
,
824 init_tsn
, GFP_ATOMIC
);
826 /* G3: The same processing as though a SACK chunk with no gap report
827 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
828 * received MUST be performed.
830 sctp_outq_free(&asoc
->outqueue
);
832 /* G2: Compute an appropriate value for the local endpoint's next TSN,
833 * i.e., the next TSN assigned by the receiver of the SSN/TSN reset
834 * chunk. The value SHOULD be the highest TSN sent by the receiver
835 * of the request plus 1.
837 next_tsn
= asoc
->next_tsn
;
838 asoc
->ctsn_ack_point
= next_tsn
- 1;
839 asoc
->adv_peer_ack_point
= asoc
->ctsn_ack_point
;
841 /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
842 * incoming and outgoing streams.
844 for (i
= 0; i
< stream
->outcnt
; i
++) {
845 SCTP_SO(stream
, i
)->mid
= 0;
846 SCTP_SO(stream
, i
)->mid_uo
= 0;
848 for (i
= 0; i
< stream
->incnt
; i
++)
849 SCTP_SI(stream
, i
)->mid
= 0;
851 result
= SCTP_STRRESET_PERFORMED
;
853 *evp
= sctp_ulpevent_make_assoc_reset_event(asoc
, 0, init_tsn
,
854 next_tsn
, GFP_ATOMIC
);
857 sctp_update_strreset_result(asoc
, result
);
859 return sctp_make_strreset_tsnresp(asoc
, result
, request_seq
,
/* sctp_process_strreset_addstrm_out(): handle a peer's Add Outgoing
 * Streams request (RFC 6525 sect. 5.2.5) -- the peer adds outgoing
 * streams, so WE grow our incoming side.  Flow: validate request_seq
 * (bad seqno / cached-result replay); bump inseq; deny if the
 * change-assoc feature is off, the stream count is zero, or incnt would
 * exceed SCTP_MAX_STREAM; grow the in array (deny on allocation
 * failure); if this also answers our own outstanding ADD_IN request,
 * resolve it (decrement outstanding, bump outseq, stop the reconf timer
 * and release the chunk when none remain); commit the new incnt, mark
 * PERFORMED, emit a stream-change ulpevent, cache the result and return
 * a response chunk.
 * NOTE(review): lossy extraction -- goto labels, the declarations of
 * in/i and some braces are missing from this view.
 */
863 struct sctp_chunk
*sctp_process_strreset_addstrm_out(
864 struct sctp_association
*asoc
,
865 union sctp_params param
,
866 struct sctp_ulpevent
**evp
)
868 struct sctp_strreset_addstrm
*addstrm
= param
.v
;
869 struct sctp_stream
*stream
= &asoc
->stream
;
870 __u32 result
= SCTP_STRRESET_DENIED
;
871 __u32 request_seq
, incnt
;
874 request_seq
= ntohl(addstrm
->request_seq
);
875 if (TSN_lt(asoc
->strreset_inseq
, request_seq
) ||
876 TSN_lt(request_seq
, asoc
->strreset_inseq
- 2)) {
877 result
= SCTP_STRRESET_ERR_BAD_SEQNO
;
879 } else if (TSN_lt(request_seq
, asoc
->strreset_inseq
)) {
880 i
= asoc
->strreset_inseq
- request_seq
- 1;
881 result
= asoc
->strreset_result
[i
];
884 asoc
->strreset_inseq
++;
886 if (!(asoc
->strreset_enable
& SCTP_ENABLE_CHANGE_ASSOC_REQ
))
889 in
= ntohs(addstrm
->number_of_streams
);
890 incnt
= stream
->incnt
+ in
;
891 if (!in
|| incnt
> SCTP_MAX_STREAM
)
894 if (sctp_stream_alloc_in(stream
, incnt
, GFP_ATOMIC
))
897 if (asoc
->strreset_chunk
) {
898 if (!sctp_chunk_lookup_strreset_param(
899 asoc
, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS
)) {
900 /* same process with outstanding isn't 0 */
901 result
= SCTP_STRRESET_ERR_IN_PROGRESS
;
905 asoc
->strreset_outstanding
--;
906 asoc
->strreset_outseq
++;
908 if (!asoc
->strreset_outstanding
) {
909 struct sctp_transport
*t
;
911 t
= asoc
->strreset_chunk
->transport
;
912 if (del_timer(&t
->reconf_timer
))
913 sctp_transport_put(t
);
915 sctp_chunk_put(asoc
->strreset_chunk
);
916 asoc
->strreset_chunk
= NULL
;
920 stream
->incnt
= incnt
;
922 result
= SCTP_STRRESET_PERFORMED
;
924 *evp
= sctp_ulpevent_make_stream_change_event(asoc
,
925 0, ntohs(addstrm
->number_of_streams
), 0, GFP_ATOMIC
);
928 sctp_update_strreset_result(asoc
, result
);
930 return sctp_make_strreset_resp(asoc
, result
, request_seq
);
/* sctp_process_strreset_addstrm_in(): handle a peer's Add Incoming
 * Streams request (RFC 6525 sect. 5.2.6) -- the peer wants more incoming
 * streams, so WE must add outgoing streams by issuing our own Add
 * Outgoing Streams request.  Flow: validate request_seq (bad seqno /
 * cached-result replay); bump inseq; deny if the change-assoc feature is
 * off; refuse (ERR_IN_PROGRESS) while another reconf is outstanding;
 * deny on zero count, outcnt overflow past SCTP_MAX_STREAM, or
 * allocation failure; build an add-outgoing-streams request, record it
 * as the outstanding chunk, commit the new outcnt and mark PERFORMED.
 * The cached result is updated and a response chunk is returned.
 * NOTE(review): lossy extraction -- goto labels, the declarations of
 * out/ret/i, the ret check and the final return are missing.
 */
933 struct sctp_chunk
*sctp_process_strreset_addstrm_in(
934 struct sctp_association
*asoc
,
935 union sctp_params param
,
936 struct sctp_ulpevent
**evp
)
938 struct sctp_strreset_addstrm
*addstrm
= param
.v
;
939 struct sctp_stream
*stream
= &asoc
->stream
;
940 __u32 result
= SCTP_STRRESET_DENIED
;
941 struct sctp_chunk
*chunk
= NULL
;
942 __u32 request_seq
, outcnt
;
946 request_seq
= ntohl(addstrm
->request_seq
);
947 if (TSN_lt(asoc
->strreset_inseq
, request_seq
) ||
948 TSN_lt(request_seq
, asoc
->strreset_inseq
- 2)) {
949 result
= SCTP_STRRESET_ERR_BAD_SEQNO
;
951 } else if (TSN_lt(request_seq
, asoc
->strreset_inseq
)) {
952 i
= asoc
->strreset_inseq
- request_seq
- 1;
953 result
= asoc
->strreset_result
[i
];
954 if (result
== SCTP_STRRESET_PERFORMED
)
958 asoc
->strreset_inseq
++;
960 if (!(asoc
->strreset_enable
& SCTP_ENABLE_CHANGE_ASSOC_REQ
))
963 if (asoc
->strreset_outstanding
) {
964 result
= SCTP_STRRESET_ERR_IN_PROGRESS
;
968 out
= ntohs(addstrm
->number_of_streams
);
969 outcnt
= stream
->outcnt
+ out
;
970 if (!out
|| outcnt
> SCTP_MAX_STREAM
)
973 ret
= sctp_stream_alloc_out(stream
, outcnt
, GFP_ATOMIC
);
977 chunk
= sctp_make_strreset_addstrm(asoc
, out
, 0);
981 asoc
->strreset_chunk
= chunk
;
982 asoc
->strreset_outstanding
= 1;
983 sctp_chunk_hold(asoc
->strreset_chunk
);
985 stream
->outcnt
= outcnt
;
987 result
= SCTP_STRRESET_PERFORMED
;
990 sctp_update_strreset_result(asoc
, result
);
993 chunk
= sctp_make_strreset_resp(asoc
, result
, request_seq
);
998 struct sctp_chunk
*sctp_process_strreset_resp(
999 struct sctp_association
*asoc
,
1000 union sctp_params param
,
1001 struct sctp_ulpevent
**evp
)
1003 struct sctp_stream
*stream
= &asoc
->stream
;
1004 struct sctp_strreset_resp
*resp
= param
.v
;
1005 struct sctp_transport
*t
;
1006 __u16 i
, nums
, flags
= 0;
1007 struct sctp_paramhdr
*req
;
1010 req
= sctp_chunk_lookup_strreset_param(asoc
, resp
->response_seq
, 0);
1014 result
= ntohl(resp
->result
);
1015 if (result
!= SCTP_STRRESET_PERFORMED
) {
1016 /* if in progress, do nothing but retransmit */
1017 if (result
== SCTP_STRRESET_IN_PROGRESS
)
1019 else if (result
== SCTP_STRRESET_DENIED
)
1020 flags
= SCTP_STREAM_RESET_DENIED
;
1022 flags
= SCTP_STREAM_RESET_FAILED
;
1025 if (req
->type
== SCTP_PARAM_RESET_OUT_REQUEST
) {
1026 struct sctp_strreset_outreq
*outreq
;
1029 outreq
= (struct sctp_strreset_outreq
*)req
;
1030 str_p
= outreq
->list_of_streams
;
1031 nums
= (ntohs(outreq
->param_hdr
.length
) - sizeof(*outreq
)) /
1034 if (result
== SCTP_STRRESET_PERFORMED
) {
1035 struct sctp_stream_out
*sout
;
1037 for (i
= 0; i
< nums
; i
++) {
1038 sout
= SCTP_SO(stream
, ntohs(str_p
[i
]));
1043 for (i
= 0; i
< stream
->outcnt
; i
++) {
1044 sout
= SCTP_SO(stream
, i
);
1051 flags
|= SCTP_STREAM_RESET_OUTGOING_SSN
;
1053 for (i
= 0; i
< stream
->outcnt
; i
++)
1054 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
1056 *evp
= sctp_ulpevent_make_stream_reset_event(asoc
, flags
,
1057 nums
, str_p
, GFP_ATOMIC
);
1058 } else if (req
->type
== SCTP_PARAM_RESET_IN_REQUEST
) {
1059 struct sctp_strreset_inreq
*inreq
;
1062 /* if the result is performed, it's impossible for inreq */
1063 if (result
== SCTP_STRRESET_PERFORMED
)
1066 inreq
= (struct sctp_strreset_inreq
*)req
;
1067 str_p
= inreq
->list_of_streams
;
1068 nums
= (ntohs(inreq
->param_hdr
.length
) - sizeof(*inreq
)) /
1071 flags
|= SCTP_STREAM_RESET_INCOMING_SSN
;
1073 *evp
= sctp_ulpevent_make_stream_reset_event(asoc
, flags
,
1074 nums
, str_p
, GFP_ATOMIC
);
1075 } else if (req
->type
== SCTP_PARAM_RESET_TSN_REQUEST
) {
1076 struct sctp_strreset_resptsn
*resptsn
;
1079 /* check for resptsn, as sctp_verify_reconf didn't do it*/
1080 if (ntohs(param
.p
->length
) != sizeof(*resptsn
))
1083 resptsn
= (struct sctp_strreset_resptsn
*)resp
;
1084 stsn
= ntohl(resptsn
->senders_next_tsn
);
1085 rtsn
= ntohl(resptsn
->receivers_next_tsn
);
1087 if (result
== SCTP_STRRESET_PERFORMED
) {
1088 __u32 mtsn
= sctp_tsnmap_get_max_tsn_seen(
1089 &asoc
->peer
.tsn_map
);
1092 asoc
->stream
.si
->report_ftsn(&asoc
->ulpq
, mtsn
);
1094 sctp_tsnmap_init(&asoc
->peer
.tsn_map
,
1095 SCTP_TSN_MAP_INITIAL
,
1098 /* Clean up sacked and abandoned queues only. As the
1099 * out_chunk_list may not be empty, splice it to temp,
1100 * then get it back after sctp_outq_free is done.
1102 list_splice_init(&asoc
->outqueue
.out_chunk_list
, &temp
);
1103 sctp_outq_free(&asoc
->outqueue
);
1104 list_splice_init(&temp
, &asoc
->outqueue
.out_chunk_list
);
1106 asoc
->next_tsn
= rtsn
;
1107 asoc
->ctsn_ack_point
= asoc
->next_tsn
- 1;
1108 asoc
->adv_peer_ack_point
= asoc
->ctsn_ack_point
;
1110 for (i
= 0; i
< stream
->outcnt
; i
++) {
1111 SCTP_SO(stream
, i
)->mid
= 0;
1112 SCTP_SO(stream
, i
)->mid_uo
= 0;
1114 for (i
= 0; i
< stream
->incnt
; i
++)
1115 SCTP_SI(stream
, i
)->mid
= 0;
1118 for (i
= 0; i
< stream
->outcnt
; i
++)
1119 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
1121 *evp
= sctp_ulpevent_make_assoc_reset_event(asoc
, flags
,
1122 stsn
, rtsn
, GFP_ATOMIC
);
1123 } else if (req
->type
== SCTP_PARAM_RESET_ADD_OUT_STREAMS
) {
1124 struct sctp_strreset_addstrm
*addstrm
;
1127 addstrm
= (struct sctp_strreset_addstrm
*)req
;
1128 nums
= ntohs(addstrm
->number_of_streams
);
1129 number
= stream
->outcnt
- nums
;
1131 if (result
== SCTP_STRRESET_PERFORMED
)
1132 for (i
= number
; i
< stream
->outcnt
; i
++)
1133 SCTP_SO(stream
, i
)->state
= SCTP_STREAM_OPEN
;
1135 stream
->outcnt
= number
;
1137 *evp
= sctp_ulpevent_make_stream_change_event(asoc
, flags
,
1138 0, nums
, GFP_ATOMIC
);
1139 } else if (req
->type
== SCTP_PARAM_RESET_ADD_IN_STREAMS
) {
1140 struct sctp_strreset_addstrm
*addstrm
;
1142 /* if the result is performed, it's impossible for addstrm in
1145 if (result
== SCTP_STRRESET_PERFORMED
)
1148 addstrm
= (struct sctp_strreset_addstrm
*)req
;
1149 nums
= ntohs(addstrm
->number_of_streams
);
1151 *evp
= sctp_ulpevent_make_stream_change_event(asoc
, flags
,
1152 nums
, 0, GFP_ATOMIC
);
1155 asoc
->strreset_outstanding
--;
1156 asoc
->strreset_outseq
++;
1158 /* remove everything for this reconf request */
1159 if (!asoc
->strreset_outstanding
) {
1160 t
= asoc
->strreset_chunk
->transport
;
1161 if (del_timer(&t
->reconf_timer
))
1162 sctp_transport_put(t
);
1164 sctp_chunk_put(asoc
->strreset_chunk
);
1165 asoc
->strreset_chunk
= NULL
;