// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* First Come First Serve (a.k.a. FIFO)
 * RFC DRAFT ndata Section 3.1
 */
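/* FCFS keeps no per-stream state and makes no ordering decisions of its
 * own, so most of the callbacks below are intentionally no-ops.
 */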
static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
			       __u16 value, gfp_t gfp)
{
	return 0;
}

static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
			       __u16 *value)
{
	*value = 0;
	return 0;
}

static int sctp_sched_fcfs_init(struct sctp_stream *stream)
{
	return 0;
}

static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
				    gfp_t gfp)
{
	return 0;
}

static void sctp_sched_fcfs_free(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
				    struct sctp_datamsg *msg)
{
}

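/* Dequeue in arrival order: if a message on some stream is only partly
 * sent, out_curr points at that stream and the next fragment is taken
 * from its per-stream queue; otherwise the head of the outqueue is used.
 */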
static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
{
	struct sctp_stream *stream = &q->asoc->stream;
	struct sctp_chunk *ch = NULL;
	struct list_head *entry;

	if (list_empty(&q->out_chunk_list))
		goto out;

	if (stream->out_curr) {
		ch = list_entry(stream->out_curr->ext->outq.next,
				struct sctp_chunk, stream_list);
	} else {
		entry = q->out_chunk_list.next;
		ch = list_entry(entry, struct sctp_chunk, list);
	}

	sctp_sched_dequeue_common(q, ch);

out:
	return ch;
}

static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
					 struct sctp_chunk *chunk)
{
}

static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
{
}

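/* Scheduler operations table for FCFS; the generic code below only ever
 * dispatches to a scheduler through one of these tables.
 */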
static struct sctp_sched_ops sctp_sched_fcfs = {
	.set = sctp_sched_fcfs_set,
	.get = sctp_sched_fcfs_get,
	.init = sctp_sched_fcfs_init,
	.init_sid = sctp_sched_fcfs_init_sid,
	.free = sctp_sched_fcfs_free,
	.enqueue = sctp_sched_fcfs_enqueue,
	.dequeue = sctp_sched_fcfs_dequeue,
	.dequeue_done = sctp_sched_fcfs_dequeue_done,
	.sched_all = sctp_sched_fcfs_sched_all,
	.unsched_all = sctp_sched_fcfs_unsched_all,
};

static void sctp_sched_ops_fcfs_init(void)
{
	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
}

/* API to other parts of the stack */

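/* Table of registered schedulers, indexed by enum sctp_sched_type. */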
static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];

void sctp_sched_ops_register(enum sctp_sched_type sched,
			     struct sctp_sched_ops *sched_ops)
{
	sctp_sched_ops[sched] = sched_ops;
}

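/* Populate the scheduler table with all built-in schedulers; intended to
 * run once while SCTP itself initializes.
 */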
void sctp_sched_ops_init(void)
{
	sctp_sched_ops_fcfs_init();
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
}

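/* Switch an association to a new scheduler: tear down the old scheduler's
 * per-stream state, initialize the new one, and re-enqueue every message
 * already sitting in the outqueue so the new scheduler sees it.
 */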
int sctp_sched_set_sched(struct sctp_association *asoc,
			 enum sctp_sched_type sched)
{
	struct sctp_sched_ops *old = asoc->outqueue.sched;
	struct sctp_datamsg *msg = NULL;
	struct sctp_sched_ops *n;
	struct sctp_chunk *ch;
	int i, ret = 0;

	if (sched > SCTP_SS_MAX)
		return -EINVAL;

	n = sctp_sched_ops[sched];
	if (old == n)
		return ret;

	if (old) {
		old->free(&asoc->stream);

		/* Give the next scheduler a clean slate: zero everything in
		 * the per-stream extension that follows the outq member.
		 */
		for (i = 0; i < asoc->stream.outcnt; i++) {
			void *p = SCTP_SO(&asoc->stream, i)->ext;

			if (!p)
				continue;

			p += offsetofend(struct sctp_stream_out_ext, outq);
			memset(p, 0, sizeof(struct sctp_stream_out_ext) -
				     offsetofend(struct sctp_stream_out_ext, outq));
		}
	}

	asoc->outqueue.sched = n;
	n->init(&asoc->stream);
	for (i = 0; i < asoc->stream.outcnt; i++) {
		if (!SCTP_SO(&asoc->stream, i)->ext)
			continue;

		ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
		if (ret)
			goto err;
	}

	/* We have to requeue all chunks already queued. */
	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
		if (ch->msg == msg)
			continue;
		msg = ch->msg;
		n->enqueue(&asoc->outqueue, msg);
	}

	return ret;

err:
	n->free(&asoc->stream);
	asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */

	return ret;
}

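/* Report which scheduler is active as its enum sctp_sched_type index. */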
int sctp_sched_get_sched(struct sctp_association *asoc)
{
	int i;

	for (i = 0; i <= SCTP_SS_MAX; i++)
		if (asoc->outqueue.sched == sctp_sched_ops[i])
			return i;

	return 0;
}

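/* Set a scheduler parameter (e.g. a stream's priority or weight) for one
 * stream, allocating the per-stream extension on first use.
 */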
int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
			 __u16 value, gfp_t gfp)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext) {
		int ret;

		ret = sctp_stream_init_ext(&asoc->stream, sid);
		if (ret)
			return ret;
	}

	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
}

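/* Read back a stream's scheduler parameter; succeeds without touching
 * *value if the stream has no extension state yet.
 */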
int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
			 __u16 *value)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext)
		return 0;

	return asoc->outqueue.sched->get(&asoc->stream, sid, value);
}

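/* Called when a chunk has been fully pushed to the transport.  If the
 * peer cannot handle interleaving, an unfinished message pins its stream
 * as out_curr so the remaining fragments go out back to back.
 */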
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
	    !q->asoc->peer.intl_capable) {
		struct sctp_stream_out *sout;
		__u16 sid;

		/* datamsg is not finished, so save it as the current one,
		 * in case the application switches scheduler or a higher
		 * priority stream comes in.
		 */
		sid = sctp_chunk_stream_no(ch);
		sout = SCTP_SO(&q->asoc->stream, sid);
		q->asoc->stream.out_curr = sout;
		return;
	}

	q->asoc->stream.out_curr = NULL;
	q->sched->dequeue_done(q, ch);
}

/* Auxiliary functions for the schedulers */
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
{
	list_del_init(&ch->list);
	list_del_init(&ch->stream_list);
	q->out_qlen -= ch->skb->len;
}

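/* Prepare a stream's extension for scheduling before handing it to the
 * active scheduler's own per-stream init.
 */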
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;

	INIT_LIST_HEAD(&ext->outq);

	return sched->init_sid(stream, sid, gfp);
}

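/* Map a stream back to its owning association's scheduler. */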
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);

	return asoc->outqueue.sched;
}