// SPDX-License-Identifier: GPL-2.0-only
/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol
 *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

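/* Slab caches for the Ack Vector container and for the records of sent Ack Vectors */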
static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
        struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

        if (av != NULL) {
                av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
                INIT_LIST_HEAD(&av->av_records);
        }
        return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
        struct dccp_ackvec_record *cur, *next;

        list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
                kmem_cache_free(dccp_ackvec_record_slab, cur);
        INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
        if (likely(av != NULL)) {
                dccp_ackvec_purge_records(av);
                kmem_cache_free(dccp_ackvec_slab, av);
        }
}

/**
 * dccp_ackvec_update_records - Record information about sent Ack Vectors
 * @av: Ack Vector records to update
 * @seqno: Sequence number of the packet carrying the Ack Vector just sent
 * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
 */
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
        struct dccp_ackvec_record *avr;

        avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
        if (avr == NULL)
                return -ENOBUFS;

        avr->avr_ack_seqno = seqno;
        avr->avr_ack_ptr = av->av_buf_head;
        avr->avr_ack_ackno = av->av_buf_ackno;
        avr->avr_ack_nonce = nonce_sum;
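        /*
         * Snapshot of the head cell's run length at send time; it is compared
         * against the live run length later in dccp_ackvec_clear_state().
         */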
        avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
        /*
         * When the buffer overflows, we keep no more than one record. This is
         * the simplest way of disambiguating sender-Acks dating from before the
         * overflow from sender-Acks which refer to after the overflow; a simple
         * solution is preferable here since we are handling an exception.
         */
        if (av->av_overflow)
                dccp_ackvec_purge_records(av);
        /*
         * Since GSS is incremented for each packet, the list is automatically
         * arranged in descending order of @ack_seqno.
         */
        list_add(&avr->avr_node, &av->av_records);

        dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
                      (unsigned long long)avr->avr_ack_seqno,
                      (unsigned long long)avr->avr_ack_ackno,
                      avr->avr_ack_runlen);
        return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
                                                     const u64 ackno)
{
        struct dccp_ackvec_record *avr;
        /*
         * Exploit that records are inserted in descending order of sequence
         * number and start with the oldest record first. If @ackno is `before'
         * the earliest ack_ackno, the packet is too old to be considered.
         */
        list_for_each_entry_reverse(avr, av_list, avr_node) {
                if (avr->avr_ack_seqno == ackno)
                        return avr;
                if (before48(ackno, avr->avr_ack_seqno))
                        break;
        }
        return NULL;
}

/*
 * Buffer index and length computation using modulo-buffersize arithmetic.
 * Note that, as pointers move from right to left, head is `before' tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
        return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
        return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}
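
/*
 * Example: __ackvec_idx_sub(0, 1) wraps around to DCCPAV_MAX_ACKVEC_LEN - 1.
 * The head moves towards lower indices as new entries are added and re-enters
 * the buffer from its upper end once it passes index 0.
 */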

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
        if (unlikely(av->av_overflow))
                return DCCPAV_MAX_ACKVEC_LEN;
        return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}

/**
 * dccp_ackvec_update_old - Update previous state as per RFC 4340, 11.4.1
 * @av: non-empty buffer to update
 * @distance: negative or zero distance of @seqno from buf_ackno downward
 * @seqno: the (old) sequence number whose record is to be updated
 * @state: state in which packet carrying @seqno was received
 */
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
                                   u64 seqno, enum dccp_ackvec_states state)
{
        u16 ptr = av->av_buf_head;

        BUG_ON(distance > 0);
        if (unlikely(dccp_ackvec_is_empty(av)))
                return;

        do {
                u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

                if (distance + runlen >= 0) {
                        /*
                         * Only update the state if the packet has not been
                         * received yet. This is OK as per the second table in
                         * RFC 4340, 11.4.1; i.e. here we are using the
                         * following table:
                         *                      RECEIVED
                         *                       0   1   3
                         *              S      +---+---+---+
                         *              T    0 | 0 | 0 | 0 |
                         *              O      +---+---+---+
                         *              R    1 | 1 | 1 | 1 |
                         *              E      +---+---+---+
                         *              D    3 | 0 | 1 | 3 |
                         *                     +---+---+---+
                         * The "Not Received" state was set by reserve_seats().
                         */
                        if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
                                av->av_buf[ptr] = state;
                        else
                                dccp_pr_debug("Not changing %llu state to %u\n",
                                              (unsigned long long)seqno, state);
                        break;
                }

                distance += runlen + 1;
                ptr = __ackvec_idx_add(ptr, 1);

        } while (ptr != av->av_buf_tail);
}

/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
        u16 start = __ackvec_idx_add(av->av_buf_head, 1),
            len = DCCPAV_MAX_ACKVEC_LEN - start;

        /* check for buffer wrap-around */
        if (num > len) {
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
                start = 0;
                num -= len;
        }
        if (num)
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}

/**
 * dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer
 * @av: container of buffer to update (can be empty or non-empty)
 * @num_packets: number of packets to register (must be >= 1)
 * @seqno: sequence number of the first packet in @num_packets
 * @state: state in which packet carrying @seqno was received
 */
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
                                u64 seqno, enum dccp_ackvec_states state)
{
        u32 num_cells = num_packets;

        if (num_packets > DCCPAV_BURST_THRESH) {
                u32 lost_packets = num_packets - 1;

                DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
                /*
                 * We received 1 packet and have a loss of size "num_packets-1"
                 * which we squeeze into num_cells-1 rather than reserving an
                 * entire byte for each lost packet.
                 * The reason is that the vector grows in O(burst_length); when
                 * it grows too large there will be no room left for the
                 * payload. This is a trade-off: if a few packets out of the
                 * burst show up later, their state will not be changed; it is
                 * simply too costly to reshuffle/reallocate/copy the buffer
                 * each time. Should such problems persist, we will need to
                 * switch to a different underlying data structure.
                 */
                for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
                        u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN);

                        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
                        av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

                        lost_packets -= len;
                }
        }

        if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
                DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
                av->av_overflow = true;
        }

        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
        if (av->av_overflow)
                av->av_buf_tail = av->av_buf_head;

        av->av_buf[av->av_buf_head] = state;
        av->av_buf_ackno = seqno;

        if (num_packets > 1)
                dccp_ackvec_reserve_seats(av, num_packets - 1);
}

/**
 * dccp_ackvec_input - Register incoming packet in the buffer
 * @av: Ack Vector to register packet to
 * @skb: Packet to register
 */
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
        u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
        enum dccp_ackvec_states state = DCCPAV_RECEIVED;

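        /*
         * An empty buffer is seeded with a single entry. Otherwise the packet
         * either extends the head cell (it directly follows buf_ackno, carries
         * the same state and the run length is not yet exhausted), opens one
         * or more new cells (it is newer than buf_ackno), or updates an older,
         * already reserved cell in place.
         */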
        if (dccp_ackvec_is_empty(av)) {
                dccp_ackvec_add_new(av, 1, seqno, state);
                av->av_tail_ackno = seqno;

        } else {
                s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
                u8 *current_head = av->av_buf + av->av_buf_head;

                if (num_packets == 1 &&
                    dccp_ackvec_state(current_head) == state &&
                    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {

                        *current_head += 1;
                        av->av_buf_ackno = seqno;

                } else if (num_packets > 0) {
                        dccp_ackvec_add_new(av, num_packets, seqno, state);
                } else {
                        dccp_ackvec_update_old(av, num_packets, seqno, state);
                }
        }
}

/**
 * dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
 * @av: Ack Vector record to clean
 * @ackno: last Ack Vector which has been acknowledged
 *
 * This routine is called when the peer acknowledges the receipt of Ack Vectors
 * up to and including @ackno. While based on section A.3 of RFC 4340, it takes
 * additional precautions to prevent corrupted buffer state. In particular, we
 * use tail_ackno to identify outdated records; it always marks the earliest
 * packet of group (2) in 11.4.2.
 */
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
        struct dccp_ackvec_record *avr, *next;
        u8 runlen_now, eff_runlen;
        s64 delta;

        avr = dccp_ackvec_lookup(&av->av_records, ackno);
        if (avr == NULL)
                return;
        /*
         * Deal with outdated acknowledgments: this arises when e.g. there are
         * several old records and the acks from the peer come in slowly. In
         * that case we may still have records that pre-date tail_ackno.
         */
        delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
        if (delta < 0)
                goto free_records;
        /*
         * Deal with overlapping Ack Vectors: don't subtract more than the
         * number of packets between tail_ackno and ack_ackno.
         */
        eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

        runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
        /*
         * The run length of Ack Vector cells does not decrease over time. If
         * the run length is the same as at the time the Ack Vector was sent,
         * we free the ack_ptr cell. That cell cannot, however, be freed if the
         * run length has increased: in this case we need to move the tail
         * pointer backwards (towards higher indices), to its next-oldest
         * neighbour.
         */
        if (runlen_now > eff_runlen) {

                av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
                av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

                /* This move may not have cleared the overflow flag. */
                if (av->av_overflow)
                        av->av_overflow = (av->av_buf_head == av->av_buf_tail);
        } else {
                av->av_buf_tail = avr->avr_ack_ptr;
                /*
                 * We have made sure that avr points to a valid cell within the
                 * buffer. This cell is either older than head, or equals head
                 * (empty buffer): in both cases we no longer have any overflow.
                 */
                av->av_overflow = 0;
        }

        /*
         * The peer has acknowledged up to and including ack_ackno. Hence the
         * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
         */
        av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
        list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
                list_del(&avr->avr_node);
                kmem_cache_free(dccp_ackvec_record_slab, avr);
        }
}

/*
 * Routines to keep track of Ack Vectors received in an skb
 */
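/*
 * Note that each parsed entry stores @vec by reference rather than copying it,
 * so the list entries are only meaningful while the underlying option data
 * remains valid.
 */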
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
        struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

        if (new == NULL)
                return -ENOBUFS;
        new->vec = vec;
        new->len = len;
        new->nonce = nonce;

        list_add_tail(&new->node, head);
        return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
        struct dccp_ackvec_parsed *cur, *next;

        list_for_each_entry_safe(cur, next, parsed_chunks, node)
                kfree(cur);
        INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);

int __init dccp_ackvec_init(void)
{
        dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
                                             sizeof(struct dccp_ackvec), 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_slab == NULL)
                goto out_err;

        dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
                                             sizeof(struct dccp_ackvec_record),
                                             0, SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_record_slab == NULL)
                goto out_destroy_slab;

        return 0;

out_destroy_slab:
        kmem_cache_destroy(dccp_ackvec_slab);
        dccp_ackvec_slab = NULL;
out_err:
        DCCP_CRIT("Unable to create Ack Vector slab cache");
        return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
        kmem_cache_destroy(dccp_ackvec_slab);
        dccp_ackvec_slab = NULL;
        kmem_cache_destroy(dccp_ackvec_record_slab);
        dccp_ackvec_record_slab = NULL;
}