rxrpc: Obtain RTT data by requesting ACKs on DATA packets
net/rxrpc/input.c
/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
                              struct rxrpc_call *call, rxrpc_seq_t seq)
{
        if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
                            int skew)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
                          true, true);
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to)
{
        struct sk_buff *skb, *list = NULL;
        int ix;

        spin_lock(&call->lock);

        while (before(call->tx_hard_ack, to)) {
                call->tx_hard_ack++;
                ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
                skb = call->rxtx_buffer[ix];
                rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
                call->rxtx_buffer[ix] = NULL;
                call->rxtx_annotations[ix] = 0;
                skb->next = list;
                list = skb;
        }

        spin_unlock(&call->lock);

        trace_rxrpc_transmit(call, rxrpc_transmit_rotate);
        wake_up(&call->waitq);

        while (list) {
                skb = list;
                list = skb->next;
                skb->next = NULL;
                rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
        }
}
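
/*
 * Illustrative sketch (not part of the kernel sources): the Tx and Rx
 * rings are fixed-size, power-of-two arrays indexed by masking the
 * sequence number.  Assuming RXRPC_RXTX_BUFF_SIZE is 64, so that
 * RXRPC_RXTX_BUFF_MASK is 63:
 *
 *	static unsigned int ring_slot(u32 seq)
 *	{
 *		return seq & 63;	// seq 1 -> 1, seq 64 -> 0, seq 65 -> 1
 *	}
 *
 * A slot is only reused after rxrpc_rotate_tx_window() above has cleared
 * it as the hard-ACK point advances, so a packet 64 sequence numbers
 * ahead can never clobber a live entry.
 */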

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, const char *abort_why)
{
        _enter("");

        switch (call->state) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
                return true;
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                break;
        default:
                rxrpc_proto_abort(abort_why, call, call->tx_top);
                return false;
        }

        rxrpc_rotate_tx_window(call, call->tx_top);

        write_lock(&call->state_lock);

        switch (call->state) {
        default:
                break;
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                call->tx_phase = false;
                call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
                break;
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                __rxrpc_call_completed(call);
                rxrpc_notify_socket(call);
                break;
        }

        write_unlock(&call->state_lock);
        trace_rxrpc_transmit(call, rxrpc_transmit_end);
        _leave(" = ok");
        return true;
}

/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * little headers between that indicate how to change the initial header for
 * each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sp->offset;
        unsigned int len = skb->len;
        int nr_jumbo = 1;
        u8 flags = sp->hdr.flags;

        do {
                nr_jumbo++;
                if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
                        goto protocol_error;
                if (flags & RXRPC_LAST_PACKET)
                        goto protocol_error;
                offset += RXRPC_JUMBO_DATALEN;
                if (skb_copy_bits(skb, offset, &flags, 1) < 0)
                        goto protocol_error;
                offset += sizeof(struct rxrpc_jumbo_header);
        } while (flags & RXRPC_JUMBO_PACKET);

        sp->nr_jumbo = nr_jumbo;
        return true;

protocol_error:
        return false;
}
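
/*
 * Illustrative sketch (not part of the kernel sources): the layout the
 * loop above walks, assuming the usual protocol constants
 * (RXRPC_JUMBO_DATALEN == 1412 and a 4-byte struct rxrpc_jumbo_header,
 * giving RXRPC_JUMBO_SUBPKTLEN == 1416), relative to sp->offset:
 *
 *	+0	first subpacket data	(1412 bytes)
 *	+1412	jumbo header		(4 bytes, flags byte first)
 *	+1416	second subpacket data	(1412 bytes)
 *	+2828	jumbo header		(4 bytes)
 *	+2832	last subpacket data	(any length)
 *
 * Each jumbo header's flags byte says whether another subpacket follows
 * (RXRPC_JUMBO_PACKET) and may only carry RXRPC_LAST_PACKET on the final
 * subpacket, which is exactly what the checks above enforce.
 */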

/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
                                 u8 annotation, bool *_jumbo_bad)
{
        /* Discard normal packets that are duplicates. */
        if (annotation == 0)
                return;

        /* Skip jumbo subpackets that are duplicates.  When we've had three or
         * more partially duplicate jumbo packets, we refuse to take any more
         * jumbos for this call.
         */
        if (!*_jumbo_bad) {
                call->nr_jumbo_bad++;
                *_jumbo_bad = true;
        }
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u16 skew)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sp->offset;
        unsigned int ix;
        rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
        rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
        bool immediate_ack = false, jumbo_bad = false, queued;
        u16 len;
        u8 ack = 0, flags, annotation = 0;

        _enter("{%u,%u},{%u,%u}",
               call->rx_hard_ack, call->rx_top, skb->len, seq);

        _proto("Rx DATA %%%u { #%u f=%02x }",
               sp->hdr.serial, seq, sp->hdr.flags);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        /* Received data implicitly ACKs all of the request packets we sent
         * when we're acting as a client.
         */
        if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY &&
            !rxrpc_end_tx_phase(call, "ETD"))
                return;

        call->ackr_prev_seq = seq;

        hard_ack = READ_ONCE(call->rx_hard_ack);
        if (after(seq, hard_ack + call->rx_winsize)) {
                ack = RXRPC_ACK_EXCEEDS_WINDOW;
                ack_serial = serial;
                goto ack;
        }

        flags = sp->hdr.flags;
        if (flags & RXRPC_JUMBO_PACKET) {
                if (call->nr_jumbo_bad > 3) {
                        ack = RXRPC_ACK_NOSPACE;
                        ack_serial = serial;
                        goto ack;
                }
                annotation = 1;
        }

next_subpacket:
        queued = false;
        ix = seq & RXRPC_RXTX_BUFF_MASK;
        len = skb->len;
        if (flags & RXRPC_JUMBO_PACKET)
                len = RXRPC_JUMBO_DATALEN;

        if (flags & RXRPC_LAST_PACKET) {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
                    seq != call->rx_top)
                        return rxrpc_proto_abort("LSN", call, seq);
        } else {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
                    after_eq(seq, call->rx_top))
                        return rxrpc_proto_abort("LSA", call, seq);
        }

        if (before_eq(seq, hard_ack)) {
                ack = RXRPC_ACK_DUPLICATE;
                ack_serial = serial;
                goto skip;
        }

        if (flags & RXRPC_REQUEST_ACK && !ack) {
                ack = RXRPC_ACK_REQUESTED;
                ack_serial = serial;
        }

        if (call->rxtx_buffer[ix]) {
                rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
                if (ack != RXRPC_ACK_DUPLICATE) {
                        ack = RXRPC_ACK_DUPLICATE;
                        ack_serial = serial;
                }
                immediate_ack = true;
                goto skip;
        }

        /* Queue the packet.  We use a couple of memory barriers here as we
         * need to make sure that rx_top is perceived to be set after the
         * buffer pointer and that the buffer pointer is set after the
         * annotation and the skb data.
         *
         * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
         * and also rxrpc_fill_out_ack().
         */
        rxrpc_get_skb(skb, rxrpc_skb_rx_got);
        call->rxtx_annotations[ix] = annotation;
        smp_wmb();
        call->rxtx_buffer[ix] = skb;
        if (after(seq, call->rx_top))
                smp_store_release(&call->rx_top, seq);
        if (flags & RXRPC_LAST_PACKET) {
                set_bit(RXRPC_CALL_RX_LAST, &call->flags);
                trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
        } else {
                trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
        }
        queued = true;

        if (after_eq(seq, call->rx_expect_next)) {
                if (after(seq, call->rx_expect_next)) {
                        _net("OOS %u > %u", seq, call->rx_expect_next);
                        ack = RXRPC_ACK_OUT_OF_SEQUENCE;
                        ack_serial = serial;
                }
                call->rx_expect_next = seq + 1;
        }

skip:
        offset += len;
        if (flags & RXRPC_JUMBO_PACKET) {
                if (skb_copy_bits(skb, offset, &flags, 1) < 0)
                        return rxrpc_proto_abort("XJF", call, seq);
                offset += sizeof(struct rxrpc_jumbo_header);
                seq++;
                serial++;
                annotation++;
                if (!(flags & RXRPC_JUMBO_PACKET))
                        annotation |= RXRPC_RX_ANNO_JLAST;
                if (after(seq, hard_ack + call->rx_winsize)) {
                        ack = RXRPC_ACK_EXCEEDS_WINDOW;
                        ack_serial = serial;
                        if (!jumbo_bad) {
                                call->nr_jumbo_bad++;
                                jumbo_bad = true;
                        }
                        goto ack;
                }

                _proto("Rx DATA Jumbo %%%u", serial);
                goto next_subpacket;
        }

        if (queued && flags & RXRPC_LAST_PACKET && !ack) {
                ack = RXRPC_ACK_DELAY;
                ack_serial = serial;
        }

ack:
        if (ack)
                rxrpc_propose_ACK(call, ack, skew, ack_serial,
                                  immediate_ack, true);

        if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
                rxrpc_notify_socket(call);
        _leave(" [queued]");
}

/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
                                      ktime_t resp_time,
                                      rxrpc_serial_t orig_serial,
                                      rxrpc_serial_t ack_serial)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        ktime_t sent_at;
        int ix;

        for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
                skb = call->rxtx_buffer[ix];
                if (!skb)
                        continue;

                sp = rxrpc_skb(skb);
                if (sp->hdr.serial != orig_serial)
                        continue;
                smp_rmb();
                sent_at = skb->tstamp;
                goto found;
        }
        return;

found:
        rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
                           orig_serial, ack_serial, sent_at, resp_time);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
                                      ktime_t resp_time,
                                      rxrpc_serial_t orig_serial,
                                      rxrpc_serial_t ack_serial)
{
        rxrpc_serial_t ping_serial;
        ktime_t ping_time;

        ping_time = call->ackr_ping_time;
        smp_rmb();
        ping_serial = call->ackr_ping;

        if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
            before(orig_serial, ping_serial))
                return;
        clear_bit(RXRPC_CALL_PINGING, &call->flags);
        if (after(orig_serial, ping_serial))
                return;

        rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
                           orig_serial, ack_serial, ping_time, resp_time);
}
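
/*
 * Illustrative sketch (not part of the kernel sources): both of the
 * paths above hand rxrpc_peer_add_rtt() a (send time, response time)
 * pair, and the RTT sample is just their difference.  Using only the
 * generic ktime API, the core arithmetic would be:
 *
 *	static s64 rtt_sample_us(ktime_t sent_at, ktime_t resp_time)
 *	{
 *		return ktime_us_delta(resp_time, sent_at);	// microseconds
 *	}
 *
 * The serial checks above ensure that a reordered or stale ACK (one
 * whose orig_serial doesn't match the outstanding ping) can't feed in a
 * bogus sample.
 */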

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
                                struct rxrpc_ackinfo *ackinfo)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_peer *peer;
        unsigned int mtu;
        u32 rwind = ntohl(ackinfo->rwind);

        _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
               sp->hdr.serial,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
               rwind, ntohl(ackinfo->jumbo_max));

        if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
                rwind = RXRPC_RXTX_BUFF_SIZE - 1;
        call->tx_winsize = rwind;

        mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

        peer = call->peer;
        if (mtu < peer->maxdata) {
                spin_lock_bh(&peer->lock);
                peer->maxdata = mtu;
                peer->mtu = mtu + peer->hdrsize;
                spin_unlock_bh(&peer->lock);
                _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
        }
}
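
/*
 * Worked example (not part of the kernel sources), again assuming
 * RXRPC_RXTX_BUFF_SIZE is 64: a peer advertising rwind=128 is clamped to
 * a tx_winsize of 63, since the ring can never hold more than 63 packets
 * in flight; and a peer reporting rxMTU=1444 over a path whose
 * maxMTU=1200 yields mtu = min(1444, 1200) = 1200, which only ever
 * shrinks peer->maxdata, never grows it.
 */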

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend it immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
                                  rxrpc_seq_t seq, int nr_acks)
{
        bool resend = false;
        int ix;
        u8 annotation, anno_type;

        for (; nr_acks > 0; nr_acks--, seq++) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                annotation = call->rxtx_annotations[ix];
                anno_type = annotation & RXRPC_TX_ANNO_MASK;
                annotation &= ~RXRPC_TX_ANNO_MASK;
                switch (*acks++) {
                case RXRPC_ACK_TYPE_ACK:
                        if (anno_type == RXRPC_TX_ANNO_ACK)
                                continue;
                        call->rxtx_annotations[ix] =
                                RXRPC_TX_ANNO_ACK | annotation;
                        break;
                case RXRPC_ACK_TYPE_NACK:
                        if (anno_type == RXRPC_TX_ANNO_NAK)
                                continue;
                        call->rxtx_annotations[ix] =
                                RXRPC_TX_ANNO_NAK | annotation;
                        resend = true;
                        break;
                default:
                        return rxrpc_proto_abort("SFT", call, 0);
                }
        }

        if (resend &&
            !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
                rxrpc_queue_call(call);
}
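
/*
 * Worked example (not part of the kernel sources): the soft-ACK array is
 * one byte per packet starting at firstPacket.  If firstPacket is 5 and
 * the array holds { ACK, ACK, NACK, ACK }, then packets 5, 6 and 8 are
 * soft-ACK'd while packet 7 is explicitly NAK'd: its ring slot is
 * re-annotated RXRPC_TX_ANNO_NAK and a resend event is queued.  Packets
 * 4 and below are behind the hard-ACK point and have already left the
 * ring.
 */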

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the peer has received the packet but may yet discard
 * it and request retransmission.  A phase is complete when all packets are
 * hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                            u16 skew)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        union {
                struct rxrpc_ackpacket ack;
                struct rxrpc_ackinfo info;
                u8 acks[RXRPC_MAXACKS];
        } buf;
        rxrpc_serial_t acked_serial;
        rxrpc_seq_t first_soft_ack, hard_ack;
        int nr_acks, offset;

        _enter("");

        if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
                _debug("extraction failure");
                return rxrpc_proto_abort("XAK", call, 0);
        }
        sp->offset += sizeof(buf.ack);

        acked_serial = ntohl(buf.ack.serial);
        first_soft_ack = ntohl(buf.ack.firstPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = buf.ack.nAcks;

        trace_rxrpc_rx_ack(call, first_soft_ack, buf.ack.reason, nr_acks);

        _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
               sp->hdr.serial,
               ntohs(buf.ack.maxSkew),
               first_soft_ack,
               ntohl(buf.ack.previousPacket),
               acked_serial,
               rxrpc_acks(buf.ack.reason),
               buf.ack.nAcks);

        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
                rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
                                          sp->hdr.serial);
        if (buf.ack.reason == RXRPC_ACK_REQUESTED)
                rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
                                          sp->hdr.serial);

        if (buf.ack.reason == RXRPC_ACK_PING) {
                _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
                rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
                                  skew, sp->hdr.serial, true, true);
        } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
                rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
                                  skew, sp->hdr.serial, true, true);
        }

        offset = sp->offset + nr_acks + 3;
        if (skb->len >= offset + sizeof(buf.info)) {
                if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
                        return rxrpc_proto_abort("XAI", call, 0);
                rxrpc_input_ackinfo(call, skb, &buf.info);
        }

        if (first_soft_ack == 0)
                return rxrpc_proto_abort("AK0", call, 0);

        /* Ignore ACKs unless we are or have just been transmitting. */
        switch (call->state) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
        case RXRPC_CALL_SERVER_SEND_REPLY:
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                break;
        default:
                return;
        }

        /* Discard any out-of-order or duplicate ACKs. */
        if ((int)sp->hdr.serial - (int)call->acks_latest <= 0) {
                _debug("discard ACK %d <= %d",
                       sp->hdr.serial, call->acks_latest);
                return;
        }
        call->acks_latest = sp->hdr.serial;

        if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
            hard_ack == call->tx_top) {
                rxrpc_end_tx_phase(call, "ETA");
                return;
        }

        if (before(hard_ack, call->tx_hard_ack) ||
            after(hard_ack, call->tx_top))
                return rxrpc_proto_abort("AKW", call, 0);

        if (after(hard_ack, call->tx_hard_ack))
                rxrpc_rotate_tx_window(call, hard_ack);

        if (after(first_soft_ack, call->tx_top))
                return;

        if (nr_acks > call->tx_top - first_soft_ack + 1)
                nr_acks = call->tx_top - first_soft_ack + 1;
        if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
                return rxrpc_proto_abort("XSA", call, 0);
        rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks);
}
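
/*
 * Illustrative sketch (not part of the kernel sources): the
 * "offset = sp->offset + nr_acks + 3" step above locates the trailing
 * ackinfo by walking the ACK packet's wire layout:
 *
 *	struct rxrpc_ackpacket	(fixed part, already consumed)
 *	u8 acks[nAcks]		(one ACK/NACK byte per packet)
 *	3 bytes of padding	(pads the body to a 4-byte boundary)
 *	struct rxrpc_ackinfo	(rxMTU, maxMTU, rwind, jumbo_max)
 *
 * The ackinfo is parsed only if the datagram is long enough to contain
 * it, so short ACKs without the trailer are still accepted.
 */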

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        _proto("Rx ACKALL %%%u", sp->hdr.serial);

        rxrpc_end_tx_phase(call, "ETL");
}

/*
 * Process an ABORT packet.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        __be32 wtmp;
        u32 abort_code = RX_CALL_DEAD;

        _enter("");

        if (skb->len >= 4 &&
            skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0)
                abort_code = ntohl(wtmp);

        _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

        if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
                                      abort_code, ECONNABORTED))
                rxrpc_notify_socket(call);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
                                    struct sk_buff *skb, u16 skew)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        _enter("%p,%p", call, skb);

        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb, skew);
                break;

        case RXRPC_PACKET_TYPE_ACK:
                rxrpc_input_ack(call, skb, skew);
                break;

        case RXRPC_PACKET_TYPE_BUSY:
                _proto("Rx BUSY %%%u", sp->hdr.serial);

                /* Just ignore BUSY packets from the server; the retry and
                 * lifespan timers will take care of business.  BUSY packets
                 * from the client don't make sense.
                 */
                break;

        case RXRPC_PACKET_TYPE_ABORT:
                rxrpc_input_abort(call, skb);
                break;

        case RXRPC_PACKET_TYPE_ACKALL:
                rxrpc_input_ackall(call, skb);
                break;

        default:
                _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
                break;
        }

        _leave("");
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
                                      struct sk_buff *skb)
{
        _enter("%p,%p", conn, skb);

        skb_queue_tail(&conn->rx_queue, skb);
        rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
                                       struct sk_buff *skb)
{
        _enter("%p,%p", local, skb);

        skb_queue_tail(&local->event_queue, skb);
        rxrpc_queue_local(local);
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
        CHECK_SLAB_OKAY(&local->usage);

        skb_queue_tail(&local->reject_queue, skb);
        rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
        struct rxrpc_wire_header whdr;

        /* dig out the RxRPC connection details */
        if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
                return -EBADMSG;

        memset(sp, 0, sizeof(*sp));
        sp->hdr.epoch = ntohl(whdr.epoch);
        sp->hdr.cid = ntohl(whdr.cid);
        sp->hdr.callNumber = ntohl(whdr.callNumber);
        sp->hdr.seq = ntohl(whdr.seq);
        sp->hdr.serial = ntohl(whdr.serial);
        sp->hdr.flags = whdr.flags;
        sp->hdr.type = whdr.type;
        sp->hdr.userStatus = whdr.userStatus;
        sp->hdr.securityIndex = whdr.securityIndex;
        sp->hdr._rsvd = ntohs(whdr._rsvd);
        sp->hdr.serviceId = ntohs(whdr.serviceId);
        sp->offset = sizeof(whdr);
        return 0;
}
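
/*
 * Illustrative sketch (not part of the kernel sources): everything
 * multi-byte in the wire header is big-endian, so it goes through
 * ntohl()/ntohs(), while single bytes (flags, type, userStatus,
 * securityIndex) are copied as-is.  For example:
 *
 *	__be32 wire;			// bytes 00 00 00 04 off the wire
 *	u32 cid = ntohl(wire);		// 0x00000004 on any host
 *
 * Skipping the conversion would make little-endian hosts compute the
 * wrong channel from cid & RXRPC_CHANNELMASK.
 */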

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *udp_sk)
{
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
        struct rxrpc_call *call;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = udp_sk->sk_user_data;
        struct sk_buff *skb;
        unsigned int channel;
        int ret, skew;

        _enter("%p", udp_sk);

        ASSERT(!irqs_disabled());

        skb = skb_recv_datagram(udp_sk, 0, 1, &ret);
        if (!skb) {
                if (ret == -EAGAIN)
                        return;
                _debug("UDP socket error %d", ret);
                return;
        }

        rxrpc_new_skb(skb, rxrpc_skb_rx_received);

        _net("recv skb %p", skb);

        /* we'll probably need to checksum it (didn't call sock_recvmsg) */
        if (skb_checksum_complete(skb)) {
                rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
                __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
                _leave(" [CSUM failed]");
                return;
        }

        __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

        /* The socket buffer we have is owned by UDP, with UDP's data all over
         * it, but we really want our own data there.
         */
        skb_orphan(skb);
        sp = rxrpc_skb(skb);

        if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
                static int lose;
                if ((lose++ & 7) == 7) {
                        rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
                        return;
                }
        }

        _net("Rx UDP packet from %08x:%04hu",
             ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

        /* dig out the RxRPC connection details */
        if (rxrpc_extract_header(sp, skb) < 0)
                goto bad_message;
        trace_rxrpc_rx_packet(sp);

        _net("Rx RxRPC %s ep=%x call=%x:%x",
             sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
             sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

        if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
            !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
                _proto("Rx Bad Packet Type %u", sp->hdr.type);
                goto bad_message;
        }

        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
                rxrpc_post_packet_to_local(local, skb);
                goto out;

        case RXRPC_PACKET_TYPE_BUSY:
                if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
                        goto discard;
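                /* Fall through */
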
        case RXRPC_PACKET_TYPE_DATA:
                if (sp->hdr.callNumber == 0)
                        goto bad_message;
                if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
                    !rxrpc_validate_jumbo(skb))
                        goto bad_message;
                break;
        }

        rcu_read_lock();

        conn = rxrpc_find_connection_rcu(local, skb);
        if (conn) {
                if (sp->hdr.securityIndex != conn->security_ix)
                        goto wrong_security;

                if (sp->hdr.callNumber == 0) {
                        /* Connection-level packet */
                        _debug("CONN %p {%d}", conn, conn->debug_id);
                        rxrpc_post_packet_to_conn(conn, skb);
                        goto out_unlock;
                }

                /* Note the serial number skew here */
                skew = (int)sp->hdr.serial - (int)conn->hi_serial;
                if (skew >= 0) {
                        if (skew > 0)
                                conn->hi_serial = sp->hdr.serial;
                } else {
                        skew = -skew;
                        skew = min(skew, 65535);
                }

                /* Call-bound packets are routed by connection channel. */
                channel = sp->hdr.cid & RXRPC_CHANNELMASK;
                chan = &conn->channels[channel];

                /* Ignore really old calls */
                if (sp->hdr.callNumber < chan->last_call)
                        goto discard_unlock;

                if (sp->hdr.callNumber == chan->last_call) {
                        /* For the previous service call, if completed
                         * successfully, we discard all further packets.
                         */
                        if (rxrpc_conn_is_service(conn) &&
                            (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
                             sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
                                goto discard_unlock;

                        /* But otherwise we need to retransmit the final packet
                         * from data cached in the connection record.
                         */
                        rxrpc_post_packet_to_conn(conn, skb);
                        goto out_unlock;
                }

                call = rcu_dereference(chan->call);
        } else {
                skew = 0;
                call = NULL;
        }

        if (!call || atomic_read(&call->usage) == 0) {
                if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED) ||
                    sp->hdr.callNumber == 0 ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
                        goto bad_message_unlock;
                if (sp->hdr.seq != 1)
                        goto discard_unlock;
                call = rxrpc_new_incoming_call(local, conn, skb);
                if (!call) {
                        rcu_read_unlock();
                        goto reject_packet;
                }
                rxrpc_send_ping(call, skb, skew);
        }

        rxrpc_input_call_packet(call, skb, skew);
        goto discard_unlock;

discard_unlock:
        rcu_read_unlock();
discard:
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
        trace_rxrpc_rx_done(0, 0);
        return;

out_unlock:
        rcu_read_unlock();
        goto out;

wrong_security:
        rcu_read_unlock();
        trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RXKADINCONSISTENCY, EBADMSG);
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;

bad_message_unlock:
        rcu_read_unlock();
bad_message:
        trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_PROTOCOL_ERROR, EBADMSG);
        skb->priority = RX_PROTOCOL_ERROR;
post_abort:
        skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
        _leave(" [badmsg]");
}