/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

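/*
 * A note on the Tx window representation used throughout this file:
 * call->acks_window is a power-of-two sized circular buffer of unsigned
 * longs, each slot holding the pointer to a transmitted DATA skb.  Bit 0 of
 * each slot is borrowed as a "soft-ACK'd" marker, which is why readers mask
 * with ~1 before casting a slot back to a struct sk_buff pointer, and why
 * rxrpc_process_soft_ACKs() sets and clears that bit as the peer's ACK and
 * NACK reports arrive.
 */
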
/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
			 u32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks(ack_reason), serial, immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		expiry = rxrpc_soft_ack_delay;
		goto run_timer;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = rxrpc_idle_ack_delay;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		expiry = rxrpc_requested_ack_delay;
		if (!expiry)
			goto cancel_timer;
		if (!immediate || serial == 1) {
			_debug("run defer timer");
			goto run_timer;
		}

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", serial);
	try_to_del_timer_sync(&call->ack_timer);
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate)
{
	s8 prior = rxrpc_ack_priority[ack_reason];

	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		spin_lock_bh(&call->lock);
		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
		spin_unlock_bh(&call->lock);
	}
}

/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	bool stop;
	int loop;
	u8 resend;

	_enter("{%d,%d,%d,%d},",
	       call->acks_hard, call->acks_unacked,
	       atomic_read(&call->sequence),
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	stop = false;
	resend = 0;
	resend_at = 0;

	for (loop = call->acks_tail;
	     loop != call->acks_head || stop;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		if (*p_txb & 1)
			continue;

		txb = (struct sk_buff *) *p_txb;
		sp = rxrpc_skb(txb);

		if (sp->need_resend) {
			sp->need_resend = false;

			/* each Tx packet has a new serial number */
			sp->hdr.serial = atomic_inc_return(&call->conn->serial);

			whdr = (struct rxrpc_wire_header *)txb->head;
			whdr->serial = htonl(sp->hdr.serial);

			_proto("Tx DATA %%%u { #%d }",
			       sp->hdr.serial, sp->hdr.seq);
			if (rxrpc_send_data_packet(call->conn, txb) < 0) {
				stop = true;
				sp->resend_at = jiffies + 3;
			} else {
				sp->resend_at =
					jiffies + rxrpc_resend_timeout;
			}
		}

		if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

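/*
 * Note that rxrpc_resend() and rxrpc_resend_timer() above and
 * rxrpc_process_soft_ACKs() below share the same scanning tail: the
 * time_after_eq(jiffies + 1, sp->resend_at) test treats a packet falling due
 * within the next jiffy as due right now, presumably so that we don't re-arm
 * the timer for an interval too short to be worth taking.
 */
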
/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			sp->need_resend = false;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = true;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = true;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
	unsigned long _skb;
	int tail = call->acks_tail, old_tail;
	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

	_enter("{%u,%u},%u", call->acks_hard, win, hard);

	ASSERTCMP(hard - call->acks_hard, <=, win);

	while (call->acks_hard < hard) {
		smp_read_barrier_depends();
		_skb = call->acks_window[tail] & ~1;
		rxrpc_free_skb((struct sk_buff *) _skb);
		old_tail = tail;
		tail = (tail + 1) & (call->acks_winsz - 1);
		call->acks_tail = tail;
		if (call->acks_unacked == old_tail)
			call->acks_unacked = tail;
		call->acks_hard++;
	}

	wake_up(&call->tx_waitq);
	_leave("");
}

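/*
 * CIRC_CNT() arithmetic sanity check: with acks_winsz = 16, acks_head = 3
 * and tail = 14, CIRC_CNT(3, 14, 16) = (3 - 14) & 15 = 5, i.e. slots 14, 15,
 * 0, 1 and 2 are occupied, so the assertion above permits a hard-ACK to
 * advance acks_hard by at most 5.
 */
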
/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}

/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       sp->hdr.seq, call->rx_first_oos);

		if (sp->hdr.seq != call->rx_first_oos) {
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;
socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp, *psp;
	struct sk_buff *p;
	u32 seq;

	sp = rxrpc_skb(skb);
	seq = sp->hdr.seq;
	_enter(",,{%u}", seq);

	skb->destructor = rxrpc_packet_destructor;
	ASSERTCMP(sp->call, ==, NULL);
	sp->call = call;
	rxrpc_get_call(call);
	atomic_inc(&call->skb_count);

	/* insert into the buffer in sequence order */
	spin_lock_bh(&call->lock);

	skb_queue_walk(&call->rx_oos_queue, p) {
		psp = rxrpc_skb(p);
		if (psp->hdr.seq > seq) {
			_debug("insert oos #%u before #%u", seq, psp->hdr.seq);
			skb_insert(p, skb, &call->rx_oos_queue);
			goto inserted;
		}
	}

	_debug("append oos #%u", seq);
	skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

	/* we might now have a new front to the queue */
	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
		call->rx_first_oos = seq;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
	}
	read_unlock(&call->state_lock);

	spin_unlock_bh(&call->lock);
	_leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long _skb, *acks_window;
	u8 winsz = call->acks_winsz;
	int tail;

	acks_window = call->acks_window;
	call->acks_window = NULL;

	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
		tail = call->acks_tail;
		smp_read_barrier_depends();
		_skb = acks_window[tail] & ~1;

		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

		skb = (struct sk_buff *) _skb;
		sp = rxrpc_skb(skb);
		_debug("+++ clear Tx %u", sp->hdr.seq);
		rxrpc_free_skb(skb);
	}

	kfree(acks_window);
}

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				  unsigned int latest, int nAcks)
{
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_peer *peer;
	unsigned int mtu;

	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
		_leave(" [no ackinfo]");
		return;
	}

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       latest,
	       ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
	       ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

	peer = call->conn->params.peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}

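/*
 * The nAcks + 3 offset used above reflects the ACK packet layout: by the
 * time rxrpc_extract_ackinfo() is called, rxrpc_process_rx_queue() below has
 * already pulled the fixed struct rxrpc_ackpacket off the skb, so the
 * remaining data is nAcks single-byte ACK/NACK codes, then 3 bytes of
 * padding, then the optional struct rxrpc_ackinfo trailer.  The transmit
 * side in rxrpc_process_call() emits the same layout through its iov[].
 */
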
/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
				  u32 *_abort_code)
{
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool post_ACK;
	int latest;
	u32 hard, tx;

	_enter("");

process_further:
	skb = skb_dequeue(&call->rx_queue);
	if (!skb)
		return -EAGAIN;

	_net("deferred skb %p", skb);

	sp = rxrpc_skb(skb);

	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

	post_ACK = false;

	switch (sp->hdr.type) {
		/* data packets that wind up here have been received out of
		 * order, need security processing or are jumbo packets */
	case RXRPC_PACKET_TYPE_DATA:
		_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		/* secured packets must be verified and possibly decrypted */
		if (call->conn->security->verify_packet(call, skb,
							_abort_code) < 0)
			goto protocol_error;

		rxrpc_insert_oos_packet(call, skb);
		goto process_further;

		/* partial ACK to process */
	case RXRPC_PACKET_TYPE_ACK:
		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
			_debug("extraction failure");
			goto protocol_error;
		}
		if (!skb_pull(skb, sizeof(ack)))
			BUG();

		latest = sp->hdr.serial;
		hard = ntohl(ack.firstPacket);
		tx = atomic_read(&call->sequence);

		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       latest,
		       ntohs(ack.maxSkew),
		       hard,
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks(ack.reason),
		       ack.nAcks);

		rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

		if (ack.reason == RXRPC_ACK_PING) {
			_proto("Rx ACK %%%u PING Request", latest);
			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
					  sp->hdr.serial, true);
		}

		/* discard any out-of-order or duplicate ACKs */
		if (latest - call->acks_latest <= 0) {
			_debug("discard ACK %d <= %d",
			       latest, call->acks_latest);
			goto discard;
		}
		call->acks_latest = latest;

		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
			goto discard;

		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

		if (hard > 0) {
			if (hard - 1 > tx) {
				_debug("hard-ACK'd packet %d not transmitted"
				       " (%d top)",
				       hard - 1, tx);
				goto protocol_error;
			}

			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
			    hard > tx) {
				call->acks_hard = tx;
				goto all_acked;
			}

			smp_rmb();
			rxrpc_rotate_tx_window(call, hard - 1);
		}

		if (ack.nAcks > 0) {
			if (hard - 1 + ack.nAcks > tx) {
				_debug("soft-ACK'd packet %d+%d not"
				       " transmitted (%d top)",
				       hard - 1, ack.nAcks, tx);
				goto protocol_error;
			}

			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
				goto protocol_error;
		}
		goto discard;

		/* complete ACK to process */
	case RXRPC_PACKET_TYPE_ACKALL:
		goto all_acked;

		/* abort and busy are handled elsewhere */
	case RXRPC_PACKET_TYPE_BUSY:
	case RXRPC_PACKET_TYPE_ABORT:
		BUG();

		/* connection level events - also handled elsewhere */
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
	case RXRPC_PACKET_TYPE_DEBUG:
		BUG();
	}

	/* if we've had a hard ACK that covers all the packets we've sent, then
	 * that ends that phase of the operation */
all_acked:
	write_lock_bh(&call->state_lock);
	_debug("ack all %d", call->state);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		break;
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		_debug("srv complete");
		call->state = RXRPC_CALL_COMPLETE;
		post_ACK = true;
		break;
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		goto protocol_error_unlock; /* can't occur yet */
	default:
		write_unlock_bh(&call->state_lock);
		goto discard; /* assume packet left over from earlier phase */
	}

	write_unlock_bh(&call->state_lock);

	/* if all the packets we sent are hard-ACK'd, then we can discard
	 * whatever we've got left */
	_debug("clear Tx %d",
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	del_timer_sync(&call->resend_timer);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);

	if (call->acks_window)
		rxrpc_zap_tx_window(call);

	if (post_ACK) {
		/* post the final ACK message for userspace to pick up */
		_debug("post ACK");
		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
		sp->call = call;
		rxrpc_get_call(call);
		atomic_inc(&call->skb_count);
		spin_lock_bh(&call->lock);
		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
			BUG();
		spin_unlock_bh(&call->lock);
		goto process_further;
	}

discard:
	rxrpc_free_skb(skb);
	goto process_further;

protocol_error_unlock:
	write_unlock_bh(&call->state_lock);
protocol_error:
	rxrpc_free_skb(skb);
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
			      bool fatal)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	int ret;

	_enter("{%d,%lx},%u,%u,%d",
	       call->debug_id, call->flags, mark, error, fatal);

	/* remove timers and things for fatal messages */
	if (fatal) {
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}

	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		_leave("[no userid]");
		return 0;
	}

	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		skb = alloc_skb(0, GFP_NOFS);
		if (!skb)
			return -ENOMEM;

		skb->mark = mark;

		sp = rxrpc_skb(skb);
		memset(sp, 0, sizeof(*sp));
		sp->error = error;
		sp->call = call;
		rxrpc_get_call(call);
		atomic_inc(&call->skb_count);

		spin_lock_bh(&call->lock);
		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
		spin_unlock_bh(&call->lock);
		return ret;
	}

	return 0;
}

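/*
 * The messages posted by rxrpc_post_message() surface to recvmsg() as
 * zero-length skbs bearing distinctive marks (RXRPC_SKB_MARK_NEW_CALL,
 * _FINAL_ACK, _BUSY, _REMOTE_ABORT, _NET_ERROR, _LOCAL_ERROR), with
 * sp->error carrying an errno-style code where one applies; the recvmsg
 * path is then expected to translate these for userspace.
 */
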
/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	struct rxrpc_ackinfo ackinfo;
	struct msghdr msg;
	struct kvec iov[5];
	enum rxrpc_call_event genbit;
	unsigned long bits;
	__be32 data, pad;
	size_t len;
	int loop, nbit, ioc, ret, mtu;
	u32 serial, abort_code = RX_PROTOCOL_ERROR;
	u8 *acks = NULL;

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx} [%lu]",
	       call->debug_id, rxrpc_call_states[call->state], call->events,
	       (jiffies - call->creation_jif) / (HZ / 10));

	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
		_debug("XXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXX");
		return;
	}

	/* there's a good chance we're going to have to send a message, so set
	 * one up in advance */
	msg.msg_name	= &call->conn->params.peer->srx.transport;
	msg.msg_namelen	= call->conn->params.peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(call->conn->proto.epoch);
	whdr.cid	= htonl(call->cid);
	whdr.callNumber	= htonl(call->call_id);
	whdr.seq	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_ACK;
	whdr.flags	= call->conn->out_clientflag;
	whdr.userStatus	= 0;
	whdr.securityIndex = call->conn->security_ix;
	whdr._rsvd	= 0;
	whdr.serviceId	= htons(call->service_id);

	memset(iov, 0, sizeof(iov));
	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);

	/* deal with events of a final nature */
	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
		enum rxrpc_skb_mark mark;
		int error;

		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		error = call->error_report;
		if (error < RXRPC_LOCAL_ERROR_OFFSET) {
			mark = RXRPC_SKB_MARK_NET_ERROR;
			_debug("post net error %d", error);
		} else {
			mark = RXRPC_SKB_MARK_LOCAL_ERROR;
			error -= RXRPC_LOCAL_ERROR_OFFSET;
			_debug("post net local error %d", error);
		}

		if (rxrpc_post_message(call, mark, error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		_debug("post conn abort");

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       call->conn->error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		genbit = RXRPC_CALL_EV_REJECT_BUSY;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ECONNABORTED, true) < 0)
			goto no_mem;
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		data = htonl(call->local_abort);
		iov[1].iov_base = &data;
		iov[1].iov_len = sizeof(data);
		genbit = RXRPC_CALL_EV_ABORT;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
		genbit = RXRPC_CALL_EV_ACK_FINAL;

		ack.bufferSpace	= htons(8);
		ack.maxSkew	= 0;
		ack.serial	= 0;
		ack.reason	= RXRPC_ACK_IDLE;
		ack.nAcks	= 0;
		call->ackr_reason = 0;

		spin_lock_bh(&call->lock);
		ack.serial	= htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket	= htonl(call->rx_data_eaten + 1);
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len	= sizeof(ack);
		iov[2].iov_base = &pad;
		iov[2].iov_len	= 3;
		iov[3].iov_base = &ackinfo;
		iov[3].iov_len	= sizeof(ackinfo);
		goto send_ACK;
	}

	if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
			    (1 << RXRPC_CALL_EV_RCVD_ABORT))
	    ) {
		enum rxrpc_skb_mark mark;

		if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
		else
			mark = RXRPC_SKB_MARK_BUSY;

		_debug("post abort/busy");
		rxrpc_clear_tx_window(call);
		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
		_debug("do implicit ackall");
		rxrpc_clear_tx_window(call);
	}

	if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
		write_lock_bh(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_TIMEOUT;
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		}
		write_unlock_bh(&call->state_lock);

		_debug("post timeout");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ETIME, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		goto kill_ACKs;
	}

	/* deal with assorted inbound messages */
	if (!skb_queue_empty(&call->rx_queue)) {
		switch (rxrpc_process_rx_queue(call, &abort_code)) {
		case 0:
		case -EAGAIN:
			break;
		case -ENOMEM:
			goto no_mem;
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EPROTO:
			rxrpc_abort_call(call, abort_code);
			goto kill_ACKs;
		}
	}

	/* handle resending */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_resend_timer(call);
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_resend(call);

	/* consider sending an ordinary ACK */
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("send ACK: window: %d - %d { %lx }",
		       call->rx_data_eaten, call->ackr_win_top,
		       call->ackr_window[0]);

		if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
			/* ACK by sending reply DATA packet in this state */
			clear_bit(RXRPC_CALL_EV_ACK, &call->events);
			goto maybe_reschedule;
		}

		genbit = RXRPC_CALL_EV_ACK;

		acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
			       GFP_NOFS);
		if (!acks)
			goto no_mem;

		//hdr.flags	= RXRPC_SLOW_START_OK;
		ack.bufferSpace	= htons(8);
		ack.maxSkew	= 0;

		spin_lock_bh(&call->lock);
		ack.reason	= call->ackr_reason;
		ack.serial	= htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket	= htonl(call->rx_data_eaten + 1);

		ack.nAcks = 0;
		for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
			nbit = loop * BITS_PER_LONG;
			for (bits = call->ackr_window[loop]; bits; bits >>= 1
			     ) {
				_debug("- l=%d n=%d b=%lx", loop, nbit, bits);
				if (bits & 1) {
					acks[nbit] = RXRPC_ACK_TYPE_ACK;
					ack.nAcks = nbit + 1;
				}
				nbit++;
			}
		}
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len	= sizeof(ack);
		iov[2].iov_base = acks;
		iov[2].iov_len	= ack.nAcks;
		iov[3].iov_base = &pad;
		iov[3].iov_len	= 3;
		iov[4].iov_base = &ackinfo;
		iov[4].iov_len	= sizeof(ackinfo);

		switch (ack.reason) {
		case RXRPC_ACK_REQUESTED:
		case RXRPC_ACK_DUPLICATE:
		case RXRPC_ACK_OUT_OF_SEQUENCE:
		case RXRPC_ACK_EXCEEDS_WINDOW:
		case RXRPC_ACK_NOSPACE:
		case RXRPC_ACK_PING:
		case RXRPC_ACK_PING_RESPONSE:
			goto send_ACK_with_skew;
		case RXRPC_ACK_DELAY:
		case RXRPC_ACK_IDLE:
			goto send_ACK;
		}
	}

	/* handle completion of security negotiations on an incoming
	 * connection */
	if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
		_debug("secured");
		spin_lock_bh(&call->lock);

		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			_debug("securing");
			write_lock(&call->socket->call_lock);
			if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
			    !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
				_debug("not released");
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
				list_move_tail(&call->accept_link,
					       &call->socket->acceptq);
			}
			write_unlock(&call->socket->call_lock);
			read_lock(&call->state_lock);
			if (call->state < RXRPC_CALL_COMPLETE)
				set_bit(RXRPC_CALL_EV_POST_ACCEPT,
					&call->events);
			read_unlock(&call->state_lock);
		}

		spin_unlock_bh(&call->lock);
		if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
			goto maybe_reschedule;
	}

	/* post a notification of an acceptable connection to the app */
	if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
		_debug("post accept");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
				       0, false) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
		goto maybe_reschedule;
	}

	/* handle incoming call acceptance */
	if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
		_debug("accepted");
		ASSERTCMP(call->rx_data_post, ==, 0);
		call->rx_data_post = 1;
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
		read_unlock_bh(&call->state_lock);
	}

	/* drain the out of sequence received packet queue into the packet Rx
	 * queue */
	if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
		while (call->rx_data_post == call->rx_first_oos)
			if (rxrpc_drain_rx_oos_queue(call) < 0)
				break;
		goto maybe_reschedule;
	}

	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
		rxrpc_release_call(call);
		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
	}

	/* other events may have been raised since we started checking */
	goto maybe_reschedule;

send_ACK_with_skew:
	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
			    ntohl(ack.serial));
send_ACK:
	mtu = call->conn->params.peer->if_mtu;
	mtu -= call->conn->params.peer->hdrsize;
	ackinfo.maxMTU	= htonl(mtu);
	ackinfo.rwind	= htonl(rxrpc_rx_window_size);

	/* permit the peer to send us jumbo packets if it wants to */
	ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
	ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       serial,
	       ntohs(ack.maxSkew),
	       ntohl(ack.firstPacket),
	       ntohl(ack.previousPacket),
	       ntohl(ack.serial),
	       rxrpc_acks(ack.reason),
	       ack.nAcks);

	del_timer_sync(&call->ack_timer);
	if (ack.nAcks > 0)
		set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
	goto send_message_2;

send_message:
	_debug("send message");

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);

send_message_2:
	len = iov[0].iov_len;
	ioc = 1;
	if (iov[4].iov_len) {
		ioc = 5;
		len += iov[4].iov_len;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[3].iov_len) {
		ioc = 4;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[2].iov_len) {
		ioc = 3;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[1].iov_len) {
		ioc = 2;
		len += iov[1].iov_len;
	}

	ret = kernel_sendmsg(call->conn->params.local->socket,
			     &msg, iov, ioc, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
		goto error;
	}

	switch (genbit) {
	case RXRPC_CALL_EV_ABORT:
		clear_bit(genbit, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;

	case RXRPC_CALL_EV_ACK_FINAL:
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
			call->state = RXRPC_CALL_COMPLETE;
		write_unlock_bh(&call->state_lock);
		goto kill_ACKs;

	default:
		clear_bit(genbit, &call->events);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		case RXRPC_CALL_CLIENT_RECV_REPLY:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			_debug("start ACK timer");
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
					  call->ackr_serial, false);
		default:
			break;
		}
		goto maybe_reschedule;
	}

kill_ACKs:
	del_timer_sync(&call->ack_timer);
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
		rxrpc_put_call(call);
	clear_bit(RXRPC_CALL_EV_ACK, &call->events);

maybe_reschedule:
	if (call->events || !skb_queue_empty(&call->rx_queue)) {
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

	/* don't leave aborted connections on the accept queue */
	if (call->state >= RXRPC_CALL_COMPLETE &&
	    !list_empty(&call->accept_link)) {
		_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
		       call, call->events, call->flags, call->conn->proto.cid);

		read_lock_bh(&call->state_lock);
		if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
		    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

error:
	clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
	kfree(acks);

	/* because we don't want two CPUs both processing the work item for one
	 * call at the same time, we use a flag to note when it's busy; however
	 * this means there's a race between clearing the flag and setting the
	 * work pending bit and the work item being processed again */
	if (call->events && !work_pending(&call->processor)) {
		_debug("jumpstart %x", call->conn->proto.cid);
		rxrpc_queue_call(call);
	}

	_leave("");
	return;

no_mem:
	_debug("out of memory");
	goto maybe_reschedule;
}