/* net/rxrpc/output.c — extracted from a gitweb view of mirror_ubuntu-bionic-kernel */
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
22 struct rxrpc_pkt_buffer
{
23 struct rxrpc_wire_header whdr
;
26 struct rxrpc_ackpacket ack
;
32 struct rxrpc_ackinfo ackinfo
;
36 * Fill out an ACK packet.
38 static size_t rxrpc_fill_out_ack(struct rxrpc_call
*call
,
39 struct rxrpc_pkt_buffer
*pkt
)
41 rxrpc_serial_t serial
;
42 rxrpc_seq_t hard_ack
, top
, seq
;
47 /* Barrier against rxrpc_input_data(). */
48 serial
= call
->ackr_serial
;
49 hard_ack
= READ_ONCE(call
->rx_hard_ack
);
50 top
= smp_load_acquire(&call
->rx_top
);
52 pkt
->ack
.bufferSpace
= htons(8);
53 pkt
->ack
.maxSkew
= htons(call
->ackr_skew
);
54 pkt
->ack
.firstPacket
= htonl(hard_ack
+ 1);
55 pkt
->ack
.previousPacket
= htonl(call
->ackr_prev_seq
);
56 pkt
->ack
.serial
= htonl(serial
);
57 pkt
->ack
.reason
= call
->ackr_reason
;
58 pkt
->ack
.nAcks
= top
- hard_ack
;
60 if (after(top
, hard_ack
)) {
63 ix
= seq
& RXRPC_RXTX_BUFF_MASK
;
64 if (call
->rxtx_buffer
[ix
])
65 *ackp
++ = RXRPC_ACK_TYPE_ACK
;
67 *ackp
++ = RXRPC_ACK_TYPE_NACK
;
69 } while (before_eq(seq
, top
));
72 mtu
= call
->conn
->params
.peer
->if_mtu
;
73 mtu
-= call
->conn
->params
.peer
->hdrsize
;
74 jmax
= (call
->nr_jumbo_bad
> 3) ? 1 : rxrpc_rx_jumbo_max
;
75 pkt
->ackinfo
.rxMTU
= htonl(rxrpc_rx_mtu
);
76 pkt
->ackinfo
.maxMTU
= htonl(mtu
);
77 pkt
->ackinfo
.rwind
= htonl(call
->rx_winsize
);
78 pkt
->ackinfo
.jumbo_max
= htonl(jmax
);
80 trace_rxrpc_tx_ack(call
, hard_ack
+ 1, serial
, call
->ackr_reason
,
86 return top
- hard_ack
+ 3;
90 * Send an ACK or ABORT call packet.
92 int rxrpc_send_call_packet(struct rxrpc_call
*call
, u8 type
)
94 struct rxrpc_connection
*conn
= NULL
;
95 struct rxrpc_pkt_buffer
*pkt
;
98 rxrpc_serial_t serial
;
103 _enter("%u,%s", call
->debug_id
, rxrpc_pkts
[type
]);
105 spin_lock_bh(&call
->lock
);
107 conn
= rxrpc_get_connection_maybe(call
->conn
);
108 spin_unlock_bh(&call
->lock
);
112 pkt
= kzalloc(sizeof(*pkt
), GFP_KERNEL
);
114 rxrpc_put_connection(conn
);
118 serial
= atomic_inc_return(&conn
->serial
);
120 msg
.msg_name
= &call
->peer
->srx
.transport
;
121 msg
.msg_namelen
= call
->peer
->srx
.transport_len
;
122 msg
.msg_control
= NULL
;
123 msg
.msg_controllen
= 0;
126 pkt
->whdr
.epoch
= htonl(conn
->proto
.epoch
);
127 pkt
->whdr
.cid
= htonl(call
->cid
);
128 pkt
->whdr
.callNumber
= htonl(call
->call_id
);
130 pkt
->whdr
.serial
= htonl(serial
);
131 pkt
->whdr
.type
= type
;
132 pkt
->whdr
.flags
= conn
->out_clientflag
;
133 pkt
->whdr
.userStatus
= 0;
134 pkt
->whdr
.securityIndex
= call
->security_ix
;
136 pkt
->whdr
.serviceId
= htons(call
->service_id
);
138 iov
[0].iov_base
= pkt
;
139 iov
[0].iov_len
= sizeof(pkt
->whdr
);
140 len
= sizeof(pkt
->whdr
);
143 case RXRPC_PACKET_TYPE_ACK
:
144 spin_lock_bh(&call
->lock
);
145 if (!call
->ackr_reason
) {
146 spin_unlock_bh(&call
->lock
);
150 n
= rxrpc_fill_out_ack(call
, pkt
);
151 call
->ackr_reason
= 0;
153 spin_unlock_bh(&call
->lock
);
155 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
157 ntohs(pkt
->ack
.maxSkew
),
158 ntohl(pkt
->ack
.firstPacket
),
159 ntohl(pkt
->ack
.previousPacket
),
160 ntohl(pkt
->ack
.serial
),
161 rxrpc_acks(pkt
->ack
.reason
),
164 iov
[0].iov_len
+= sizeof(pkt
->ack
) + n
;
165 iov
[1].iov_base
= &pkt
->ackinfo
;
166 iov
[1].iov_len
= sizeof(pkt
->ackinfo
);
167 len
+= sizeof(pkt
->ack
) + n
+ sizeof(pkt
->ackinfo
);
171 case RXRPC_PACKET_TYPE_ABORT
:
172 abort_code
= call
->abort_code
;
173 pkt
->abort_code
= htonl(abort_code
);
174 _proto("Tx ABORT %%%u { %d }", serial
, abort_code
);
175 iov
[0].iov_len
+= sizeof(pkt
->abort_code
);
176 len
+= sizeof(pkt
->abort_code
);
186 ret
= kernel_sendmsg(conn
->params
.local
->socket
,
187 &msg
, iov
, ioc
, len
);
189 if (ret
< 0 && call
->state
< RXRPC_CALL_COMPLETE
) {
191 case RXRPC_PACKET_TYPE_ACK
:
192 rxrpc_propose_ACK(call
, pkt
->ack
.reason
,
193 ntohs(pkt
->ack
.maxSkew
),
194 ntohl(pkt
->ack
.serial
),
197 case RXRPC_PACKET_TYPE_ABORT
:
203 rxrpc_put_connection(conn
);
209 * send a packet through the transport endpoint
211 int rxrpc_send_data_packet(struct rxrpc_connection
*conn
, struct sk_buff
*skb
)
217 _enter(",{%d}", skb
->len
);
219 iov
[0].iov_base
= skb
->head
;
220 iov
[0].iov_len
= skb
->len
;
222 msg
.msg_name
= &conn
->params
.peer
->srx
.transport
;
223 msg
.msg_namelen
= conn
->params
.peer
->srx
.transport_len
;
224 msg
.msg_control
= NULL
;
225 msg
.msg_controllen
= 0;
228 /* send the packet with the don't fragment bit set if we currently
229 * think it's small enough */
230 if (skb
->len
- sizeof(struct rxrpc_wire_header
) < conn
->params
.peer
->maxdata
) {
231 down_read(&conn
->params
.local
->defrag_sem
);
232 /* send the packet by UDP
233 * - returns -EMSGSIZE if UDP would have to fragment the packet
234 * to go out of the interface
235 * - in which case, we'll have processed the ICMP error
236 * message and update the peer record
238 ret
= kernel_sendmsg(conn
->params
.local
->socket
, &msg
, iov
, 1,
241 up_read(&conn
->params
.local
->defrag_sem
);
242 if (ret
== -EMSGSIZE
)
243 goto send_fragmentable
;
245 _leave(" = %d [%u]", ret
, conn
->params
.peer
->maxdata
);
250 /* attempt to send this message with fragmentation enabled */
251 _debug("send fragment");
253 down_write(&conn
->params
.local
->defrag_sem
);
255 switch (conn
->params
.local
->srx
.transport
.family
) {
257 opt
= IP_PMTUDISC_DONT
;
258 ret
= kernel_setsockopt(conn
->params
.local
->socket
,
259 SOL_IP
, IP_MTU_DISCOVER
,
260 (char *)&opt
, sizeof(opt
));
262 ret
= kernel_sendmsg(conn
->params
.local
->socket
, &msg
, iov
, 1,
265 opt
= IP_PMTUDISC_DO
;
266 kernel_setsockopt(conn
->params
.local
->socket
, SOL_IP
,
268 (char *)&opt
, sizeof(opt
));
272 #ifdef CONFIG_AF_RXRPC_IPV6
274 opt
= IPV6_PMTUDISC_DONT
;
275 ret
= kernel_setsockopt(conn
->params
.local
->socket
,
276 SOL_IPV6
, IPV6_MTU_DISCOVER
,
277 (char *)&opt
, sizeof(opt
));
279 ret
= kernel_sendmsg(conn
->params
.local
->socket
, &msg
,
280 iov
, 1, iov
[0].iov_len
);
282 opt
= IPV6_PMTUDISC_DO
;
283 kernel_setsockopt(conn
->params
.local
->socket
,
284 SOL_IPV6
, IPV6_MTU_DISCOVER
,
285 (char *)&opt
, sizeof(opt
));
291 up_write(&conn
->params
.local
->defrag_sem
);
292 _leave(" = %d [frag %u]", ret
, conn
->params
.peer
->maxdata
);
297 * reject packets through the local endpoint
299 void rxrpc_reject_packets(struct rxrpc_local
*local
)
301 struct sockaddr_rxrpc srx
;
302 struct rxrpc_skb_priv
*sp
;
303 struct rxrpc_wire_header whdr
;
310 _enter("%d", local
->debug_id
);
312 iov
[0].iov_base
= &whdr
;
313 iov
[0].iov_len
= sizeof(whdr
);
314 iov
[1].iov_base
= &code
;
315 iov
[1].iov_len
= sizeof(code
);
316 size
= sizeof(whdr
) + sizeof(code
);
318 msg
.msg_name
= &srx
.transport
;
319 msg
.msg_control
= NULL
;
320 msg
.msg_controllen
= 0;
323 memset(&whdr
, 0, sizeof(whdr
));
324 whdr
.type
= RXRPC_PACKET_TYPE_ABORT
;
326 while ((skb
= skb_dequeue(&local
->reject_queue
))) {
327 rxrpc_see_skb(skb
, rxrpc_skb_rx_seen
);
330 if (rxrpc_extract_addr_from_skb(&srx
, skb
) == 0) {
331 msg
.msg_namelen
= srx
.transport_len
;
333 code
= htonl(skb
->priority
);
335 whdr
.epoch
= htonl(sp
->hdr
.epoch
);
336 whdr
.cid
= htonl(sp
->hdr
.cid
);
337 whdr
.callNumber
= htonl(sp
->hdr
.callNumber
);
338 whdr
.serviceId
= htons(sp
->hdr
.serviceId
);
339 whdr
.flags
= sp
->hdr
.flags
;
340 whdr
.flags
^= RXRPC_CLIENT_INITIATED
;
341 whdr
.flags
&= RXRPC_CLIENT_INITIATED
;
343 kernel_sendmsg(local
->socket
, &msg
, iov
, 2, size
);
346 rxrpc_free_skb(skb
, rxrpc_skb_rx_freed
);