]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/cifs/transport.c
Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyper...
[mirror_ubuntu-jammy-kernel.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
f1f27ad7
VW
79 get_task_struct(current);
80 temp->creator = current;
a6f74e80
N
81 temp->callback = cifs_wake_up_task;
82 temp->callback_data = current;
1da177e4 83
1da177e4 84 atomic_inc(&midCount);
7c9421e1 85 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
86 return temp;
87}
88
696e420b
LP
89static void _cifs_mid_q_entry_release(struct kref *refcount)
90{
abe57073
PS
91 struct mid_q_entry *midEntry =
92 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 93#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 94 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 95 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 96 unsigned long now;
433b8dd7 97 unsigned long roundtrip_time;
1047abc1 98#endif
7b71843f
PS
99 struct TCP_Server_Info *server = midEntry->server;
100
101 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
102 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
103 server->ops->handle_cancelled_mid)
104 server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
105
7c9421e1 106 midEntry->mid_state = MID_FREE;
8097531a 107 atomic_dec(&midCount);
7c9421e1 108 if (midEntry->large_buf)
b8643e1b
SF
109 cifs_buf_release(midEntry->resp_buf);
110 else
111 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
112#ifdef CONFIG_CIFS_STATS2
113 now = jiffies;
433b8dd7 114 if (now < midEntry->when_alloc)
a0a3036b 115 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
116 roundtrip_time = now - midEntry->when_alloc;
117
118 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
119 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
120 server->slowest_cmd[smb_cmd] = roundtrip_time;
121 server->fastest_cmd[smb_cmd] = roundtrip_time;
122 } else {
123 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
124 server->slowest_cmd[smb_cmd] = roundtrip_time;
125 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
126 server->fastest_cmd[smb_cmd] = roundtrip_time;
127 }
128 cifs_stats_inc(&server->num_cmds[smb_cmd]);
129 server->time_per_cmd[smb_cmd] += roundtrip_time;
130 }
00778e22
SF
131 /*
132 * commands taking longer than one second (default) can be indications
133 * that something is wrong, unless it is quite a slow link or a very
134 * busy server. Note that this calc is unlikely or impossible to wrap
135 * as long as slow_rsp_threshold is not set way above recommended max
136 * value (32767 ie 9 hours) and is generally harmless even if wrong
137 * since only affects debug counters - so leaving the calc as simple
138 * comparison rather than doing multiple conversions and overflow
139 * checks
140 */
141 if ((slow_rsp_threshold != 0) &&
142 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 143 (midEntry->command != command)) {
f5942db5
SF
144 /*
145 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
146 * NB: le16_to_cpu returns unsigned so can not be negative below
147 */
433b8dd7
SF
148 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
149 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 150
433b8dd7 151 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
152 midEntry->when_sent, midEntry->when_received);
153 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
154 pr_debug("slow rsp: cmd %d mid %llu",
155 midEntry->command, midEntry->mid);
156 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
157 now - midEntry->when_alloc,
158 now - midEntry->when_sent,
159 now - midEntry->when_received);
1047abc1
SF
160 }
161 }
162#endif
f1f27ad7 163 put_task_struct(midEntry->creator);
abe57073
PS
164
165 mempool_free(midEntry, cifs_mid_poolp);
166}
167
168void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
169{
170 spin_lock(&GlobalMid_Lock);
171 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
172 spin_unlock(&GlobalMid_Lock);
173}
174
/* Legacy wrapper: releasing the reference is what actually deletes the mid. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
179
3c1bf7e4
PS
180void
181cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
182{
183 spin_lock(&GlobalMid_Lock);
abe57073
PS
184 if (!(mid->mid_flags & MID_DELETED)) {
185 list_del_init(&mid->qhead);
186 mid->mid_flags |= MID_DELETED;
187 }
ddc8cf8f
JL
188 spin_unlock(&GlobalMid_Lock);
189
190 DeleteMidQEntry(mid);
191}
192
6f49f46b
JL
193/*
194 * smb_send_kvec - send an array of kvecs to the server
195 * @server: Server to send the data to
3ab3f2a1 196 * @smb_msg: Message to send
6f49f46b
JL
197 * @sent: amount of data sent on socket is stored here
198 *
199 * Our basic "send data to server" function. Should be called with srv_mutex
200 * held. The caller is responsible for handling the results.
201 */
d6e04ae6 202static int
3ab3f2a1
AV
203smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204 size_t *sent)
1da177e4
LT
205{
206 int rc = 0;
3ab3f2a1 207 int retries = 0;
edf1ae40 208 struct socket *ssocket = server->ssocket;
50c2f753 209
6f49f46b
JL
210 *sent = 0;
211
3ab3f2a1
AV
212 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
213 smb_msg->msg_namelen = sizeof(struct sockaddr);
214 smb_msg->msg_control = NULL;
215 smb_msg->msg_controllen = 0;
0496e02d 216 if (server->noblocksnd)
3ab3f2a1 217 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 218 else
3ab3f2a1 219 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 220
3ab3f2a1 221 while (msg_data_left(smb_msg)) {
6f49f46b
JL
222 /*
223 * If blocking send, we try 3 times, since each can block
224 * for 5 seconds. For nonblocking we have to try more
225 * but wait increasing amounts of time allowing time for
226 * socket to clear. The overall time we wait in either
227 * case to send on the socket is about 15 seconds.
228 * Similarly we wait for 15 seconds for a response from
229 * the server in SendReceive[2] for the server to send
230 * a response back for most types of requests (except
231 * SMB Write past end of file which can be slow, and
232 * blocking lock operations). NFS waits slightly longer
233 * than CIFS, but this can make it take longer for
234 * nonresponsive servers to be detected and 15 seconds
235 * is more than enough time for modern networks to
236 * send a packet. In most cases if we fail to send
237 * after the retries we will kill the socket and
238 * reconnect which may clear the network problem.
239 */
3ab3f2a1 240 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 241 if (rc == -EAGAIN) {
3ab3f2a1
AV
242 retries++;
243 if (retries >= 14 ||
244 (!server->noblocksnd && (retries > 2))) {
afe6f653 245 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 246 ssocket);
3ab3f2a1 247 return -EAGAIN;
1da177e4 248 }
3ab3f2a1 249 msleep(1 << retries);
1da177e4
LT
250 continue;
251 }
6f49f46b 252
79a58d1f 253 if (rc < 0)
3ab3f2a1 254 return rc;
6f49f46b 255
79a58d1f 256 if (rc == 0) {
3e84469d
SF
257 /* should never happen, letting socket clear before
258 retrying is our only obvious option here */
afe6f653 259 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
260 msleep(500);
261 continue;
d6e04ae6 262 }
6f49f46b 263
3ab3f2a1
AV
264 /* send was at least partially successful */
265 *sent += rc;
266 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 267 }
3ab3f2a1 268 return 0;
97bc00b3
JL
269}
270
35e2cc1b 271unsigned long
81f39f95 272smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
273{
274 unsigned int i;
35e2cc1b
PA
275 struct kvec *iov;
276 int nvec;
a26054d1
JL
277 unsigned long buflen = 0;
278
81f39f95
RS
279 if (server->vals->header_preamble_size == 0 &&
280 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
281 iov = &rqst->rq_iov[1];
282 nvec = rqst->rq_nvec - 1;
283 } else {
284 iov = rqst->rq_iov;
285 nvec = rqst->rq_nvec;
286 }
287
a26054d1 288 /* total up iov array first */
35e2cc1b 289 for (i = 0; i < nvec; i++)
a26054d1
JL
290 buflen += iov[i].iov_len;
291
c06a0f2d
LL
292 /*
293 * Add in the page array if there is one. The caller needs to make
294 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295 * multiple pages ends at page boundary, rq_tailsz needs to be set to
296 * PAGE_SIZE.
297 */
a26054d1 298 if (rqst->rq_npages) {
c06a0f2d
LL
299 if (rqst->rq_npages == 1)
300 buflen += rqst->rq_tailsz;
301 else {
302 /*
303 * If there is more than one page, calculate the
304 * buffer length based on rq_offset and rq_tailsz
305 */
306 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307 rqst->rq_offset;
308 buflen += rqst->rq_tailsz;
309 }
a26054d1
JL
310 }
311
312 return buflen;
313}
314
6f49f46b 315static int
07cd952f
RS
316__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
317 struct smb_rqst *rqst)
6f49f46b 318{
07cd952f
RS
319 int rc = 0;
320 struct kvec *iov;
321 int n_vec;
322 unsigned int send_length = 0;
323 unsigned int i, j;
b30c74c7 324 sigset_t mask, oldmask;
3ab3f2a1 325 size_t total_len = 0, sent, size;
b8eed283 326 struct socket *ssocket = server->ssocket;
3ab3f2a1 327 struct msghdr smb_msg;
c713c877
RS
328 __be32 rfc1002_marker;
329
4357d45f
LL
330 if (cifs_rdma_enabled(server)) {
331 /* return -EAGAIN when connecting or reconnecting */
332 rc = -EAGAIN;
333 if (server->smbd_conn)
334 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
335 goto smbd_done;
336 }
afc18a6f 337
ea702b80 338 if (ssocket == NULL)
afc18a6f 339 return -EAGAIN;
ea702b80 340
b30c74c7
PS
341 if (signal_pending(current)) {
342 cifs_dbg(FYI, "signal is pending before sending any data\n");
343 return -EINTR;
344 }
345
b8eed283 346 /* cork the socket */
db10538a 347 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 348
07cd952f 349 for (j = 0; j < num_rqst; j++)
81f39f95 350 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
351 rfc1002_marker = cpu_to_be32(send_length);
352
b30c74c7
PS
353 /*
354 * We should not allow signals to interrupt the network send because
355 * any partial send will cause session reconnects thus increasing
356 * latency of system calls and overload a server with unnecessary
357 * requests.
358 */
359
360 sigfillset(&mask);
361 sigprocmask(SIG_BLOCK, &mask, &oldmask);
362
c713c877
RS
363 /* Generate a rfc1002 marker for SMB2+ */
364 if (server->vals->header_preamble_size == 0) {
365 struct kvec hiov = {
366 .iov_base = &rfc1002_marker,
367 .iov_len = 4
368 };
aa563d7b 369 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
c713c877
RS
370 rc = smb_send_kvec(server, &smb_msg, &sent);
371 if (rc < 0)
b30c74c7 372 goto unmask;
c713c877
RS
373
374 total_len += sent;
375 send_length += 4;
376 }
377
662bf5bc
PA
378 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
379
07cd952f
RS
380 for (j = 0; j < num_rqst; j++) {
381 iov = rqst[j].rq_iov;
382 n_vec = rqst[j].rq_nvec;
3ab3f2a1 383
07cd952f 384 size = 0;
662bf5bc
PA
385 for (i = 0; i < n_vec; i++) {
386 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 387 size += iov[i].iov_len;
662bf5bc 388 }
97bc00b3 389
aa563d7b 390 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
97bc00b3 391
3ab3f2a1 392 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 393 if (rc < 0)
b30c74c7 394 goto unmask;
97bc00b3
JL
395
396 total_len += sent;
07cd952f
RS
397
398 /* now walk the page array and send each page in it */
399 for (i = 0; i < rqst[j].rq_npages; i++) {
400 struct bio_vec bvec;
401
402 bvec.bv_page = rqst[j].rq_pages[i];
403 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
404 &bvec.bv_offset);
405
aa563d7b 406 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
07cd952f
RS
407 &bvec, 1, bvec.bv_len);
408 rc = smb_send_kvec(server, &smb_msg, &sent);
409 if (rc < 0)
410 break;
411
412 total_len += sent;
413 }
97bc00b3 414 }
1da177e4 415
b30c74c7
PS
416unmask:
417 sigprocmask(SIG_SETMASK, &oldmask, NULL);
418
419 /*
420 * If signal is pending but we have already sent the whole packet to
421 * the server we need to return success status to allow a corresponding
422 * mid entry to be kept in the pending requests queue thus allowing
423 * to handle responses from the server by the client.
424 *
425 * If only part of the packet has been sent there is no need to hide
426 * interrupt because the session will be reconnected anyway, so there
427 * won't be any response from the server to handle.
428 */
429
430 if (signal_pending(current) && (total_len != send_length)) {
431 cifs_dbg(FYI, "signal is pending after attempt to send\n");
432 rc = -EINTR;
433 }
434
b8eed283 435 /* uncork it */
db10538a 436 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 437
c713c877 438 if ((total_len > 0) && (total_len != send_length)) {
f96637be 439 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 440 send_length, total_len);
6f49f46b
JL
441 /*
442 * If we have only sent part of an SMB then the next SMB could
443 * be taken as the remainder of this one. We need to kill the
444 * socket so the server throws away the partial SMB
445 */
edf1ae40 446 server->tcpStatus = CifsNeedReconnect;
bf1fdeb7
SF
447 trace_smb3_partial_send_reconnect(server->CurrentMid,
448 server->hostname);
edf1ae40 449 }
9762c2d0 450smbd_done:
d804d41d 451 if (rc < 0 && rc != -EINTR)
afe6f653 452 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 453 rc);
ee13919c 454 else if (rc > 0)
1da177e4 455 rc = 0;
1da177e4
LT
456
457 return rc;
458}
459
6f49f46b 460static int
1f3a8f5f
RS
461smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
462 struct smb_rqst *rqst, int flags)
6f49f46b 463{
b2c96de7 464 struct kvec iov;
3946d0d0 465 struct smb2_transform_hdr *tr_hdr;
b2c96de7 466 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
467 int rc;
468
469 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
470 return __smb_send_rqst(server, num_rqst, rqst);
471
472 if (num_rqst > MAX_COMPOUND - 1)
473 return -ENOMEM;
7fb8986e 474
b2c96de7 475 if (!server->ops->init_transform_rq) {
a0a3036b 476 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
477 return -EIO;
478 }
6f49f46b 479
3946d0d0
LL
480 tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
481 if (!tr_hdr)
482 return -ENOMEM;
483
484 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
485 memset(&iov, 0, sizeof(iov));
486 memset(tr_hdr, 0, sizeof(*tr_hdr));
487
488 iov.iov_base = tr_hdr;
489 iov.iov_len = sizeof(*tr_hdr);
490 cur_rqst[0].rq_iov = &iov;
491 cur_rqst[0].rq_nvec = 1;
492
1f3a8f5f
RS
493 rc = server->ops->init_transform_rq(server, num_rqst + 1,
494 &cur_rqst[0], rqst);
7fb8986e 495 if (rc)
3946d0d0 496 goto out;
7fb8986e 497
1f3a8f5f
RS
498 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
499 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
500out:
501 kfree(tr_hdr);
7fb8986e 502 return rc;
6f49f46b
JL
503}
504
0496e02d
JL
505int
506smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
507 unsigned int smb_buf_length)
508{
738f9de5 509 struct kvec iov[2];
7fb8986e
PS
510 struct smb_rqst rqst = { .rq_iov = iov,
511 .rq_nvec = 2 };
0496e02d 512
738f9de5
PS
513 iov[0].iov_base = smb_buffer;
514 iov[0].iov_len = 4;
515 iov[1].iov_base = (char *)smb_buffer + 4;
516 iov[1].iov_len = smb_buf_length;
0496e02d 517
07cd952f 518 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
519}
520
fc40f9cf 521static int
b227d215 522wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
523 const int timeout, const int flags,
524 unsigned int *instance)
1da177e4 525{
19e88867 526 long rc;
4230cff8
RS
527 int *credits;
528 int optype;
2b53b929
RS
529 long int t;
530
531 if (timeout < 0)
532 t = MAX_JIFFY_OFFSET;
533 else
534 t = msecs_to_jiffies(timeout);
4230cff8
RS
535
536 optype = flags & CIFS_OP_MASK;
5bc59498 537
34f4deb7
PS
538 *instance = 0;
539
4230cff8
RS
540 credits = server->ops->get_credits_field(server, optype);
541 /* Since an echo is already inflight, no need to wait to send another */
542 if (*credits <= 0 && optype == CIFS_ECHO_OP)
543 return -EAGAIN;
544
fc40f9cf 545 spin_lock(&server->req_lock);
392e1c5d 546 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 547 /* oplock breaks must not be held up */
fc40f9cf 548 server->in_flight++;
1b63f184
SF
549 if (server->in_flight > server->max_in_flight)
550 server->max_in_flight = server->in_flight;
bc205ed1 551 *credits -= 1;
34f4deb7 552 *instance = server->reconnect_instance;
fc40f9cf 553 spin_unlock(&server->req_lock);
27a97a61
VL
554 return 0;
555 }
556
27a97a61 557 while (1) {
b227d215 558 if (*credits < num_credits) {
fc40f9cf 559 spin_unlock(&server->req_lock);
789e6661 560 cifs_num_waiters_inc(server);
2b53b929
RS
561 rc = wait_event_killable_timeout(server->request_q,
562 has_credits(server, credits, num_credits), t);
789e6661 563 cifs_num_waiters_dec(server);
2b53b929 564 if (!rc) {
7937ca96
SF
565 trace_smb3_credit_timeout(server->CurrentMid,
566 server->hostname, num_credits);
afe6f653 567 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
2b53b929
RS
568 timeout);
569 return -ENOTSUPP;
570 }
571 if (rc == -ERESTARTSYS)
572 return -ERESTARTSYS;
fc40f9cf 573 spin_lock(&server->req_lock);
27a97a61 574 } else {
c5797a94 575 if (server->tcpStatus == CifsExiting) {
fc40f9cf 576 spin_unlock(&server->req_lock);
27a97a61 577 return -ENOENT;
1da177e4 578 }
27a97a61 579
16b34aa4
RS
580 /*
581 * For normal commands, reserve the last MAX_COMPOUND
582 * credits to compound requests.
583 * Otherwise these compounds could be permanently
584 * starved for credits by single-credit requests.
585 *
586 * To prevent spinning CPU, block this thread until
587 * there are >MAX_COMPOUND credits available.
588 * But only do this is we already have a lot of
589 * credits in flight to avoid triggering this check
590 * for servers that are slow to hand out credits on
591 * new sessions.
592 */
593 if (!optype && num_credits == 1 &&
594 server->in_flight > 2 * MAX_COMPOUND &&
595 *credits <= MAX_COMPOUND) {
596 spin_unlock(&server->req_lock);
597 cifs_num_waiters_inc(server);
2b53b929
RS
598 rc = wait_event_killable_timeout(
599 server->request_q,
16b34aa4 600 has_credits(server, credits,
2b53b929
RS
601 MAX_COMPOUND + 1),
602 t);
16b34aa4 603 cifs_num_waiters_dec(server);
2b53b929 604 if (!rc) {
7937ca96
SF
605 trace_smb3_credit_timeout(
606 server->CurrentMid,
607 server->hostname, num_credits);
afe6f653 608 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
2b53b929
RS
609 timeout);
610 return -ENOTSUPP;
611 }
612 if (rc == -ERESTARTSYS)
613 return -ERESTARTSYS;
16b34aa4
RS
614 spin_lock(&server->req_lock);
615 continue;
616 }
617
2d86dbc9
PS
618 /*
619 * Can not count locking commands against total
620 * as they are allowed to block on server.
621 */
27a97a61
VL
622
623 /* update # of requests on the wire to server */
4230cff8 624 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
625 *credits -= num_credits;
626 server->in_flight += num_credits;
1b63f184
SF
627 if (server->in_flight > server->max_in_flight)
628 server->max_in_flight = server->in_flight;
34f4deb7 629 *instance = server->reconnect_instance;
2d86dbc9 630 }
fc40f9cf 631 spin_unlock(&server->req_lock);
27a97a61 632 break;
1da177e4
LT
633 }
634 }
7ee1af76
JA
635 return 0;
636}
1da177e4 637
/* Wait (without timeout) for a single send credit. */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
645
257b7809
RS
646static int
647wait_for_compound_request(struct TCP_Server_Info *server, int num,
648 const int flags, unsigned int *instance)
649{
650 int *credits;
651
652 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
653
654 spin_lock(&server->req_lock);
655 if (*credits < num) {
656 /*
657 * Return immediately if not too many requests in flight since
658 * we will likely be stuck on waiting for credits.
659 */
660 if (server->in_flight < num - *credits) {
661 spin_unlock(&server->req_lock);
662 return -ENOTSUPP;
663 }
664 }
665 spin_unlock(&server->req_lock);
666
667 return wait_for_free_credits(server, num, 60000, flags,
668 instance);
669}
670
cb7e9eab
PS
671int
672cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 673 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
674{
675 *num = size;
335b7b62
PS
676 credits->value = 0;
677 credits->instance = server->reconnect_instance;
cb7e9eab
PS
678 return 0;
679}
680
96daf2b0 681static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
682 struct mid_q_entry **ppmidQ)
683{
1da177e4 684 if (ses->server->tcpStatus == CifsExiting) {
7ee1af76 685 return -ENOENT;
8fbbd365
VL
686 }
687
688 if (ses->server->tcpStatus == CifsNeedReconnect) {
f96637be 689 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
7ee1af76 690 return -EAGAIN;
8fbbd365
VL
691 }
692
7f48558e 693 if (ses->status == CifsNew) {
79a58d1f 694 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
ad7a2926 695 (in_buf->Command != SMB_COM_NEGOTIATE))
7ee1af76 696 return -EAGAIN;
ad7a2926 697 /* else ok - we are setting up session */
1da177e4 698 }
7f48558e
SP
699
700 if (ses->status == CifsExiting) {
701 /* check if SMB session is bad because we are setting it up */
702 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
703 return -EAGAIN;
704 /* else ok - we are shutting down session */
705 }
706
24b9b06b 707 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
26f57364 708 if (*ppmidQ == NULL)
7ee1af76 709 return -ENOMEM;
ddc8cf8f
JL
710 spin_lock(&GlobalMid_Lock);
711 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
712 spin_unlock(&GlobalMid_Lock);
7ee1af76
JA
713 return 0;
714}
715
0ade640e
JL
716static int
717wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 718{
0ade640e 719 int error;
7ee1af76 720
5853cc2a 721 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 722 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
723 if (error < 0)
724 return -ERESTARTSYS;
7ee1af76 725
0ade640e 726 return 0;
7ee1af76
JA
727}
728
fec344e3
JL
729struct mid_q_entry *
730cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
731{
732 int rc;
fec344e3 733 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
734 struct mid_q_entry *mid;
735
738f9de5
PS
736 if (rqst->rq_iov[0].iov_len != 4 ||
737 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
738 return ERR_PTR(-EIO);
739
792af7b0 740 /* enable signing if server requires it */
38d77c50 741 if (server->sign)
792af7b0
PS
742 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
743
744 mid = AllocMidQEntry(hdr, server);
745 if (mid == NULL)
fec344e3 746 return ERR_PTR(-ENOMEM);
792af7b0 747
fec344e3 748 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb
SP
749 if (rc) {
750 DeleteMidQEntry(mid);
fec344e3 751 return ERR_PTR(rc);
ffc61ccb
SP
752 }
753
fec344e3 754 return mid;
792af7b0 755}
133672ef 756
a6827c18
JL
757/*
758 * Send a SMB request and set the callback function in the mid to handle
759 * the result. Caller is responsible for dealing with timeouts.
760 */
761int
fec344e3 762cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 763 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
764 mid_handle_t *handle, void *cbdata, const int flags,
765 const struct cifs_credits *exist_credits)
a6827c18 766{
480b1cb9 767 int rc;
a6827c18 768 struct mid_q_entry *mid;
335b7b62 769 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 770 unsigned int instance;
480b1cb9 771 int optype;
a6827c18 772
a891f0f8
PS
773 optype = flags & CIFS_OP_MASK;
774
cb7e9eab 775 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 776 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
777 if (rc)
778 return rc;
335b7b62 779 credits.value = 1;
34f4deb7 780 credits.instance = instance;
3349c3a7
PS
781 } else
782 instance = exist_credits->instance;
a6827c18
JL
783
784 mutex_lock(&server->srv_mutex);
3349c3a7
PS
785
786 /*
787 * We can't use credits obtained from the previous session to send this
788 * request. Check if there were reconnects after we obtained credits and
789 * return -EAGAIN in such cases to let callers handle it.
790 */
791 if (instance != server->reconnect_instance) {
792 mutex_unlock(&server->srv_mutex);
793 add_credits_and_wake_if(server, &credits, optype);
794 return -EAGAIN;
795 }
796
fec344e3
JL
797 mid = server->ops->setup_async_request(server, rqst);
798 if (IS_ERR(mid)) {
a6827c18 799 mutex_unlock(&server->srv_mutex);
335b7b62 800 add_credits_and_wake_if(server, &credits, optype);
fec344e3 801 return PTR_ERR(mid);
a6827c18
JL
802 }
803
44d22d84 804 mid->receive = receive;
a6827c18
JL
805 mid->callback = callback;
806 mid->callback_data = cbdata;
9b7c18a2 807 mid->handle = handle;
7c9421e1 808 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 809
ffc61ccb
SP
810 /* put it on the pending_mid_q */
811 spin_lock(&GlobalMid_Lock);
812 list_add_tail(&mid->qhead, &server->pending_mid_q);
813 spin_unlock(&GlobalMid_Lock);
814
93d2cb6c
LL
815 /*
816 * Need to store the time in mid before calling I/O. For call_async,
817 * I/O response may come back and free the mid entry on another thread.
818 */
819 cifs_save_when_sent(mid);
789e6661 820 cifs_in_send_inc(server);
1f3a8f5f 821 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 822 cifs_in_send_dec(server);
ad313cb8 823
820962dc 824 if (rc < 0) {
c781af7e 825 revert_current_mid(server, mid->credits);
ad313cb8 826 server->sequence_number -= 2;
820962dc
RV
827 cifs_delete_mid(mid);
828 }
829
a6827c18 830 mutex_unlock(&server->srv_mutex);
789e6661 831
ffc61ccb
SP
832 if (rc == 0)
833 return 0;
a6827c18 834
335b7b62 835 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
836 return rc;
837}
838
133672ef
SF
839/*
840 *
841 * Send an SMB Request. No response info (other than return code)
842 * needs to be parsed.
843 *
844 * flags indicate the type of request buffer and how long to wait
845 * and whether to log NT STATUS code (error) before mapping it to POSIX error
846 *
847 */
848int
96daf2b0 849SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 850 char *in_buf, int flags)
133672ef
SF
851{
852 int rc;
853 struct kvec iov[1];
da502f7d 854 struct kvec rsp_iov;
133672ef
SF
855 int resp_buf_type;
856
792af7b0
PS
857 iov[0].iov_base = in_buf;
858 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 859 flags |= CIFS_NO_RSP_BUF;
da502f7d 860 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 861 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 862
133672ef
SF
863 return rc;
864}
865
053d5034 866static int
3c1105df 867cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
868{
869 int rc = 0;
870
f96637be
JP
871 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
872 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 873
74dd92a8 874 spin_lock(&GlobalMid_Lock);
7c9421e1 875 switch (mid->mid_state) {
74dd92a8 876 case MID_RESPONSE_RECEIVED:
053d5034
JL
877 spin_unlock(&GlobalMid_Lock);
878 return rc;
74dd92a8
JL
879 case MID_RETRY_NEEDED:
880 rc = -EAGAIN;
881 break;
71823baf
JL
882 case MID_RESPONSE_MALFORMED:
883 rc = -EIO;
884 break;
3c1105df
JL
885 case MID_SHUTDOWN:
886 rc = -EHOSTDOWN;
887 break;
74dd92a8 888 default:
abe57073
PS
889 if (!(mid->mid_flags & MID_DELETED)) {
890 list_del_init(&mid->qhead);
891 mid->mid_flags |= MID_DELETED;
892 }
afe6f653 893 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
f96637be 894 __func__, mid->mid, mid->mid_state);
74dd92a8 895 rc = -EIO;
053d5034
JL
896 }
897 spin_unlock(&GlobalMid_Lock);
898
2b84a36c 899 DeleteMidQEntry(mid);
053d5034
JL
900 return rc;
901}
902
121b046a 903static inline int
fb2036d8
PS
904send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
905 struct mid_q_entry *mid)
76dcc26f 906{
121b046a 907 return server->ops->send_cancel ?
fb2036d8 908 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
909}
910
/*
 * Validate a received SMB1 response held in mid->resp_buf: dump the first
 * bytes for debugging, verify the packet signature when signing is active
 * on this connection, and map the SMB status code to a Linux errno.
 *
 * @mid:       mid entry whose resp_buf holds the complete response
 * @server:    connection the response arrived on
 * @log_error: forwarded to map_smb_to_linux_error() to control logging
 *
 * Returns 0 or a negative errno derived from the response status.
 * NOTE(review): a signature verification failure is only logged here and
 * does not fail the call — see the FIXME below.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* full on-the-wire length: RFC 1001 payload length + 4-byte header */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* iov[0] = 4-byte RFC 1001 header, iov[1] = SMB body */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
941
/*
 * Allocate and queue a mid entry for an SMB1 request that is about to be
 * sent, then sign the request (recording the signing sequence number in
 * the mid).
 *
 * Expects rqst->rq_iov[0] to be exactly the 4-byte RFC 1001 length field
 * and rq_iov[1] to start immediately after it in the same buffer; any
 * other layout is rejected with -EIO.
 *
 * @ses:     session the request belongs to
 * @ignored: unused server argument (SMB1 always uses ses->server)
 * @rqst:    the marshalled request
 *
 * Returns the queued mid on success or an ERR_PTR on failure.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* signing failed - unqueue and free the mid we just made */
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
964
4e34feb5 965static void
ee258d79 966cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
967{
968 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
969 struct cifs_credits credits;
970
971 credits.value = server->ops->get_credits(mid);
972 credits.instance = server->reconnect_instance;
8a26f0f7 973
34f4deb7 974 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
975}
976
/*
 * Completion callback for the final PDU of a compound chain: collect the
 * granted credits like every other part, then wake the thread waiting in
 * compound_send_recv(). Only the last PDU wakes the sender.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
983
/*
 * Completion callback installed when the waiter has given up on a mid
 * (e.g. interrupted by a signal): still collect the granted credits, then
 * free the mid directly since no thread remains to consume the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
990
5f68ea4a
AA
991/*
992 * Return a channel (master if none) of @ses that can be used to send
993 * regular requests.
994 *
995 * If we are currently binding a new channel (negprot/sess.setup),
996 * return the new incomplete channel.
997 */
998struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
999{
1000 uint index = 0;
1001
1002 if (!ses)
1003 return NULL;
1004
1005 if (!ses->binding) {
1006 /* round robin */
1007 if (ses->chan_count > 1) {
1008 index = (uint)atomic_inc_return(&ses->chan_seq);
1009 index %= ses->chan_count;
1010 }
1011 return ses->chans[index].server;
1012 } else {
1013 return cifs_ses_server(ses);
1014 }
1015}
1016
/*
 * Send a chain of up to MAX_COMPOUND requests as one unit and wait for
 * all the responses.
 *
 * Credit/locking discipline (order is load-bearing):
 *  1. wait until credits for all num_rqst parts are available;
 *  2. under srv_mutex, verify no reconnect happened since the credits
 *     were obtained, set up and sign every mid, then send — signing and
 *     sending must happen in the same order on the socket;
 *  3. after a successful send the credits are considered spent; granted
 *     replacement credits are collected in the per-mid callbacks.
 *
 * @resp_buf_type[i] is set to CIFS_{NO,SMALL,LARGE}_BUFFER and
 * @resp_iov[i] points at the i-th response on success.
 *
 * Returns 0 or a negative errno (-EAGAIN asks the caller to retry after
 * a reconnect).
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit per chain part, all from the same reconnect instance */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mids already queued for earlier chain parts */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* roll back mid counter and signing sequence on send failure */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every part we did not get a response for */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/* response may still arrive; let the callback
				   free the mid and zero its credit here */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any futher.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1233
/* Send a single (non-compounded) request and wait for its response. */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	/* a lone request is simply a compound chain of length one */
	return compound_send_recv(xid, ses, server, flags,
				  1, rqst, resp_buf_type, resp_iov);
}
1243
/*
 * Send an SMB1 request described by @iov/@n_vec and wait for the response.
 *
 * The caller's iov[0] starts with the 4-byte RFC 1001 length field; this
 * helper splits that field into its own leading iovec (as the transport
 * layer expects), copying the caller's array into a scratch array — on
 * the stack for small requests, kmalloc'ed when n_vec + 1 exceeds
 * CIFS_MAX_IOV_SIZE.
 *
 * @resp_buf_type: set to CIFS_{NO,SMALL,LARGE}_BUFFER describing resp_iov
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
1282
/*
 * Send a single SMB1 request held in one contiguous buffer and wait for
 * the response, copying it into @out_buf.
 *
 * Takes one credit before sending and returns it on every exit path;
 * signing and sending happen in the same order on the socket under
 * srv_mutex. On a cancelled wait where the response may still arrive,
 * the mid's callback is repointed at DeleteMidQEntry so the demultiplex
 * thread frees it instead of us.
 *
 * @pbytes_returned: on success, the RFC 1001 length of the response
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the signing sequence bump on a failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* cifs_sync_mid_result already freed the mid on error */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1394
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

/*
 * NOTE: mutates the caller's @in_buf in place (lock type, timeout, mid)
 * and reuses it as the cancel request.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request and needs a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1419
/*
 * Send an SMB1 blocking-lock request and wait (interruptibly) for it to
 * complete. Blocking locks can wait server-side indefinitely, so unlike
 * SendReceive() this waits with signals enabled; if interrupted it sends
 * a cancel (NT_CANCEL for POSIX/TRANSACTION2 locks, LOCKINGX_CANCEL_LOCK
 * for Windows locks), then waits again for the final response. When the
 * response arrives after such a cancel, an -EACCES result is translated
 * to -ERESTARTSYS so the system call is restarted.
 *
 * Uses the CIFS_BLOCKING_OP credit class so these long waits do not
 * exhaust the regular request slots.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the signing sequence bump on a failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly this time) for the final response */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}