/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = get_mid(smb_buffer);
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
		/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;
		temp->server = server;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = cifs_wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
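
/*
 * Example (illustrative sketch only): an asynchronous user replaces the
 * default synchronous callback set up above, exactly as cifs_call_async()
 * does later in this file. The names my_async_done and my_ctx are
 * hypothetical and stand in for a caller-supplied completion routine and
 * its context.
 *
 *	struct mid_q_entry *mid = AllocMidQEntry(hdr, server);
 *
 *	if (mid) {
 *		mid->callback = my_async_done;	// hypothetical completion fn
 *		mid->callback_data = my_ctx;	// hypothetical caller context
 *	}
 */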

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @iov:	Pointer to array of kvecs
 * @n_vec:	length of kvec array
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -EAGAIN) {
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}
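
/*
 * Example (illustrative sketch only): how a caller is expected to drive
 * smb_send_kvec(), based on the pattern used by smb_send_rqst() below.
 * srv_mutex must already be held; the byte count actually written comes
 * back in @sent. The rfc1002_hdr/smb_payload/payload_len names are
 * hypothetical buffers, not part of this file.
 *
 *	struct kvec iov[2];
 *	size_t sent;
 *	int rc;
 *
 *	iov[0].iov_base = rfc1002_hdr;		// 4-byte RFC1002 length header
 *	iov[0].iov_len  = 4;
 *	iov[1].iov_base = smb_payload;		// SMB request body
 *	iov[1].iov_len  = payload_len;
 *
 *	rc = smb_send_kvec(server, iov, 2, &sent);
 *	if (rc < 0 || sent != 4 + payload_len)
 *		server->tcpStatus = CifsNeedReconnect;	// as smb_send_rqst() does
 */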

/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is then set to rq_pagesz, or to rq_tailsz for the final page in the array.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
		       struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}

static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}
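
/*
 * Worked example: rqst_len() is simply the kvec lengths plus the full pages
 * plus the tail. For a request with two kvecs of 4 and 60 bytes and three
 * pages (rq_pagesz = 4096, rq_tailsz = 100), the total is
 * 4 + 60 + 2 * 4096 + 100 = 8356 bytes, which must equal the RFC1002 length
 * in the header plus 4 (see the sanity check in smb_send_rqst() below).
 */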

static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}
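
/*
 * Example (illustrative only): how the timeout flag chooses the credit
 * behaviour above. An oplock break or echo is marked CIFS_ASYNC_OP and never
 * blocks, a normal call blocks until a credit is free and consumes one, and
 * a blocking lock (CIFS_BLOCKING_OP) waits for a credit but does not
 * consume it:
 *
 *	rc = wait_for_free_request(server, CIFS_ASYNC_OP, CIFS_ECHO_OP);
 *	rc = wait_for_free_request(server, 0, 0);
 *	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, 0);
 */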

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0)
		server->sequence_number -= 2;
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}
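
/*
 * Example (illustrative sketch): an asynchronous caller hands
 * cifs_call_async() a completion callback instead of sleeping in
 * SendReceive2(). The callback is invoked by the demultiplex thread once the
 * response (or a reconnect) completes the mid. The names my_writev_done and
 * my_ctx are hypothetical; cifsiod_wq is the cifs work queue.
 *
 *	static void my_writev_done(struct mid_q_entry *mid)
 *	{
 *		struct my_ctx *ctx = mid->callback_data;
 *
 *		// inspect mid->mid_state and mid->resp_buf here ...
 *		queue_work(cifsiod_wq, &ctx->work);
 *		DeleteMidQEntry(mid);
 *	}
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_writev_done, ctx,
 *			     CIFS_ASYNC_OP);
 */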

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, buf, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
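
/*
 * Example (illustrative sketch): a typical synchronous caller of
 * SendReceive2(). The request goes out in iov[0]; on success iov[0] is
 * rewritten to point at the response buffer, and resp_buf_type tells the
 * caller which release routine to use once it has parsed the reply. "pSMB"
 * is a hypothetical request built by the usual small_smb_init()-style
 * helpers in cifssmb.c.
 *
 *	struct kvec iov[1];
 *	int resp_buf_type = CIFS_NO_BUFFER;
 *
 *	iov[0].iov_base = (char *)pSMB;
 *	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
 *
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, CIFS_LOG_ERROR);
 *	if (rc == 0) {
 *		// parse the response at iov[0].iov_base here ...
 *	}
 *	if (resp_buf_type == CIFS_SMALL_BUFFER)
 *		cifs_small_buf_release(iov[0].iov_base);
 *	else if (resp_buf_type == CIFS_LARGE_BUFFER)
 *		cifs_buf_release(iov[0].iov_base);
 */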

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}