1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46
47 void
48 cifs_wake_up_task(struct mid_q_entry *mid)
49 {
50 wake_up_process(mid->callback_data);
51 }
52
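/*
 * Allocate and initialize a mid (multiplex id) entry for an outgoing
 * request: take it from the cifs_mid_poolp mempool, stamp the allocation
 * time in jiffies, and install the default synchronous callback that
 * simply wakes the issuing task.
 */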
53 struct mid_q_entry *
54 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55 {
56 struct mid_q_entry *temp;
57
58 if (server == NULL) {
59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60 return NULL;
61 }
62
63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64 memset(temp, 0, sizeof(struct mid_q_entry));
65 kref_init(&temp->refcount);
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
74
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 temp->callback = cifs_wake_up_task;
80 temp->callback_data = current;
81
82 atomic_inc(&midCount);
83 temp->mid_state = MID_REQUEST_ALLOCATED;
84 return temp;
85 }
86
87 static void _cifs_mid_q_entry_release(struct kref *refcount)
88 {
89 struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
90 refcount);
91
92 mempool_free(mid, cifs_mid_poolp);
93 }
94
95 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
96 {
97 spin_lock(&GlobalMid_Lock);
98 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
99 spin_unlock(&GlobalMid_Lock);
100 }
101
102 void
103 DeleteMidQEntry(struct mid_q_entry *midEntry)
104 {
105 #ifdef CONFIG_CIFS_STATS2
106 __le16 command = midEntry->server->vals->lock_cmd;
107 __u16 smb_cmd = le16_to_cpu(midEntry->command);
108 unsigned long now;
109 unsigned long roundtrip_time;
110 struct TCP_Server_Info *server = midEntry->server;
111 #endif
112 midEntry->mid_state = MID_FREE;
113 atomic_dec(&midCount);
114 if (midEntry->large_buf)
115 cifs_buf_release(midEntry->resp_buf);
116 else
117 cifs_small_buf_release(midEntry->resp_buf);
118 #ifdef CONFIG_CIFS_STATS2
119 now = jiffies;
120 if (now < midEntry->when_alloc)
121 cifs_dbg(VFS, "invalid mid allocation time\n");
122 roundtrip_time = now - midEntry->when_alloc;
123
124 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
125 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
126 server->slowest_cmd[smb_cmd] = roundtrip_time;
127 server->fastest_cmd[smb_cmd] = roundtrip_time;
128 } else {
129 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
130 server->slowest_cmd[smb_cmd] = roundtrip_time;
131 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
132 server->fastest_cmd[smb_cmd] = roundtrip_time;
133 }
134 cifs_stats_inc(&server->num_cmds[smb_cmd]);
135 server->time_per_cmd[smb_cmd] += roundtrip_time;
136 }
137 /*
138 * commands taking longer than one second (default) can be an indication
139 * that something is wrong, unless it is quite a slow link or a very
140 * busy server. Note that this calc is unlikely or impossible to wrap
141 * as long as slow_rsp_threshold is not set way above the recommended max
142 * value (32767, i.e. 9 hours) and is generally harmless even if wrong
143 * since it only affects debug counters - so leave the calc as a simple
144 * comparison rather than doing multiple conversions and overflow
145 * checks.
146 */
147 if ((slow_rsp_threshold != 0) &&
148 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
149 (midEntry->command != command)) {
150 /*
151 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
152 * NB: le16_to_cpu returns unsigned so can not be negative below
153 */
154 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
155 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
156
157 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
158 midEntry->when_sent, midEntry->when_received);
159 if (cifsFYI & CIFS_TIMER) {
160 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
161 midEntry->command, midEntry->mid);
162 cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
163 now - midEntry->when_alloc,
164 now - midEntry->when_sent,
165 now - midEntry->when_received);
166 }
167 }
168 #endif
169 cifs_mid_q_entry_release(midEntry);
170 }
171
172 void
173 cifs_delete_mid(struct mid_q_entry *mid)
174 {
175 spin_lock(&GlobalMid_Lock);
176 list_del_init(&mid->qhead);
177 mid->mid_flags |= MID_DELETED;
178 spin_unlock(&GlobalMid_Lock);
179
180 DeleteMidQEntry(mid);
181 }
182
183 /*
184 * smb_send_kvec - send an array of kvecs to the server
185 * @server: Server to send the data to
186 * @smb_msg: Message to send
187 * @sent: amount of data sent on socket is stored here
188 *
189 * Our basic "send data to server" function. Should be called with srv_mutex
190 * held. The caller is responsible for handling the results.
191 */
192 static int
193 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
194 size_t *sent)
195 {
196 int rc = 0;
197 int retries = 0;
198 struct socket *ssocket = server->ssocket;
199
200 *sent = 0;
201
202 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
203 smb_msg->msg_namelen = sizeof(struct sockaddr);
204 smb_msg->msg_control = NULL;
205 smb_msg->msg_controllen = 0;
206 if (server->noblocksnd)
207 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
208 else
209 smb_msg->msg_flags = MSG_NOSIGNAL;
210
211 while (msg_data_left(smb_msg)) {
212 /*
213 * If blocking send, we try 3 times, since each can block
214 * for 5 seconds. For nonblocking we have to try more
215 * but wait increasing amounts of time allowing time for
216 * socket to clear. The overall time we wait in either
217 * case to send on the socket is about 15 seconds.
218 * Similarly we wait for 15 seconds for a response from
219 * the server in SendReceive[2] for the server to send
220 * a response back for most types of requests (except
221 * SMB Write past end of file which can be slow, and
222 * blocking lock operations). NFS waits slightly longer
223 * than CIFS, but this can make it take longer for
224 * nonresponsive servers to be detected and 15 seconds
225 * is more than enough time for modern networks to
226 * send a packet. In most cases if we fail to send
227 * after the retries we will kill the socket and
228 * reconnect which may clear the network problem.
229 */
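		/*
		 * For a nonblocking socket, msleep(1 << retries) below
		 * sleeps 2, 4, 8, ... 8192 ms as retries climbs from 1
		 * to 13, roughly 16 seconds in total before the
		 * retries >= 14 check gives up with -EAGAIN.
		 */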
230 rc = sock_sendmsg(ssocket, smb_msg);
231 if (rc == -EAGAIN) {
232 retries++;
233 if (retries >= 14 ||
234 (!server->noblocksnd && (retries > 2))) {
235 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
236 ssocket);
237 return -EAGAIN;
238 }
239 msleep(1 << retries);
240 continue;
241 }
242
243 if (rc < 0)
244 return rc;
245
246 if (rc == 0) {
247 /* should never happen, letting socket clear before
248 retrying is our only obvious option here */
249 cifs_dbg(VFS, "tcp sent no data\n");
250 msleep(500);
251 continue;
252 }
253
254 /* send was at least partially successful */
255 *sent += rc;
256 retries = 0; /* in case we get ENOSPC on the next send */
257 }
258 return 0;
259 }
260
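/*
 * smb_rqst_len - number of bytes a smb_rqst will occupy on the wire
 * @server: server the request will be sent to
 * @rqst: request to measure
 *
 * Sums the kvec array (skipping the leading 4-byte RFC1002 length vector
 * when the dialect has no header preamble, i.e. SMB2+) plus any data
 * carried in the page array.
 */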
261 unsigned long
262 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
263 {
264 unsigned int i;
265 struct kvec *iov;
266 int nvec;
267 unsigned long buflen = 0;
268
269 if (server->vals->header_preamble_size == 0 &&
270 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
271 iov = &rqst->rq_iov[1];
272 nvec = rqst->rq_nvec - 1;
273 } else {
274 iov = rqst->rq_iov;
275 nvec = rqst->rq_nvec;
276 }
277
278 /* total up iov array first */
279 for (i = 0; i < nvec; i++)
280 buflen += iov[i].iov_len;
281
282 /*
283 * Add in the page array if there is one. The caller needs to make
284 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
285 * multiple pages ends at page boundary, rq_tailsz needs to be set to
286 * PAGE_SIZE.
287 */
288 if (rqst->rq_npages) {
289 if (rqst->rq_npages == 1)
290 buflen += rqst->rq_tailsz;
291 else {
292 /*
293 * If there is more than one page, calculate the
294 * buffer length based on rq_offset and rq_tailsz
295 */
296 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
297 rqst->rq_offset;
298 buflen += rqst->rq_tailsz;
299 }
300 }
301
302 return buflen;
303 }
304
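/*
 * __smb_send_rqst - push one or more marshalled requests onto the socket
 *
 * Cork the socket, block signals for the duration of the send, emit the
 * 4-byte RFC1002 length marker for SMB2+, then stream every kvec and page
 * of each request. A partial send marks the session CifsNeedReconnect so
 * the server discards the truncated SMB. RDMA (smbdirect) connections are
 * handed off to smbd_send() instead.
 */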
305 static int
306 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
307 struct smb_rqst *rqst)
308 {
309 int rc = 0;
310 struct kvec *iov;
311 int n_vec;
312 unsigned int send_length = 0;
313 unsigned int i, j;
314 sigset_t mask, oldmask;
315 size_t total_len = 0, sent, size;
316 struct socket *ssocket = server->ssocket;
317 struct msghdr smb_msg;
318 int val = 1;
319 __be32 rfc1002_marker;
320
321 if (cifs_rdma_enabled(server) && server->smbd_conn) {
322 rc = smbd_send(server, num_rqst, rqst);
323 goto smbd_done;
324 }
325
326 if (ssocket == NULL)
327 return -EAGAIN;
328
329 if (signal_pending(current)) {
330 cifs_dbg(FYI, "signal is pending before sending any data\n");
331 return -EINTR;
332 }
333
334 /* cork the socket */
335 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
336 (char *)&val, sizeof(val));
337
338 for (j = 0; j < num_rqst; j++)
339 send_length += smb_rqst_len(server, &rqst[j]);
340 rfc1002_marker = cpu_to_be32(send_length);
341
342 /*
343 * We should not allow signals to interrupt the network send because
344 * any partial send will cause session reconnects, thus increasing
345 * latency of system calls and overloading the server with unnecessary
346 * requests.
347 */
348
349 sigfillset(&mask);
350 sigprocmask(SIG_BLOCK, &mask, &oldmask);
351
352 /* Generate a rfc1002 marker for SMB2+ */
353 if (server->vals->header_preamble_size == 0) {
354 struct kvec hiov = {
355 .iov_base = &rfc1002_marker,
356 .iov_len = 4
357 };
358 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
359 rc = smb_send_kvec(server, &smb_msg, &sent);
360 if (rc < 0)
361 goto unmask;
362
363 total_len += sent;
364 send_length += 4;
365 }
366
367 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
368
369 for (j = 0; j < num_rqst; j++) {
370 iov = rqst[j].rq_iov;
371 n_vec = rqst[j].rq_nvec;
372
373 size = 0;
374 for (i = 0; i < n_vec; i++) {
375 dump_smb(iov[i].iov_base, iov[i].iov_len);
376 size += iov[i].iov_len;
377 }
378
379 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
380
381 rc = smb_send_kvec(server, &smb_msg, &sent);
382 if (rc < 0)
383 goto unmask;
384
385 total_len += sent;
386
387 /* now walk the page array and send each page in it */
388 for (i = 0; i < rqst[j].rq_npages; i++) {
389 struct bio_vec bvec;
390
391 bvec.bv_page = rqst[j].rq_pages[i];
392 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
393 &bvec.bv_offset);
394
395 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
396 &bvec, 1, bvec.bv_len);
397 rc = smb_send_kvec(server, &smb_msg, &sent);
398 if (rc < 0)
399 break;
400
401 total_len += sent;
402 }
403 }
404
405 unmask:
406 sigprocmask(SIG_SETMASK, &oldmask, NULL);
407
408 /*
409 * If signal is pending but we have already sent the whole packet to
410 * the server we need to return success status to allow a corresponding
411 * mid entry to be kept in the pending requests queue, thus allowing
412 * the client to handle responses from the server.
413 *
414 * If only part of the packet has been sent there is no need to hide
415 * interrupt because the session will be reconnected anyway, so there
416 * won't be any response from the server to handle.
417 */
418
419 if (signal_pending(current) && (total_len != send_length)) {
420 cifs_dbg(FYI, "signal is pending after attempt to send\n");
421 rc = -EINTR;
422 }
423
424 /* uncork it */
425 val = 0;
426 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
427 (char *)&val, sizeof(val));
428
429 if ((total_len > 0) && (total_len != send_length)) {
430 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
431 send_length, total_len);
432 /*
433 * If we have only sent part of an SMB then the next SMB could
434 * be taken as the remainder of this one. We need to kill the
435 * socket so the server throws away the partial SMB
436 */
437 server->tcpStatus = CifsNeedReconnect;
438 trace_smb3_partial_send_reconnect(server->CurrentMid,
439 server->hostname);
440 }
441 smbd_done:
442 if (rc < 0 && rc != -EINTR)
443 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
444 rc);
445 else if (rc > 0)
446 rc = 0;
447
448 return rc;
449 }
450
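/*
 * smb_send_rqst - optionally encrypt a request chain before sending it.
 * For CIFS_TRANSFORM_REQ the chain is prefixed with an smb2_transform_hdr
 * and transformed by the server's init_transform_rq hook; the transformed
 * copy is what goes on the wire and is freed afterwards.
 */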
451 static int
452 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
453 struct smb_rqst *rqst, int flags)
454 {
455 struct kvec iov;
456 struct smb2_transform_hdr tr_hdr;
457 struct smb_rqst cur_rqst[MAX_COMPOUND];
458 int rc;
459
460 if (!(flags & CIFS_TRANSFORM_REQ))
461 return __smb_send_rqst(server, num_rqst, rqst);
462
463 if (num_rqst > MAX_COMPOUND - 1)
464 return -ENOMEM;
465
466 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
467 memset(&iov, 0, sizeof(iov));
468 memset(&tr_hdr, 0, sizeof(tr_hdr));
469
470 iov.iov_base = &tr_hdr;
471 iov.iov_len = sizeof(tr_hdr);
472 cur_rqst[0].rq_iov = &iov;
473 cur_rqst[0].rq_nvec = 1;
474
475 if (!server->ops->init_transform_rq) {
476 cifs_dbg(VFS, "Encryption requested but transform callback "
477 "is missing\n");
478 return -EIO;
479 }
480
481 rc = server->ops->init_transform_rq(server, num_rqst + 1,
482 &cur_rqst[0], rqst);
483 if (rc)
484 return rc;
485
486 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
487 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
488 return rc;
489 }
490
491 int
492 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
493 unsigned int smb_buf_length)
494 {
495 struct kvec iov[2];
496 struct smb_rqst rqst = { .rq_iov = iov,
497 .rq_nvec = 2 };
498
499 iov[0].iov_base = smb_buffer;
500 iov[0].iov_len = 4;
501 iov[1].iov_base = (char *)smb_buffer + 4;
502 iov[1].iov_len = smb_buf_length;
503
504 return __smb_send_rqst(server, 1, &rqst);
505 }
506
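/*
 * Wait until at least @num_credits credits are available (or the timeout
 * expires), then charge them to the in-flight count and record the
 * current reconnect instance. CIFS_NON_BLOCKING requests (oplock breaks)
 * skip the wait entirely, and blocking lock requests are not counted
 * against the in-flight total since they may legitimately block on the
 * server.
 */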
507 static int
508 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
509 const int timeout, const int flags,
510 unsigned int *instance)
511 {
512 int rc;
513 int *credits;
514 int optype;
515 long int t;
516
517 if (timeout < 0)
518 t = MAX_JIFFY_OFFSET;
519 else
520 t = msecs_to_jiffies(timeout);
521
522 optype = flags & CIFS_OP_MASK;
523
524 *instance = 0;
525
526 credits = server->ops->get_credits_field(server, optype);
527 /* Since an echo is already inflight, no need to wait to send another */
528 if (*credits <= 0 && optype == CIFS_ECHO_OP)
529 return -EAGAIN;
530
531 spin_lock(&server->req_lock);
532 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
533 /* oplock breaks must not be held up */
534 server->in_flight++;
535 *credits -= 1;
536 *instance = server->reconnect_instance;
537 spin_unlock(&server->req_lock);
538 return 0;
539 }
540
541 while (1) {
542 if (*credits < num_credits) {
543 spin_unlock(&server->req_lock);
544 cifs_num_waiters_inc(server);
545 rc = wait_event_killable_timeout(server->request_q,
546 has_credits(server, credits, num_credits), t);
547 cifs_num_waiters_dec(server);
548 if (!rc) {
549 trace_smb3_credit_timeout(server->CurrentMid,
550 server->hostname, num_credits);
551 cifs_dbg(VFS, "wait timed out after %d ms\n",
552 timeout);
553 return -ENOTSUPP;
554 }
555 if (rc == -ERESTARTSYS)
556 return -ERESTARTSYS;
557 spin_lock(&server->req_lock);
558 } else {
559 if (server->tcpStatus == CifsExiting) {
560 spin_unlock(&server->req_lock);
561 return -ENOENT;
562 }
563
564 /*
565 * For normal commands, reserve the last MAX_COMPOUND
566 * credits for compound requests.
567 * Otherwise these compounds could be permanently
568 * starved for credits by single-credit requests.
569 *
570 * To prevent spinning CPU, block this thread until
571 * there are >MAX_COMPOUND credits available.
572 * But only do this if we already have a lot of
573 * credits in flight to avoid triggering this check
574 * for servers that are slow to hand out credits on
575 * new sessions.
576 */
577 if (!optype && num_credits == 1 &&
578 server->in_flight > 2 * MAX_COMPOUND &&
579 *credits <= MAX_COMPOUND) {
580 spin_unlock(&server->req_lock);
581 cifs_num_waiters_inc(server);
582 rc = wait_event_killable_timeout(
583 server->request_q,
584 has_credits(server, credits,
585 MAX_COMPOUND + 1),
586 t);
587 cifs_num_waiters_dec(server);
588 if (!rc) {
589 trace_smb3_credit_timeout(
590 server->CurrentMid,
591 server->hostname, num_credits);
592 cifs_dbg(VFS, "wait timed out after %d ms\n",
593 timeout);
594 return -ENOTSUPP;
595 }
596 if (rc == -ERESTARTSYS)
597 return -ERESTARTSYS;
598 spin_lock(&server->req_lock);
599 continue;
600 }
601
602 /*
603 * Can not count locking commands against total
604 * as they are allowed to block on server.
605 */
606
607 /* update # of requests on the wire to server */
608 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
609 *credits -= num_credits;
610 server->in_flight += num_credits;
611 *instance = server->reconnect_instance;
612 }
613 spin_unlock(&server->req_lock);
614 break;
615 }
616 }
617 return 0;
618 }
619
620 static int
621 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
622 unsigned int *instance)
623 {
624 return wait_for_free_credits(server, 1, -1, flags,
625 instance);
626 }
627
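/*
 * Credit wait for a whole compound chain: give up immediately with
 * -ENOTSUPP when the credit shortfall exceeds the number of requests
 * currently in flight (so no new credits can be expected soon), otherwise
 * wait up to 60 seconds for @num credits.
 */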
628 static int
629 wait_for_compound_request(struct TCP_Server_Info *server, int num,
630 const int flags, unsigned int *instance)
631 {
632 int *credits;
633
634 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
635
636 spin_lock(&server->req_lock);
637 if (*credits < num) {
638 /*
639 * Return immediately if not too many requests in flight since
640 * we will likely be stuck on waiting for credits.
641 */
642 if (server->in_flight < num - *credits) {
643 spin_unlock(&server->req_lock);
644 return -ENOTSUPP;
645 }
646 }
647 spin_unlock(&server->req_lock);
648
649 return wait_for_free_credits(server, num, 60000, flags,
650 instance);
651 }
652
653 int
654 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
655 unsigned int *num, struct cifs_credits *credits)
656 {
657 *num = size;
658 credits->value = 0;
659 credits->instance = server->reconnect_instance;
660 return 0;
661 }
662
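/*
 * allocate_mid - create and queue a mid entry for an SMB1-style request.
 * The attempt is rejected while the TCP or SMB session is being torn down
 * or is not yet ready for this command; otherwise the new entry is linked
 * onto the server's pending_mid_q.
 */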
663 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
664 struct mid_q_entry **ppmidQ)
665 {
666 if (ses->server->tcpStatus == CifsExiting) {
667 return -ENOENT;
668 }
669
670 if (ses->server->tcpStatus == CifsNeedReconnect) {
671 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
672 return -EAGAIN;
673 }
674
675 if (ses->status == CifsNew) {
676 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
677 (in_buf->Command != SMB_COM_NEGOTIATE))
678 return -EAGAIN;
679 /* else ok - we are setting up session */
680 }
681
682 if (ses->status == CifsExiting) {
683 /* check if SMB session is bad because we are setting it up */
684 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
685 return -EAGAIN;
686 /* else ok - we are shutting down session */
687 }
688
689 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
690 if (*ppmidQ == NULL)
691 return -ENOMEM;
692 spin_lock(&GlobalMid_Lock);
693 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
694 spin_unlock(&GlobalMid_Lock);
695 return 0;
696 }
697
698 static int
699 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
700 {
701 int error;
702
703 error = wait_event_freezekillable_unsafe(server->response_q,
704 midQ->mid_state != MID_REQUEST_SUBMITTED);
705 if (error < 0)
706 return -ERESTARTSYS;
707
708 return 0;
709 }
710
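/*
 * Build a mid for an asynchronous request. The first kvec must be the
 * 4-byte RFC1002 length immediately followed by the SMB header in the
 * second kvec; the request is signed here if the server requires it.
 */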
711 struct mid_q_entry *
712 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
713 {
714 int rc;
715 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
716 struct mid_q_entry *mid;
717
718 if (rqst->rq_iov[0].iov_len != 4 ||
719 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
720 return ERR_PTR(-EIO);
721
722 /* enable signing if server requires it */
723 if (server->sign)
724 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
725
726 mid = AllocMidQEntry(hdr, server);
727 if (mid == NULL)
728 return ERR_PTR(-ENOMEM);
729
730 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
731 if (rc) {
732 DeleteMidQEntry(mid);
733 return ERR_PTR(rc);
734 }
735
736 return mid;
737 }
738
739 /*
740 * Send a SMB request and set the callback function in the mid to handle
741 * the result. Caller is responsible for dealing with timeouts.
742 */
743 int
744 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
745 mid_receive_t *receive, mid_callback_t *callback,
746 mid_handle_t *handle, void *cbdata, const int flags,
747 const struct cifs_credits *exist_credits)
748 {
749 int rc;
750 struct mid_q_entry *mid;
751 struct cifs_credits credits = { .value = 0, .instance = 0 };
752 unsigned int instance;
753 int optype;
754
755 optype = flags & CIFS_OP_MASK;
756
757 if ((flags & CIFS_HAS_CREDITS) == 0) {
758 rc = wait_for_free_request(server, flags, &instance);
759 if (rc)
760 return rc;
761 credits.value = 1;
762 credits.instance = instance;
763 } else
764 instance = exist_credits->instance;
765
766 mutex_lock(&server->srv_mutex);
767
768 /*
769 * We can't use credits obtained from the previous session to send this
770 * request. Check if there were reconnects after we obtained credits and
771 * return -EAGAIN in such cases to let callers handle it.
772 */
773 if (instance != server->reconnect_instance) {
774 mutex_unlock(&server->srv_mutex);
775 add_credits_and_wake_if(server, &credits, optype);
776 return -EAGAIN;
777 }
778
779 mid = server->ops->setup_async_request(server, rqst);
780 if (IS_ERR(mid)) {
781 mutex_unlock(&server->srv_mutex);
782 add_credits_and_wake_if(server, &credits, optype);
783 return PTR_ERR(mid);
784 }
785
786 mid->receive = receive;
787 mid->callback = callback;
788 mid->callback_data = cbdata;
789 mid->handle = handle;
790 mid->mid_state = MID_REQUEST_SUBMITTED;
791
792 /* put it on the pending_mid_q */
793 spin_lock(&GlobalMid_Lock);
794 list_add_tail(&mid->qhead, &server->pending_mid_q);
795 spin_unlock(&GlobalMid_Lock);
796
797 /*
798 * Need to store the time in mid before calling I/O. For call_async,
799 * I/O response may come back and free the mid entry on another thread.
800 */
801 cifs_save_when_sent(mid);
802 cifs_in_send_inc(server);
803 rc = smb_send_rqst(server, 1, rqst, flags);
804 cifs_in_send_dec(server);
805
806 if (rc < 0) {
807 revert_current_mid(server, mid->credits);
808 server->sequence_number -= 2;
809 cifs_delete_mid(mid);
810 }
811
812 mutex_unlock(&server->srv_mutex);
813
814 if (rc == 0)
815 return 0;
816
817 add_credits_and_wake_if(server, &credits, optype);
818 return rc;
819 }
820
821 /*
822 *
823 * Send an SMB Request. No response info (other than return code)
824 * needs to be parsed.
825 *
826 * flags indicate the type of request buffer and how long to wait
827 * and whether to log NT STATUS code (error) before mapping it to POSIX error
828 *
829 */
830 int
831 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
832 char *in_buf, int flags)
833 {
834 int rc;
835 struct kvec iov[1];
836 struct kvec rsp_iov;
837 int resp_buf_type;
838
839 iov[0].iov_base = in_buf;
840 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
841 flags |= CIFS_NO_RSP_BUF;
842 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
843 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
844
845 return rc;
846 }
847
848 static int
849 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
850 {
851 int rc = 0;
852
853 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
854 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
855
856 spin_lock(&GlobalMid_Lock);
857 switch (mid->mid_state) {
858 case MID_RESPONSE_RECEIVED:
859 spin_unlock(&GlobalMid_Lock);
860 return rc;
861 case MID_RETRY_NEEDED:
862 rc = -EAGAIN;
863 break;
864 case MID_RESPONSE_MALFORMED:
865 rc = -EIO;
866 break;
867 case MID_SHUTDOWN:
868 rc = -EHOSTDOWN;
869 break;
870 default:
871 list_del_init(&mid->qhead);
872 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
873 __func__, mid->mid, mid->mid_state);
874 rc = -EIO;
875 }
876 spin_unlock(&GlobalMid_Lock);
877
878 DeleteMidQEntry(mid);
879 return rc;
880 }
881
882 static inline int
883 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
884 struct mid_q_entry *mid)
885 {
886 return server->ops->send_cancel ?
887 server->ops->send_cancel(server, rqst, mid) : 0;
888 }
889
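/*
 * cifs_check_receive - validate a received SMB1 response: verify the
 * signature when signing is in use and map the SMB status code in the
 * response to a POSIX error.
 */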
890 int
891 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
892 bool log_error)
893 {
894 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
895
896 dump_smb(mid->resp_buf, min_t(u32, 92, len));
897
898 /* convert the length into a more usable form */
899 if (server->sign) {
900 struct kvec iov[2];
901 int rc = 0;
902 struct smb_rqst rqst = { .rq_iov = iov,
903 .rq_nvec = 2 };
904
905 iov[0].iov_base = mid->resp_buf;
906 iov[0].iov_len = 4;
907 iov[1].iov_base = (char *)mid->resp_buf + 4;
908 iov[1].iov_len = len - 4;
909 /* FIXME: add code to kill session */
910 rc = cifs_verify_signature(&rqst, server,
911 mid->sequence_number);
912 if (rc)
913 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
914 rc);
915 }
916
917 /* BB special case reconnect tid and uid here? */
918 return map_smb_to_linux_error(mid->resp_buf, log_error);
919 }
920
921 struct mid_q_entry *
922 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
923 {
924 int rc;
925 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
926 struct mid_q_entry *mid;
927
928 if (rqst->rq_iov[0].iov_len != 4 ||
929 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
930 return ERR_PTR(-EIO);
931
932 rc = allocate_mid(ses, hdr, &mid);
933 if (rc)
934 return ERR_PTR(rc);
935 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
936 if (rc) {
937 cifs_delete_mid(mid);
938 return ERR_PTR(rc);
939 }
940 return mid;
941 }
942
943 static void
944 cifs_compound_callback(struct mid_q_entry *mid)
945 {
946 struct TCP_Server_Info *server = mid->server;
947 struct cifs_credits credits;
948
949 credits.value = server->ops->get_credits(mid);
950 credits.instance = server->reconnect_instance;
951
952 add_credits(server, &credits, mid->optype);
953 }
954
955 static void
956 cifs_compound_last_callback(struct mid_q_entry *mid)
957 {
958 cifs_compound_callback(mid);
959 cifs_wake_up_task(mid);
960 }
961
962 static void
963 cifs_cancelled_callback(struct mid_q_entry *mid)
964 {
965 cifs_compound_callback(mid);
966 DeleteMidQEntry(mid);
967 }
968
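/*
 * compound_send_recv - send a chain of requests and collect the replies.
 *
 * Reserves one credit per request, signs and sends the whole chain under
 * srv_mutex, then waits for each response. Credits for cancelled or
 * failed mids are returned through the compound callbacks; successful
 * responses are handed back in @resp_iov and must be freed by the caller
 * according to @resp_buf_type.
 */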
969 int
970 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
971 const int flags, const int num_rqst, struct smb_rqst *rqst,
972 int *resp_buf_type, struct kvec *resp_iov)
973 {
974 int i, j, optype, rc = 0;
975 struct mid_q_entry *midQ[MAX_COMPOUND];
976 bool cancelled_mid[MAX_COMPOUND] = {false};
977 struct cifs_credits credits[MAX_COMPOUND] = {
978 { .value = 0, .instance = 0 }
979 };
980 unsigned int instance;
981 char *buf;
982
983 optype = flags & CIFS_OP_MASK;
984
985 for (i = 0; i < num_rqst; i++)
986 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
987
988 if ((ses == NULL) || (ses->server == NULL)) {
989 cifs_dbg(VFS, "Null session\n");
990 return -EIO;
991 }
992
993 if (ses->server->tcpStatus == CifsExiting)
994 return -ENOENT;
995
996 /*
997 * Wait for all the requests to become available.
998 * This approach still leaves the possibility of being stuck waiting for
999 * credits if the server doesn't grant credits to the outstanding
1000 * requests and if the client is completely idle, not generating any
1001 * other requests.
1002 * This can be handled by the eventual session reconnect.
1003 */
1004 rc = wait_for_compound_request(ses->server, num_rqst, flags,
1005 &instance);
1006 if (rc)
1007 return rc;
1008
1009 for (i = 0; i < num_rqst; i++) {
1010 credits[i].value = 1;
1011 credits[i].instance = instance;
1012 }
1013
1014 /*
1015 * Make sure that we sign in the same order that we send on this socket
1016 * and avoid races inside tcp sendmsg code that could cause corruption
1017 * of smb data.
1018 */
1019
1020 mutex_lock(&ses->server->srv_mutex);
1021
1022 /*
1023 * All the parts of the compound chain must use credits obtained from the
1024 * same session. We can not use credits obtained from the previous
1025 * session to send this request. Check if there were reconnects after
1026 * we obtained credits and return -EAGAIN in such cases to let callers
1027 * handle it.
1028 */
1029 if (instance != ses->server->reconnect_instance) {
1030 mutex_unlock(&ses->server->srv_mutex);
1031 for (j = 0; j < num_rqst; j++)
1032 add_credits(ses->server, &credits[j], optype);
1033 return -EAGAIN;
1034 }
1035
1036 for (i = 0; i < num_rqst; i++) {
1037 midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
1038 if (IS_ERR(midQ[i])) {
1039 revert_current_mid(ses->server, i);
1040 for (j = 0; j < i; j++)
1041 cifs_delete_mid(midQ[j]);
1042 mutex_unlock(&ses->server->srv_mutex);
1043
1044 /* Update # of requests on wire to server */
1045 for (j = 0; j < num_rqst; j++)
1046 add_credits(ses->server, &credits[j], optype);
1047 return PTR_ERR(midQ[i]);
1048 }
1049
1050 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1051 midQ[i]->optype = optype;
1052 /*
1053 * Invoke callback for every part of the compound chain
1054 * to calculate credits properly. Wake up this thread only when
1055 * the last element is received.
1056 */
1057 if (i < num_rqst - 1)
1058 midQ[i]->callback = cifs_compound_callback;
1059 else
1060 midQ[i]->callback = cifs_compound_last_callback;
1061 }
1062 cifs_in_send_inc(ses->server);
1063 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
1064 cifs_in_send_dec(ses->server);
1065
1066 for (i = 0; i < num_rqst; i++)
1067 cifs_save_when_sent(midQ[i]);
1068
1069 if (rc < 0) {
1070 revert_current_mid(ses->server, num_rqst);
1071 ses->server->sequence_number -= 2;
1072 }
1073
1074 mutex_unlock(&ses->server->srv_mutex);
1075
1076 /*
1077 * If sending failed for some reason or it is an oplock break that we
1078 * will not receive a response to - return credits back
1079 */
1080 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1081 for (i = 0; i < num_rqst; i++)
1082 add_credits(ses->server, &credits[i], optype);
1083 goto out;
1084 }
1085
1086 /*
1087 * At this point the request is passed to the network stack - we assume
1088 * that any credits taken from the server structure on the client have
1089 * been spent and we can't return them back. Once we receive responses
1090 * we will collect credits granted by the server in the mid callbacks
1091 * and add those credits to the server structure.
1092 */
1093
1094 /*
1095 * Compounding is never used during session establishment.
1096 */
1097 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1098 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1099 rqst[0].rq_nvec);
1100
1101 for (i = 0; i < num_rqst; i++) {
1102 rc = wait_for_response(ses->server, midQ[i]);
1103 if (rc != 0)
1104 break;
1105 }
1106 if (rc != 0) {
1107 for (; i < num_rqst; i++) {
1108 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1109 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1110 send_cancel(ses->server, &rqst[i], midQ[i]);
1111 spin_lock(&GlobalMid_Lock);
1112 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1113 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1114 midQ[i]->callback = cifs_cancelled_callback;
1115 cancelled_mid[i] = true;
1116 credits[i].value = 0;
1117 }
1118 spin_unlock(&GlobalMid_Lock);
1119 }
1120 }
1121
1122 for (i = 0; i < num_rqst; i++) {
1123 if (rc < 0)
1124 goto out;
1125
1126 rc = cifs_sync_mid_result(midQ[i], ses->server);
1127 if (rc != 0) {
1128 /* mark this mid as cancelled to not free it below */
1129 cancelled_mid[i] = true;
1130 goto out;
1131 }
1132
1133 if (!midQ[i]->resp_buf ||
1134 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1135 rc = -EIO;
1136 cifs_dbg(FYI, "Bad MID state?\n");
1137 goto out;
1138 }
1139
1140 buf = (char *)midQ[i]->resp_buf;
1141 resp_iov[i].iov_base = buf;
1142 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1143 ses->server->vals->header_preamble_size;
1144
1145 if (midQ[i]->large_buf)
1146 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1147 else
1148 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1149
1150 rc = ses->server->ops->check_receive(midQ[i], ses->server,
1151 flags & CIFS_LOG_ERROR);
1152
1153 /* mark it so buf will not be freed by cifs_delete_mid */
1154 if ((flags & CIFS_NO_RSP_BUF) == 0)
1155 midQ[i]->resp_buf = NULL;
1156
1157 }
1158
1159 /*
1160 * Compounding is never used during session establishment.
1161 */
1162 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1163 struct kvec iov = {
1164 .iov_base = resp_iov[0].iov_base,
1165 .iov_len = resp_iov[0].iov_len
1166 };
1167 smb311_update_preauth_hash(ses, &iov, 1);
1168 }
1169
1170 out:
1171 /*
1172 * This will dequeue all mids. After this it is important that the
1173 * demultiplex_thread will not process any of these mids any further.
1174 * This is prevented above by using a noop callback that will not
1175 * wake this thread except for the very last PDU.
1176 */
1177 for (i = 0; i < num_rqst; i++) {
1178 if (!cancelled_mid[i])
1179 cifs_delete_mid(midQ[i]);
1180 }
1181
1182 return rc;
1183 }
1184
1185 int
1186 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1187 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1188 struct kvec *resp_iov)
1189 {
1190 return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
1191 resp_iov);
1192 }
1193
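/*
 * SendReceive2 - send a request supplied as a flat kvec array. The first
 * vector is split into a separate 4-byte RFC1001 length vector (falling
 * back to a heap-allocated array when the on-stack array of
 * CIFS_MAX_IOV_SIZE entries is too small) before handing off to
 * cifs_send_recv().
 */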
1194 int
1195 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1196 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1197 const int flags, struct kvec *resp_iov)
1198 {
1199 struct smb_rqst rqst;
1200 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1201 int rc;
1202
1203 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1204 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1205 GFP_KERNEL);
1206 if (!new_iov) {
1207 /* otherwise cifs_send_recv below sets resp_buf_type */
1208 *resp_buf_type = CIFS_NO_BUFFER;
1209 return -ENOMEM;
1210 }
1211 } else
1212 new_iov = s_iov;
1213
1214 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1215 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1216
1217 new_iov[0].iov_base = new_iov[1].iov_base;
1218 new_iov[0].iov_len = 4;
1219 new_iov[1].iov_base += 4;
1220 new_iov[1].iov_len -= 4;
1221
1222 memset(&rqst, 0, sizeof(struct smb_rqst));
1223 rqst.rq_iov = new_iov;
1224 rqst.rq_nvec = n_vec + 1;
1225
1226 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
1227 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1228 kfree(new_iov);
1229 return rc;
1230 }
1231
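/*
 * SendReceive - synchronously send a single SMB1 request and copy the
 * complete response into @out_buf, returning its RFC1002 length via
 * @pbytes_returned.
 */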
1232 int
1233 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1234 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1235 int *pbytes_returned, const int flags)
1236 {
1237 int rc = 0;
1238 struct mid_q_entry *midQ;
1239 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1240 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1241 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1242 struct cifs_credits credits = { .value = 1, .instance = 0 };
1243
1244 if (ses == NULL) {
1245 cifs_dbg(VFS, "Null smb session\n");
1246 return -EIO;
1247 }
1248 if (ses->server == NULL) {
1249 cifs_dbg(VFS, "Null tcp session\n");
1250 return -EIO;
1251 }
1252
1253 if (ses->server->tcpStatus == CifsExiting)
1254 return -ENOENT;
1255
1256 /* Ensure that we do not send more than 50 overlapping requests
1257 to the same server. We may make this configurable later or
1258 use ses->maxReq */
1259
1260 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1261 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1262 len);
1263 return -EIO;
1264 }
1265
1266 rc = wait_for_free_request(ses->server, flags, &credits.instance);
1267 if (rc)
1268 return rc;
1269
1270 /* make sure that we sign in the same order that we send on this socket
1271 and avoid races inside tcp sendmsg code that could cause corruption
1272 of smb data */
1273
1274 mutex_lock(&ses->server->srv_mutex);
1275
1276 rc = allocate_mid(ses, in_buf, &midQ);
1277 if (rc) {
1278 mutex_unlock(&ses->server->srv_mutex);
1279 /* Update # of requests on wire to server */
1280 add_credits(ses->server, &credits, 0);
1281 return rc;
1282 }
1283
1284 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
1285 if (rc) {
1286 mutex_unlock(&ses->server->srv_mutex);
1287 goto out;
1288 }
1289
1290 midQ->mid_state = MID_REQUEST_SUBMITTED;
1291
1292 cifs_in_send_inc(ses->server);
1293 rc = smb_send(ses->server, in_buf, len);
1294 cifs_in_send_dec(ses->server);
1295 cifs_save_when_sent(midQ);
1296
1297 if (rc < 0)
1298 ses->server->sequence_number -= 2;
1299
1300 mutex_unlock(&ses->server->srv_mutex);
1301
1302 if (rc < 0)
1303 goto out;
1304
1305 rc = wait_for_response(ses->server, midQ);
1306 if (rc != 0) {
1307 send_cancel(ses->server, &rqst, midQ);
1308 spin_lock(&GlobalMid_Lock);
1309 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1310 /* no longer considered to be "in-flight" */
1311 midQ->callback = DeleteMidQEntry;
1312 spin_unlock(&GlobalMid_Lock);
1313 add_credits(ses->server, &credits, 0);
1314 return rc;
1315 }
1316 spin_unlock(&GlobalMid_Lock);
1317 }
1318
1319 rc = cifs_sync_mid_result(midQ, ses->server);
1320 if (rc != 0) {
1321 add_credits(ses->server, &credits, 0);
1322 return rc;
1323 }
1324
1325 if (!midQ->resp_buf || !out_buf ||
1326 midQ->mid_state != MID_RESPONSE_RECEIVED) {
1327 rc = -EIO;
1328 cifs_dbg(VFS, "Bad MID state?\n");
1329 goto out;
1330 }
1331
1332 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1333 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1334 rc = cifs_check_receive(midQ, ses->server, 0);
1335 out:
1336 cifs_delete_mid(midQ);
1337 add_credits(ses->server, &credits, 0);
1338
1339 return rc;
1340 }
1341
1342 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1343 blocking lock to return. */
1344
1345 static int
1346 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1347 struct smb_hdr *in_buf,
1348 struct smb_hdr *out_buf)
1349 {
1350 int bytes_returned;
1351 struct cifs_ses *ses = tcon->ses;
1352 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1353
1354 /* We just modify the current in_buf to change
1355 the type of lock from LOCKING_ANDX_SHARED_LOCK
1356 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1357 LOCKING_ANDX_CANCEL_LOCK. */
1358
1359 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1360 pSMB->Timeout = 0;
1361 pSMB->hdr.Mid = get_next_mid(ses->server);
1362
1363 return SendReceive(xid, ses, in_buf, out_buf,
1364 &bytes_returned, 0);
1365 }
1366
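/*
 * SendReceiveBlockingLock - like SendReceive() but for blocking lock
 * requests: the wait for the response is interruptible, a cancel
 * (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent if a signal arrives, and
 * the call is restarted (-ERESTARTSYS) when the cancelled lock comes
 * back with -EACCES.
 */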
1367 int
1368 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1369 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1370 int *pbytes_returned)
1371 {
1372 int rc = 0;
1373 int rstart = 0;
1374 struct mid_q_entry *midQ;
1375 struct cifs_ses *ses;
1376 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1377 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1378 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1379 unsigned int instance;
1380
1381 if (tcon == NULL || tcon->ses == NULL) {
1382 cifs_dbg(VFS, "Null smb session\n");
1383 return -EIO;
1384 }
1385 ses = tcon->ses;
1386
1387 if (ses->server == NULL) {
1388 cifs_dbg(VFS, "Null tcp session\n");
1389 return -EIO;
1390 }
1391
1392 if (ses->server->tcpStatus == CifsExiting)
1393 return -ENOENT;
1394
1395 /* Ensure that we do not send more than 50 overlapping requests
1396 to the same server. We may make this configurable later or
1397 use ses->maxReq */
1398
1399 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1400 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1401 len);
1402 return -EIO;
1403 }
1404
1405 rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
1406 if (rc)
1407 return rc;
1408
1409 /* make sure that we sign in the same order that we send on this socket
1410 and avoid races inside tcp sendmsg code that could cause corruption
1411 of smb data */
1412
1413 mutex_lock(&ses->server->srv_mutex);
1414
1415 rc = allocate_mid(ses, in_buf, &midQ);
1416 if (rc) {
1417 mutex_unlock(&ses->server->srv_mutex);
1418 return rc;
1419 }
1420
1421 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
1422 if (rc) {
1423 cifs_delete_mid(midQ);
1424 mutex_unlock(&ses->server->srv_mutex);
1425 return rc;
1426 }
1427
1428 midQ->mid_state = MID_REQUEST_SUBMITTED;
1429 cifs_in_send_inc(ses->server);
1430 rc = smb_send(ses->server, in_buf, len);
1431 cifs_in_send_dec(ses->server);
1432 cifs_save_when_sent(midQ);
1433
1434 if (rc < 0)
1435 ses->server->sequence_number -= 2;
1436
1437 mutex_unlock(&ses->server->srv_mutex);
1438
1439 if (rc < 0) {
1440 cifs_delete_mid(midQ);
1441 return rc;
1442 }
1443
1444 /* Wait for a reply - allow signals to interrupt. */
1445 rc = wait_event_interruptible(ses->server->response_q,
1446 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1447 ((ses->server->tcpStatus != CifsGood) &&
1448 (ses->server->tcpStatus != CifsNew)));
1449
1450 /* Were we interrupted by a signal ? */
1451 if ((rc == -ERESTARTSYS) &&
1452 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1453 ((ses->server->tcpStatus == CifsGood) ||
1454 (ses->server->tcpStatus == CifsNew))) {
1455
1456 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1457 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1458 blocking lock to return. */
1459 rc = send_cancel(ses->server, &rqst, midQ);
1460 if (rc) {
1461 cifs_delete_mid(midQ);
1462 return rc;
1463 }
1464 } else {
1465 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1466 to cause the blocking lock to return. */
1467
1468 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1469
1470 /* If we get -ENOLCK back the lock may have
1471 already been removed. Don't exit in this case. */
1472 if (rc && rc != -ENOLCK) {
1473 cifs_delete_mid(midQ);
1474 return rc;
1475 }
1476 }
1477
1478 rc = wait_for_response(ses->server, midQ);
1479 if (rc) {
1480 send_cancel(ses->server, &rqst, midQ);
1481 spin_lock(&GlobalMid_Lock);
1482 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1483 /* no longer considered to be "in-flight" */
1484 midQ->callback = DeleteMidQEntry;
1485 spin_unlock(&GlobalMid_Lock);
1486 return rc;
1487 }
1488 spin_unlock(&GlobalMid_Lock);
1489 }
1490
1491 /* We got the response - restart system call. */
1492 rstart = 1;
1493 }
1494
1495 rc = cifs_sync_mid_result(midQ, ses->server);
1496 if (rc != 0)
1497 return rc;
1498
1499 /* rcvd frame is ok */
1500 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1501 rc = -EIO;
1502 cifs_dbg(VFS, "Bad MID state?\n");
1503 goto out;
1504 }
1505
1506 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1507 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1508 rc = cifs_check_receive(midQ, ses->server, 0);
1509 out:
1510 cifs_delete_mid(midQ);
1511 if (rstart && rc == -EACCES)
1512 return -ERESTARTSYS;
1513 return rc;
1514 }