/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second (the default) can indicate
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calculation is unlikely or impossible
	 * to wrap as long as slow_rsp_threshold is not set far above the
	 * recommended maximum (32767, i.e. 9 hours), and it is generally
	 * harmless even if wrong since it only affects debug counters - so
	 * leave it as a simple comparison rather than doing multiple
	 * conversions and overflow checks.
	 */
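	/*
	 * Worked example (illustrative): with the default threshold of 1,
	 * a request allocated at jiffies T is counted below as slow once
	 * "now" is past T + 1 * HZ, i.e. the response took over a second.
	 */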
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
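		/*
		 * Illustrative schedule (nonblocking case): msleep(1 << retries)
		 * below sleeps 2, 4, 8, ... 8192 ms for retries 1 through 13,
		 * roughly 16 seconds in total before the 14-retry limit gives
		 * up; blocking sends instead get 3 tries of up to ~5s each.
		 */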
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * should never happen, letting socket clear before
			 * retrying is our only obvious option here
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
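	/*
	 * Worked example (illustrative values): with rq_npages = 3,
	 * rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 1024, the
	 * multi-page branch below adds 4096 * 2 - 512 + 1024 = 8704 bytes.
	 */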
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate an RFC1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}
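
	/*
	 * Illustrative framing: for an 84-byte SMB2 message the marker sent
	 * above is the big-endian length 0x00000054, i.e. the 4-byte
	 * RFC 1002 session-message prefix that delimits each packet.
	 */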

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are shutting it down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
						 midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);
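
	/*
	 * In other words, callers must put the 4-byte RFC1002 length in
	 * iov[0], laid out contiguously with the SMB header that follows
	 * in iov[1]; anything else is rejected above with -EIO.
	 */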

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	unsigned int optype = mid->optype;
	unsigned int credits_received = 0;

	if (mid->mid_state == MID_RESPONSE_RECEIVED) {
		if (mid->resp_buf)
			credits_received = server->ops->get_credits(mid);
		else
			cifs_dbg(FYI, "Bad state for cancelled MID\n");
	}

	add_credits(server, credits_received, optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * This could be optimized by waiting for all the credits at once,
	 * but that may block for a long time if we are short of credits
	 * due to heavy operations in progress or a server that is not
	 * granting us many, so a fallback to the current approach would
	 * be needed anyway.
	 */
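	/*
	 * Illustrative flow: a 3-request compound takes 3 credits in the
	 * loop below; if the wait fails at i == 2, the two credits already
	 * taken are handed back one at a time via add_credits().
	 */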
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i] = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
				      ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;
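
	/*
	 * Illustrative layout: had the caller passed one 100-byte kvec,
	 * new_iov[0] now covers bytes 0-3 (the RFC1001 length field) and
	 * new_iov[1] the remaining 96 bytes of the packet.
	 */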

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK | LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}