/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
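
/*
 * Note on the unchecked mempool_alloc() in AllocMidQEntry(): with
 * GFP_NOFS (which allows direct reclaim) a mempool allocation sleeps
 * until an element is returned to the pool rather than failing, so
 * the result does not need a NULL check here.
 */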

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
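
/*
 * Worked example of the slow-response check in DeleteMidQEntry()
 * (illustrative, assuming HZ == 1000 and the default
 * slow_rsp_threshold of 1): a mid allocated at jiffies == 50000 is
 * counted as slow once time_after(now, 50000 + 1 * 1000) holds, i.e.
 * after roughly one second.  Even at the recommended maximum
 * threshold of 32767 the window is 32767 * HZ jiffies (about 9
 * hours), nowhere near wrapping an unsigned long.
 */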

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
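
/*
 * Backoff arithmetic for the -EAGAIN path in smb_send_kvec()
 * (illustrative): the nonblocking case sleeps msleep(1 << retries)
 * between attempts, i.e. 2, 4, 8, ... milliseconds.  By the time
 * retries reaches 14 the accumulated sleep is 2 + 4 + ... + 8192 ms,
 * roughly 16 seconds, which is what the "stuck for 15 seconds"
 * message above refers to.
 */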

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
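
/*
 * Worked example for the multi-page branch in smb_rqst_len()
 * (illustrative, assuming rq_pagesz == 4096): with rq_npages == 3,
 * rq_offset == 512 and rq_tailsz == 100, the data spans the last
 * 4096 - 512 bytes of the first page, one full middle page, and 100
 * bytes of the last page, so:
 *
 *   buflen += 4096 * (3 - 1) - 512 + 100 == 7780 bytes
 */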

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
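
/*
 * Framing note for the marker sent in __smb_send_rqst()
 * (illustrative): the four bytes are the big-endian RFC 1002 length
 * of the SMB payload that follows, excluding the marker itself.  For
 * a 312 (0x138) byte compound, cpu_to_be32(312) puts 00 00 01 38 on
 * the wire, which is how the receiver finds message boundaries on
 * the TCP byte stream.
 */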

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}
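
/*
 * Layout of the transformed chain built in smb_send_rqst()
 * (illustrative): slot 0 of cur_rqst carries only the
 * smb2_transform_hdr, and init_transform_rq() fills slots
 * 1..num_rqst with encrypted copies of the caller's requests, so the
 * whole chain goes out as num_rqst + 1 requests and slots
 * 1..num_rqst are freed again afterwards:
 *
 *   cur_rqst[0] -> transform header
 *   cur_rqst[1] -> encrypted rqst[0]
 *   ...
 *   cur_rqst[num_rqst] -> encrypted rqst[num_rqst - 1]
 */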

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
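
/*
 * Credit flow sketch for wait_for_free_credits() (illustrative): an
 * ordinary request consumes one credit and bumps in_flight before it
 * is sent; both are returned through add_credits() when the response
 * arrives or the send fails, which also wakes request_q.  A caller
 * that finds *credits <= 0 therefore sleeps killably until some
 * in-flight response hands credits back.  CIFS_ASYNC_OP (e.g. oplock
 * breaks) skips the wait entirely, and CIFS_BLOCKING_OP takes no
 * credit since blocking lock requests may legitimately sit on the
 * server for a long time.
 */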

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
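
/*
 * Expected iov layout for the sanity check in
 * cifs_setup_async_request() (illustrative): the caller passes the
 * 4-byte RFC 1002 length as iov[0], immediately followed in memory
 * by the SMB header in iov[1], e.g.:
 *
 *   rq_iov[0].iov_base = buf;      rq_iov[0].iov_len = 4;
 *   rq_iov[1].iov_base = buf + 4;  rq_iov[1].iov_len = len;
 *
 * which is exactly the split that smb_send() and SendReceive2()
 * produce from a contiguous buffer.
 */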

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}
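
/*
 * Note on the "sequence_number -= 2" rollback in cifs_call_async()
 * (a sketch of the usual reading, not authoritative): with signing
 * enabled a send reserves two signing sequence numbers, one for the
 * request and one for the expected response, so a failed send gives
 * both back to keep the sequence in step with what actually reached
 * the wire.
 */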

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	add_credits(server, server->ops->get_credits(mid), mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * It can be optimized further by waiting for all the credits
	 * at once but this can wait long enough if we don't have enough
	 * credits due to some heavy operations in progress or the server
	 * not granting us much, so a fallback to the current approach is
	 * needed anyway.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i] = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
				      ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
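
/*
 * Example of the iov reshuffle in SendReceive2() (illustrative): a
 * caller passing a single kvec { buf, 100 } ends up with
 *
 *   new_iov[0] = { buf,     4  }   RFC 1001/1002 length field
 *   new_iov[1] = { buf + 4, 96 }   SMB header and body
 *
 * which matches the two-kvec layout that cifs_setup_request() checks
 * for (iov[0] of length 4, contiguous with iov[1]).
 */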

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send an NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}