/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

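/*
 * Default mid callback: the request was issued synchronously, so completion
 * simply wakes the task sleeping in wait_for_response().
 */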
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

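/*
 * Allocate a mid (multiplex id) entry from the mempool and initialize it to
 * track the given request on the given server. A GFP_NOFS mempool allocation
 * may sleep but does not fail, so the result needs no NULL check.
 */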
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

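/*
 * Release a mid and its response buffer back to their pools. With
 * CONFIG_CIFS_STATS2, also log requests that took longer than a second,
 * skipping the server's blocking-lock command since it is expected to stall.
 */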
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

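/*
 * Total length of an smb_rqst: the kvec array plus, when a page array is
 * attached, its full pages and the shorter tail page.
 */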
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

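/*
 * Send a request on the socket: cork it, stream the kvec array followed by
 * the page array, then uncork. A partial send leaves the stream unframed, so
 * the connection is marked for reconnect to make the server discard the
 * half-sent SMB.
 */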
static int
__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
		     send_length, smb_buf_length);
		return -EIO;
	}

	if (n_vec < 2)
		return -EIO;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);
	dump_smb(iov[1].iov_base, iov[1].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

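/*
 * Send a request, optionally transforming (encrypting) it first. If
 * CIFS_TRANSFORM_REQ is set, the protocol's init_transform_rq hook builds an
 * encrypted copy of the request, which is sent and then freed.
 */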
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
{
	struct smb_rqst cur_rqst;
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, rqst);

	if (!server->ops->init_transform_rq ||
	    !server->ops->free_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callbacks are missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, &cur_rqst);
	server->ops->free_transform_rq(&cur_rqst);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, &rqst);
}

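/*
 * Take one send credit, sleeping (killably) until one is available. Async
 * ops (e.g. oplock breaks) skip the wait so they are never held up, and
 * blocking lock ops neither consume a credit nor count as in flight since
 * they may legitimately stall on the server.
 */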
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

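/*
 * Grant the full requested size; the original CIFS dialect has no MTU
 * (large-request) credit accounting, so no credits are consumed.
 */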
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

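/*
 * Validate session state for a new request (while a session is coming up
 * only SESSION_SETUP and NEGOTIATE may pass; while it is going down only
 * LOGOFF may), then allocate a mid and queue it on the server's pending list.
 */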
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* the only valid request on a session being torn down is a logoff */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

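/*
 * Sleep (freezably, interruptible only by fatal signals) until the
 * demultiplex thread moves the mid out of MID_REQUEST_SUBMITTED.
 */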
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

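/*
 * Build a mid for an async request: verify the iov layout (4-byte RFC1002
 * length in iov[0], header immediately following in iov[1]), request signing
 * if the server requires it, and sign the request.
 */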
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst, flags);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

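/*
 * Map the final state of a mid to an error code, then free it. A mid left in
 * an unexpected state is unlinked from the pending queue here; the
 * recognized failure states were already unlinked when they were set.
 */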
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signed this response */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

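/*
 * Synchronous counterpart of cifs_setup_async_request: validate the iov
 * layout, allocate and queue a mid, and sign the request.
 */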
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

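/*
 * Send a request and wait for its response: take a credit, submit under
 * srv_mutex, wait for the demultiplex thread to complete the mid, and hand
 * the response buffer back to the caller via resp_iov.
 */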
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

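/*
 * Legacy kvec-based wrapper around cifs_send_recv: prepends an iov for the
 * 4-byte RFC1001 length by splitting it off the caller's first buffer.
 */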
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec *new_iov;
	int rc;

	new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
	if (!new_iov)
		return -ENOMEM;

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	kfree(new_iov);
	return rc;
}

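/*
 * Synchronous send/receive for a single pre-built SMB buffer. The response
 * is copied into out_buf and its byte count returned via pbytes_returned.
 */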
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

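/*
 * Like SendReceive(), but for blocking lock requests that may wait on the
 * server indefinitely. The wait is interruptible; on a signal the lock is
 * cancelled (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) and, once the response arrives, a denied lock is turned into
 * -ERESTARTSYS so the system call restarts.
 */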
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}