/*
 * Source provenance: git.proxmox.com mirror_ubuntu-zesty-kernel.git,
 * blob fs/cifs/transport.c, at commit "CIFS: Send RFC1001 length in a
 * separate iov".
 */
1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include "cifspdu.h"
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "cifs_debug.h"
40
41 void
42 cifs_wake_up_task(struct mid_q_entry *mid)
43 {
44 wake_up_process(mid->callback_data);
45 }
46
47 struct mid_q_entry *
48 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
49 {
50 struct mid_q_entry *temp;
51
52 if (server == NULL) {
53 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
54 return NULL;
55 }
56
57 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
58 if (temp == NULL)
59 return temp;
60 else {
61 memset(temp, 0, sizeof(struct mid_q_entry));
62 temp->mid = get_mid(smb_buffer);
63 temp->pid = current->pid;
64 temp->command = cpu_to_le16(smb_buffer->Command);
65 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
66 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
67 /* when mid allocated can be before when sent */
68 temp->when_alloc = jiffies;
69 temp->server = server;
70
71 /*
72 * The default is for the mid to be synchronous, so the
73 * default callback just wakes up the current task.
74 */
75 temp->callback = cifs_wake_up_task;
76 temp->callback_data = current;
77 }
78
79 atomic_inc(&midCount);
80 temp->mid_state = MID_REQUEST_ALLOCATED;
81 return temp;
82 }
83
/*
 * Free a mid entry back to the mempool, releasing its response buffer
 * (if any) to the correct buffer pool.  Does NOT unlink the entry from
 * pending_mid_q; callers that queued the mid use cifs_delete_mid().
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/*
	 * lock_cmd is this dialect's blocking-lock command; such requests
	 * may legitimately take a long time, so they are excluded from the
	 * slow-response warning below.
	 */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* large_buf records which pool resp_buf came from */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
114
115 void
116 cifs_delete_mid(struct mid_q_entry *mid)
117 {
118 spin_lock(&GlobalMid_Lock);
119 list_del(&mid->qhead);
120 spin_unlock(&GlobalMid_Lock);
121
122 DeleteMidQEntry(mid);
123 }
124
125 /*
126 * smb_send_kvec - send an array of kvecs to the server
127 * @server: Server to send the data to
128 * @smb_msg: Message to send
129 * @sent: amount of data sent on socket is stored here
130 *
131 * Our basic "send data to server" function. Should be called with srv_mutex
132 * held. The caller is responsible for handling the results.
133 */
134 static int
135 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
136 size_t *sent)
137 {
138 int rc = 0;
139 int retries = 0;
140 struct socket *ssocket = server->ssocket;
141
142 *sent = 0;
143
144 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
145 smb_msg->msg_namelen = sizeof(struct sockaddr);
146 smb_msg->msg_control = NULL;
147 smb_msg->msg_controllen = 0;
148 if (server->noblocksnd)
149 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
150 else
151 smb_msg->msg_flags = MSG_NOSIGNAL;
152
153 while (msg_data_left(smb_msg)) {
154 /*
155 * If blocking send, we try 3 times, since each can block
156 * for 5 seconds. For nonblocking we have to try more
157 * but wait increasing amounts of time allowing time for
158 * socket to clear. The overall time we wait in either
159 * case to send on the socket is about 15 seconds.
160 * Similarly we wait for 15 seconds for a response from
161 * the server in SendReceive[2] for the server to send
162 * a response back for most types of requests (except
163 * SMB Write past end of file which can be slow, and
164 * blocking lock operations). NFS waits slightly longer
165 * than CIFS, but this can make it take longer for
166 * nonresponsive servers to be detected and 15 seconds
167 * is more than enough time for modern networks to
168 * send a packet. In most cases if we fail to send
169 * after the retries we will kill the socket and
170 * reconnect which may clear the network problem.
171 */
172 rc = sock_sendmsg(ssocket, smb_msg);
173 if (rc == -EAGAIN) {
174 retries++;
175 if (retries >= 14 ||
176 (!server->noblocksnd && (retries > 2))) {
177 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
178 ssocket);
179 return -EAGAIN;
180 }
181 msleep(1 << retries);
182 continue;
183 }
184
185 if (rc < 0)
186 return rc;
187
188 if (rc == 0) {
189 /* should never happen, letting socket clear before
190 retrying is our only obvious option here */
191 cifs_dbg(VFS, "tcp sent no data\n");
192 msleep(500);
193 continue;
194 }
195
196 /* send was at least partially successful */
197 *sent += rc;
198 retries = 0; /* in case we get ENOSPC on the next send */
199 }
200 return 0;
201 }
202
203 static unsigned long
204 rqst_len(struct smb_rqst *rqst)
205 {
206 unsigned int i;
207 struct kvec *iov = rqst->rq_iov;
208 unsigned long buflen = 0;
209
210 /* total up iov array first */
211 for (i = 0; i < rqst->rq_nvec; i++)
212 buflen += iov[i].iov_len;
213
214 /* add in the page array if there is one */
215 if (rqst->rq_npages) {
216 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
217 buflen += rqst->rq_tailsz;
218 }
219
220 return buflen;
221 }
222
/*
 * Send one complete SMB request (kvec array + optional page array) on the
 * server's socket.  iov[0] must be the 4-byte RFC1001 length header and
 * iov[1] the start of the SMB itself (hence the n_vec < 2 check).  The
 * socket is TCP_CORKed around the whole request so the header and payload
 * go out coalesced.  On a partial send the session is marked for
 * reconnect, since the server would misparse the next SMB otherwise.
 * Must be called with srv_mutex held (see smb_send_kvec()).
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length: everything we queue must equal the
	   RFC1001 length field plus the 4-byte header itself */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	/* the RFC1001 header travels in its own iov, so we need >= 2 */
	if (n_vec < 2)
		return -EIO;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);
	dump_smb(iov[1].iov_base, iov[1].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	/* -EINTR is expected when the caller is being signalled; quiet it */
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}
314
315 static int
316 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
317 {
318 struct smb_rqst rqst = { .rq_iov = iov,
319 .rq_nvec = n_vec };
320
321 return smb_send_rqst(server, &rqst);
322 }
323
324 int
325 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
326 unsigned int smb_buf_length)
327 {
328 struct kvec iov[2];
329
330 iov[0].iov_base = smb_buffer;
331 iov[0].iov_len = 4;
332 iov[1].iov_base = (char *)smb_buffer + 4;
333 iov[1].iov_len = smb_buf_length;
334
335 return smb_sendv(server, iov, 2);
336 }
337
/*
 * Block until a send credit is available (or the connection dies), then
 * consume one.  CIFS_ASYNC_OP requests (e.g. oplock breaks) never wait;
 * CIFS_BLOCKING_OP requests wait but do not consume a credit, since
 * blocking lock commands may be held indefinitely by the server.
 * Returns 0 on success, -ENOENT if the tcp session is exiting, or the
 * (nonzero) result of a killed wait.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* drop the lock while sleeping; re-check on wakeup */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
385
386 static int
387 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
388 const int optype)
389 {
390 int *val;
391
392 val = server->ops->get_credits_field(server, optype);
393 /* Since an echo is already inflight, no need to wait to send another */
394 if (*val <= 0 && optype == CIFS_ECHO_OP)
395 return -EAGAIN;
396 return wait_for_free_credits(server, timeout, val);
397 }
398
/*
 * SMB1 has no MTU-credit accounting: grant the full requested size and
 * report zero credits consumed.  Never blocks, never fails.
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;

	return 0;
}
407
/*
 * Allocate a mid for @in_buf, validate that the session/connection state
 * permits sending this command, and queue the mid on the server's
 * pending_mid_q.  On success *ppmidQ owns the queued entry (free with
 * cifs_delete_mid()).  Returns 0, or a negative errno without touching
 * *ppmidQ.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	/* while the session is still being set up, only the setup
	   commands themselves may pass */
	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
442
443 static int
444 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
445 {
446 int error;
447
448 error = wait_event_freezekillable_unsafe(server->response_q,
449 midQ->mid_state != MID_REQUEST_SUBMITTED);
450 if (error < 0)
451 return -ERESTARTSYS;
452
453 return 0;
454 }
455
456 struct mid_q_entry *
457 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
458 {
459 int rc;
460 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
461 struct mid_q_entry *mid;
462
463 if (rqst->rq_iov[0].iov_len != 4 ||
464 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
465 return ERR_PTR(-EIO);
466
467 /* enable signing if server requires it */
468 if (server->sign)
469 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
470
471 mid = AllocMidQEntry(hdr, server);
472 if (mid == NULL)
473 return ERR_PTR(-ENOMEM);
474
475 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
476 if (rc) {
477 DeleteMidQEntry(mid);
478 return ERR_PTR(rc);
479 }
480
481 return mid;
482 }
483
484 /*
485 * Send a SMB request and set the callback function in the mid to handle
486 * the result. Caller is responsible for dealing with timeouts.
487 */
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Unless CIFS_HAS_CREDITS is set in @flags, one credit is consumed here
 * and returned to the pool (with a wakeup) on any failure; on success the
 * credit is released later by the response path.  The send itself is
 * serialized under srv_mutex so signing and transmission stay in order.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback, void *cbdata,
		const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);


	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0) {
		/* back out the sequence numbers the signing code advanced */
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}
544
545 /*
546 *
547 * Send an SMB Request. No response info (other than return code)
548 * needs to be parsed.
549 *
550 * flags indicate the type of request buffer and how long to wait
551 * and whether to log NT STATUS code (error) before mapping it to POSIX error
552 *
553 */
554 int
555 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
556 char *in_buf, int flags)
557 {
558 int rc;
559 struct kvec iov[1];
560 struct kvec rsp_iov;
561 int resp_buf_type;
562
563 iov[0].iov_base = in_buf;
564 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
565 flags |= CIFS_NO_RESP;
566 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
567 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
568
569 return rc;
570 }
571
/*
 * Translate a completed mid's final state into an errno and dispose of
 * the mid.  A received response returns 0 and leaves the mid alive for
 * the caller to consume; every other state frees the mid (under
 * srv_mutex, matching the send-side locking).  A mid in an unexpected
 * state is unlinked here since no response path will do it.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	return rc;
}
607
608 static inline int
609 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
610 struct mid_q_entry *mid)
611 {
612 return server->ops->send_cancel ?
613 server->ops->send_cancel(server, rqst, mid) : 0;
614 }
615
616 int
617 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
618 bool log_error)
619 {
620 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
621
622 dump_smb(mid->resp_buf, min_t(u32, 92, len));
623
624 /* convert the length into a more usable form */
625 if (server->sign) {
626 struct kvec iov[2];
627 int rc = 0;
628 struct smb_rqst rqst = { .rq_iov = iov,
629 .rq_nvec = 2 };
630
631 iov[0].iov_base = mid->resp_buf;
632 iov[0].iov_len = 4;
633 iov[1].iov_base = (char *)mid->resp_buf + 4;
634 iov[1].iov_len = len - 4;
635 /* FIXME: add code to kill session */
636 rc = cifs_verify_signature(&rqst, server,
637 mid->sequence_number);
638 if (rc)
639 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
640 rc);
641 }
642
643 /* BB special case reconnect tid and uid here? */
644 return map_smb_to_linux_error(mid->resp_buf, log_error);
645 }
646
647 struct mid_q_entry *
648 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
649 {
650 int rc;
651 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
652 struct mid_q_entry *mid;
653
654 if (rqst->rq_iov[0].iov_len != 4 ||
655 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
656 return ERR_PTR(-EIO);
657
658 rc = allocate_mid(ses, hdr, &mid);
659 if (rc)
660 return ERR_PTR(rc);
661 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
662 if (rc) {
663 cifs_delete_mid(mid);
664 return ERR_PTR(rc);
665 }
666 return mid;
667 }
668
/*
 * Core synchronous send/receive path: obtain a credit, sign and send the
 * request under srv_mutex, wait for the response and hand it back to the
 * caller in *resp_iov (unless CIFS_NO_RESP).  *resp_buf_type tells the
 * caller which pool the response buffer must be released to.  On every
 * exit path the consumed credit is returned via add_credits().
 */
static int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* signing advanced the sequence numbers; back them out on failure */
	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* response may still arrive; let the demultiplex
			   thread free the mid when it completes it */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid: when the
	   caller wants the response, ownership moves to resp_iov */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
782
783 int
784 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
785 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
786 const int flags, struct kvec *resp_iov)
787 {
788 struct smb_rqst rqst;
789 struct kvec *new_iov;
790 int rc;
791
792 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
793 if (!new_iov)
794 return -ENOMEM;
795
796 /* 1st iov is a RFC1001 length followed by the rest of the packet */
797 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
798
799 new_iov[0].iov_base = new_iov[1].iov_base;
800 new_iov[0].iov_len = 4;
801 new_iov[1].iov_base += 4;
802 new_iov[1].iov_len -= 4;
803
804 memset(&rqst, 0, sizeof(struct smb_rqst));
805 rqst.rq_iov = new_iov;
806 rqst.rq_nvec = n_vec + 1;
807
808 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
809 kfree(new_iov);
810 return rc;
811 }
812
/*
 * Legacy synchronous send/receive for contiguous SMB buffers: sign and
 * send @in_buf, wait for the reply and copy it into @out_buf.
 * *pbytes_returned receives the RFC1001 length of the response (the copy
 * includes the additional 4 header bytes).  @timeout selects the credit
 * policy (see wait_for_free_credits()).
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* signing advanced the sequence numbers; back them out on failure */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
924
925 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
926 blocking lock to return. */
927
928 static int
929 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
930 struct smb_hdr *in_buf,
931 struct smb_hdr *out_buf)
932 {
933 int bytes_returned;
934 struct cifs_ses *ses = tcon->ses;
935 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
936
937 /* We just modify the current in_buf to change
938 the type of lock from LOCKING_ANDX_SHARED_LOCK
939 or LOCKING_ANDX_EXCLUSIVE_LOCK to
940 LOCKING_ANDX_CANCEL_LOCK. */
941
942 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
943 pSMB->Timeout = 0;
944 pSMB->hdr.Mid = get_next_mid(ses->server);
945
946 return SendReceive(xid, ses, in_buf, out_buf,
947 &bytes_returned, 0);
948 }
949
/*
 * Synchronous send/receive for blocking lock requests.  Like
 * SendReceive() but waits with no credit consumed (CIFS_BLOCKING_OP) and
 * interruptibly: if the caller is signalled while the server holds the
 * lock request, a cancel is sent (NT_CANCEL for POSIX/Transaction2 locks,
 * LOCKINGX_CANCEL_LOCK for Windows locks) and we then wait for the
 * response so the mid can be reaped.  If the response only arrived
 * because we cancelled after a signal, -ERESTARTSYS is returned so the
 * system call restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* signing advanced the sequence numbers; back them out on failure */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}