/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = get_mid(smb_buffer);
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
		/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;
		temp->server = server;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = cifs_wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
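
/*
 * Editor's sketch (not part of the original file): the usual life of a mid
 * allocated above, assuming a synchronous caller. All names are from this
 * file; only the flow is paraphrased:
 *
 *	struct mid_q_entry *mid = AllocMidQEntry(hdr, server);
 *	if (mid == NULL)
 *		return -ENOMEM;
 *	mid->mid_state = MID_REQUEST_SUBMITTED;
 *	(queue mid on server->pending_mid_q under GlobalMid_Lock, send the
 *	 frame, then sleep; the demultiplex thread fires mid->callback,
 *	 cifs_wake_up_task() by default, once the response arrives)
 *	cifs_delete_mid(mid);
 *
 * DeleteMidQEntry() below is the raw destructor; cifs_delete_mid() also
 * unlinks the entry from pending_mid_q first.
 */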

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       le16_to_cpu(midEntry->command), midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @iov:	Pointer to array of kvecs
 * @n_vec:	length of kvec array
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -EAGAIN) {
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cifs_dbg(VFS, "sent %d requested %u\n", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the loop below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}
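
/*
 * Editor's sketch (not in the original file): how a hypothetical caller
 * might feed smb_send_kvec() a header plus a separate payload buffer.
 * "hdr", "hdr_len", "payload" and "payload_len" are illustrative names,
 * and server->srv_mutex is assumed to be held, as required above:
 *
 *	struct kvec iov[2];
 *	size_t sent;
 *	int rc;
 *
 *	iov[0].iov_base = hdr;
 *	iov[0].iov_len = hdr_len;
 *	iov[1].iov_base = payload;
 *	iov[1].iov_len = payload_len;
 *	rc = smb_send_kvec(server, iov, 2, &sent);
 *
 * On return, *sent holds the byte count that actually reached the socket,
 * even when rc reports a partial or failed send.
 */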

/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is then taken from rq_pagesz, or from rq_tailsz for the final page.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
		       struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}
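
/*
 * Editor's note (not in the original file): smb_send() is the simplest
 * entry point, for requests that already sit in one contiguous buffer.
 * The "+ 4" above covers the RFC 1002 length preamble that precedes the
 * SMB header on the wire, which is why callers pass just the SMB length
 * and the helper adds the framing itself, e.g. (as SendReceive() does
 * further down):
 *
 *	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 */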

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
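
/*
 * Editor's note (not in the original file): each successful pass through
 * wait_for_free_credits() consumes one credit and bumps server->in_flight
 * (except for CIFS_BLOCKING_OP), so every caller must eventually return
 * the credit with add_credits(), which also wakes waiters on request_q.
 * cifs_call_async() and SendReceive2() below show the pattern: every exit
 * path ends in add_credits(server, 1, optype) once the response, or the
 * error, has been consumed.
 */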

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0)
		server->sequence_number -= 2;
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}
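
/*
 * Editor's sketch (not in the original file): the shape of a hypothetical
 * cifs_call_async() user. "my_callback" is an illustrative name; the echo
 * path uses this same pattern with CIFS_ECHO_OP:
 *
 *	static void my_callback(struct mid_q_entry *mid)
 *	{
 *		struct TCP_Server_Info *server = mid->callback_data;
 *
 *		(inspect mid->mid_state and mid->resp_buf here)
 *		DeleteMidQEntry(mid);
 *		add_credits(server, 1, CIFS_ECHO_OP);
 *	}
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_callback, server,
 *			     CIFS_ASYNC_OP | CIFS_ECHO_OP);
 *
 * The callback runs in the demultiplex thread, so it must release the mid
 * and hand the credit back itself rather than wait on the original caller.
 */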

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, buf, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* convert the RFC 1002 length into a more usable form */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	if (server->sign) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
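
/*
 * Editor's sketch (not in the original file): a typical SendReceive2()
 * call. The request goes in iov[0] and is consumed either way; on success
 * iov[0] is repointed at the response, and resp_buf_type says whether to
 * release it with cifs_buf_release() or cifs_small_buf_release():
 *
 *	struct kvec iov[1];
 *	int resp_buf_type;
 *
 *	iov[0].iov_base = request;	(an assumed small-buf request)
 *	iov[0].iov_len = get_rfc1002_length(request) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, CIFS_LOG_ERROR);
 *	(on rc == 0, parse iov[0].iov_base, then free it according to
 *	 resp_buf_type unless CIFS_NO_RESP was set)
 */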

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
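
/*
 * Editor's note (not in the original file): SendReceive() above is the
 * legacy single-buffer variant of SendReceive2(). It memcpy()s the whole
 * response into the caller-supplied out_buf instead of handing over the
 * receive buffer, so callers pay an extra copy but never have to release
 * a response buffer themselves.
 */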

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}