]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - fs/cifs/transport.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[mirror_ubuntu-artful-kernel.git] / fs / cifs / transport.c
1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <asm/uaccess.h>
31 #include <asm/processor.h>
32 #include <linux/mempool.h>
33 #include "cifspdu.h"
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37
/*
 * Default mid callback: AllocMidQEntry() sets callback_data to the
 * issuing task ("current"), so waking that task completes a
 * synchronous request.
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
43
44 struct mid_q_entry *
45 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
46 {
47 struct mid_q_entry *temp;
48
49 if (server == NULL) {
50 cERROR(1, "Null TCP session in AllocMidQEntry");
51 return NULL;
52 }
53
54 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
55 if (temp == NULL)
56 return temp;
57 else {
58 memset(temp, 0, sizeof(struct mid_q_entry));
59 temp->mid = smb_buffer->Mid; /* always LE */
60 temp->pid = current->pid;
61 temp->command = cpu_to_le16(smb_buffer->Command);
62 cFYI(1, "For smb_command %d", smb_buffer->Command);
63 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
64 /* when mid allocated can be before when sent */
65 temp->when_alloc = jiffies;
66 temp->server = server;
67
68 /*
69 * The default is for the mid to be synchronous, so the
70 * default callback just wakes up the current task.
71 */
72 temp->callback = cifs_wake_up_task;
73 temp->callback_data = current;
74 }
75
76 atomic_inc(&midCount);
77 temp->mid_state = MID_REQUEST_ALLOCATED;
78 return temp;
79 }
80
/*
 * Free a mid: release its response buffer back to the appropriate pool
 * (large or small), optionally log slow responses when CIFS_STATS2 is
 * enabled, and return the entry to the mid mempool. The caller must
 * already have unlinked it from any pending queue.
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* lock commands are expected to block, so they are excluded from
	   the slow-response report below */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
111
/*
 * Unlink a mid from the server's pending queue (under GlobalMid_Lock)
 * and then free it.
 */
static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
121
122 static int
123 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
124 {
125 int rc = 0;
126 int i = 0;
127 struct msghdr smb_msg;
128 unsigned int len = iov[0].iov_len;
129 unsigned int total_len;
130 int first_vec = 0;
131 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
132 struct socket *ssocket = server->ssocket;
133
134 if (ssocket == NULL)
135 return -ENOTSOCK; /* BB eventually add reconnect code here */
136
137 smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
138 smb_msg.msg_namelen = sizeof(struct sockaddr);
139 smb_msg.msg_control = NULL;
140 smb_msg.msg_controllen = 0;
141 if (server->noblocksnd)
142 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
143 else
144 smb_msg.msg_flags = MSG_NOSIGNAL;
145
146 total_len = 0;
147 for (i = 0; i < n_vec; i++)
148 total_len += iov[i].iov_len;
149
150 cFYI(1, "Sending smb: total_len %d", total_len);
151 dump_smb(iov[0].iov_base, len);
152
153 i = 0;
154 while (total_len) {
155 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
156 n_vec - first_vec, total_len);
157 if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
158 i++;
159 /*
160 * If blocking send we try 3 times, since each can block
161 * for 5 seconds. For nonblocking we have to try more
162 * but wait increasing amounts of time allowing time for
163 * socket to clear. The overall time we wait in either
164 * case to send on the socket is about 15 seconds.
165 * Similarly we wait for 15 seconds for a response from
166 * the server in SendReceive[2] for the server to send
167 * a response back for most types of requests (except
168 * SMB Write past end of file which can be slow, and
169 * blocking lock operations). NFS waits slightly longer
170 * than CIFS, but this can make it take longer for
171 * nonresponsive servers to be detected and 15 seconds
172 * is more than enough time for modern networks to
173 * send a packet. In most cases if we fail to send
174 * after the retries we will kill the socket and
175 * reconnect which may clear the network problem.
176 */
177 if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
178 cERROR(1, "sends on sock %p stuck for 15 seconds",
179 ssocket);
180 rc = -EAGAIN;
181 break;
182 }
183 msleep(1 << i);
184 continue;
185 }
186 if (rc < 0)
187 break;
188
189 if (rc == total_len) {
190 total_len = 0;
191 break;
192 } else if (rc > total_len) {
193 cERROR(1, "sent %d requested %d", rc, total_len);
194 break;
195 }
196 if (rc == 0) {
197 /* should never happen, letting socket clear before
198 retrying is our only obvious option here */
199 cERROR(1, "tcp sent no data");
200 msleep(500);
201 continue;
202 }
203 total_len -= rc;
204 /* the line below resets i */
205 for (i = first_vec; i < n_vec; i++) {
206 if (iov[i].iov_len) {
207 if (rc > iov[i].iov_len) {
208 rc -= iov[i].iov_len;
209 iov[i].iov_len = 0;
210 } else {
211 iov[i].iov_base += rc;
212 iov[i].iov_len -= rc;
213 first_vec = i;
214 break;
215 }
216 }
217 }
218 i = 0; /* in case we get ENOSPC on the next send */
219 }
220
221 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
222 cFYI(1, "partial send (%d remaining), terminating session",
223 total_len);
224 /* If we have only sent part of an SMB then the next SMB
225 could be taken as the remainder of this one. We need
226 to kill the socket so the server throws away the partial
227 SMB */
228 server->tcpStatus = CifsNeedReconnect;
229 }
230
231 if (rc < 0 && rc != -EINTR)
232 cERROR(1, "Error %d sending data on socket to server", rc);
233 else
234 rc = 0;
235
236 return rc;
237 }
238
239 int
240 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
241 unsigned int smb_buf_length)
242 {
243 struct kvec iov;
244
245 iov.iov_base = smb_buffer;
246 iov.iov_len = smb_buf_length + 4;
247
248 return smb_sendv(server, &iov, 1);
249 }
250
/*
 * Reserve a send credit for a new request, blocking if none are free.
 * *credits and server->in_flight are only touched under server->req_lock.
 *
 * timeout semantics:
 *   CIFS_ASYNC_OP    - never wait (oplock breaks must not be held up);
 *                      a credit is consumed immediately
 *   CIFS_BLOCKING_OP - proceed without consuming a credit, since
 *                      blocking lock commands may block on the server
 *   otherwise        - sleep killably on request_q until a credit frees
 *
 * Returns 0 on success, a nonzero value from wait_event_killable() if
 * killed while waiting, or -ENOENT if the tcp session is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* drop the lock before sleeping; has_credits()
			   re-checks under the lock */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
298
299 static int
300 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
301 const int optype)
302 {
303 return wait_for_free_credits(server, timeout,
304 server->ops->get_credits_field(server, optype));
305 }
306
307 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
308 struct mid_q_entry **ppmidQ)
309 {
310 if (ses->server->tcpStatus == CifsExiting) {
311 return -ENOENT;
312 }
313
314 if (ses->server->tcpStatus == CifsNeedReconnect) {
315 cFYI(1, "tcp session dead - return to caller to retry");
316 return -EAGAIN;
317 }
318
319 if (ses->status != CifsGood) {
320 /* check if SMB session is bad because we are setting it up */
321 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
322 (in_buf->Command != SMB_COM_NEGOTIATE))
323 return -EAGAIN;
324 /* else ok - we are setting up session */
325 }
326 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
327 if (*ppmidQ == NULL)
328 return -ENOMEM;
329 spin_lock(&GlobalMid_Lock);
330 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
331 spin_unlock(&GlobalMid_Lock);
332 return 0;
333 }
334
335 static int
336 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
337 {
338 int error;
339
340 error = wait_event_freezekillable(server->response_q,
341 midQ->mid_state != MID_REQUEST_SUBMITTED);
342 if (error < 0)
343 return -ERESTARTSYS;
344
345 return 0;
346 }
347
348 int
349 cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
350 unsigned int nvec, struct mid_q_entry **ret_mid)
351 {
352 int rc;
353 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
354 struct mid_q_entry *mid;
355
356 /* enable signing if server requires it */
357 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
358 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
359
360 mid = AllocMidQEntry(hdr, server);
361 if (mid == NULL)
362 return -ENOMEM;
363
364 rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number);
365 if (rc) {
366 DeleteMidQEntry(mid);
367 return rc;
368 }
369
370 *ret_mid = mid;
371 return 0;
372 }
373
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * flags carry the timeout class (CIFS_TIMEOUT_MASK) and the operation
 * type used for credit accounting (CIFS_OP_MASK). On failure the
 * consumed credit is returned via add_credits() and any waiter on
 * request_q is woken.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_receive_t *receive,
		mid_callback_t *callback, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	/* reserve a credit before taking srv_mutex */
	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	/* srv_mutex keeps signing order equal to send order on the socket */
	mutex_lock(&server->srv_mutex);
	rc = server->ops->setup_async_request(server, iov, nvec, &mid);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* give back the credit we reserved above */
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return rc;
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);


	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* send failed: unlink/free the mid and return the credit */
	delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}
427
428 /*
429 *
430 * Send an SMB Request. No response info (other than return code)
431 * needs to be parsed.
432 *
433 * flags indicate the type of request buffer and how long to wait
434 * and whether to log NT STATUS code (error) before mapping it to POSIX error
435 *
436 */
437 int
438 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
439 char *in_buf, int flags)
440 {
441 int rc;
442 struct kvec iov[1];
443 int resp_buf_type;
444
445 iov[0].iov_base = in_buf;
446 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
447 flags |= CIFS_NO_RESP;
448 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
449 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
450
451 return rc;
452 }
453
/*
 * Map the final state of a synchronous mid to an error code.
 * On MID_RESPONSE_RECEIVED, return 0 and keep the mid (and its
 * resp_buf) alive for the caller; in every other case the mid is
 * freed here. A mid in an unexpected state is additionally unlinked
 * from the pending queue before being freed.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: caller consumes resp_buf and frees the mid */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* still queued in an unexpected state: unlink before free */
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
487
488 static inline int
489 send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
490 {
491 return server->ops->send_cancel ?
492 server->ops->send_cancel(server, buf, mid) : 0;
493 }
494
495 int
496 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
497 bool log_error)
498 {
499 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
500
501 dump_smb(mid->resp_buf, min_t(u32, 92, len));
502
503 /* convert the length into a more usable form */
504 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
505 struct kvec iov;
506
507 iov.iov_base = mid->resp_buf;
508 iov.iov_len = len;
509 /* FIXME: add code to kill session */
510 if (cifs_verify_signature(&iov, 1, server,
511 mid->sequence_number + 1) != 0)
512 cERROR(1, "Unexpected SMB signature");
513 }
514
515 /* BB special case reconnect tid and uid here? */
516 return map_smb_to_linux_error(mid->resp_buf, log_error);
517 }
518
519 int
520 cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
521 unsigned int nvec, struct mid_q_entry **ret_mid)
522 {
523 int rc;
524 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
525 struct mid_q_entry *mid;
526
527 rc = allocate_mid(ses, hdr, &mid);
528 if (rc)
529 return rc;
530 rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number);
531 if (rc)
532 delete_mid(mid);
533 *ret_mid = mid;
534 return rc;
535 }
536
/*
 * Send the request in iov[0..n_vec-1] and wait synchronously for the
 * response. On success, iov[0] is repointed at the response buffer and
 * *resp_buf_type records which pool (large/small) the caller must
 * eventually release it to; with CIFS_NO_RESP the response is freed
 * here instead. The request buffer (iov[0].iov_base on entry) is
 * always released by this function, on every path.
 *
 * flags carry the timeout class (CIFS_TIMEOUT_MASK), the operation
 * type for credit accounting (CIFS_OP_MASK), and the CIFS_LOG_ERROR /
 * CIFS_NO_RESP modifiers. Returns 0 on success or a negative error.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		/* caller does not wait for the response */
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/*
			 * The response may still arrive: leave the mid
			 * queued and let the demultiplex thread free it
			 * via this callback, rather than freeing it here.
			 */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid was already freed by cifs_sync_mid_result */
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	/* hand the response buffer back to the caller through iov[0] */
	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
661
/*
 * Synchronously send the SMB in in_buf and copy the server's reply
 * into out_buf (which must be able to hold a maximum-size frame;
 * oversize requests are rejected up front). *pbytes_returned is set
 * to the RFC1002 length of the response. timeout selects the
 * credit-wait behavior (see wait_for_free_credits). Returns 0 on
 * success or a negative error.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight";
			   demultiplex thread will free the mid via callback */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
767
768 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
769 blocking lock to return. */
770
771 static int
772 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
773 struct smb_hdr *in_buf,
774 struct smb_hdr *out_buf)
775 {
776 int bytes_returned;
777 struct cifs_ses *ses = tcon->ses;
778 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
779
780 /* We just modify the current in_buf to change
781 the type of lock from LOCKING_ANDX_SHARED_LOCK
782 or LOCKING_ANDX_EXCLUSIVE_LOCK to
783 LOCKING_ANDX_CANCEL_LOCK. */
784
785 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
786 pSMB->Timeout = 0;
787 pSMB->hdr.Mid = get_next_mid(ses->server);
788
789 return SendReceive(xid, ses, in_buf, out_buf,
790 &bytes_returned, 0);
791 }
792
/*
 * Send a blocking lock request and wait interruptibly for the reply.
 * If a signal arrives while the lock is still pending, a cancel is
 * issued (NT_CANCEL for trans2/POSIX locks, a LOCKINGX_CANCEL_LOCK
 * otherwise) and we then wait for the server's definitive answer.
 * If the cancel raced with the lock being granted (rc == -EACCES
 * after a restart), -ERESTARTSYS is returned so the syscall restarts.
 * On success the reply is copied into out_buf and *pbytes_returned is
 * set to its RFC1002 length.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: do not consume a credit (may block on server) */
	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}