/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

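/* Default mid callback: wake the task waiting synchronously for this mid. */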
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

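/*
 * Allocate a mid (multiplex id) entry for the request in @smb_buffer and
 * initialize it for a synchronous call (the default callback just wakes
 * the current task). The caller must still queue it on pending_mid_q.
 */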
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return NULL;

	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

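/*
 * Release a mid: free its response buffer and return the entry to the
 * mempool. With CONFIG_CIFS_STATS2, also log responses that took longer
 * than a second (ignoring lock commands, which are allowed to block).
 */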
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second are indications that
	 * something is wrong, unless it is quite a slow link or server.
	 */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

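/* Unlink a mid from the pending queue and free it. */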
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * Should never happen; letting the socket clear
			 * before retrying is our only obvious option here.
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

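/* Total number of bytes this request will occupy on the wire (iov array plus page array). */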
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

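/*
 * Send one SMB request on the socket: sanity check the length, cork the
 * socket, send the kvec array and then each page in the page array, and
 * uncork. A partial send forces a reconnect so that the server discards
 * the partial SMB. Must be called with srv_mutex held.
 */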
static int
__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch (send_length=%lu smb_buf_length=%u)\n",
		     send_length, smb_buf_length);
		return -EIO;
	}

	if (n_vec < 2)
		return -EIO;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);
	dump_smb(iov[1].iov_base, iov[1].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

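/*
 * Send a request, transforming (e.g. encrypting) it first when
 * CIFS_TRANSFORM_REQ is set and the server supplies transform callbacks.
 */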
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
{
	struct smb_rqst cur_rqst;
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, rqst);

	if (!server->ops->init_transform_rq ||
	    !server->ops->free_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callbacks are missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, &cur_rqst);
	server->ops->free_transform_rq(&cur_rqst);
	return rc;
}

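/* Send a plain SMB buffer: the 4-byte RFC1001 length header followed by the packet body. */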
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, &rqst);
}

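/*
 * Block until a send credit is available (or the server is exiting).
 * Async ops such as oplock breaks are never held up, and blocking lock
 * ops do not consume a credit since they may block on the server.
 */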
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

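/* Pick the credit pool for this operation type and wait on it; echoes never wait. */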
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

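/* SMB1 has no MTU credit accounting: grant the requested size and consume no credits. */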
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

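/*
 * Allocate a mid for @in_buf and queue it on pending_mid_q, refusing to
 * do so while the session or TCP connection is in an unusable state.
 */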
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* only a logoff may be sent while the session shuts down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

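/* Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED state. */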
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

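/*
 * Validate the iov layout (4-byte RFC1001 header in iov[0], packet body
 * starting at iov[1]), allocate a mid, and sign the request if required.
 */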
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst, flags);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

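/*
 * Map the final mid state to an errno for a synchronous caller and free
 * the mid (except on success, where the caller still owns the response).
 */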
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	return rc;
}

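/* Ask the transport to cancel an in-flight request, if the dialect supports it. */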
static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

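/*
 * Sanity check a received response: dump it for debugging, verify the
 * signature when signing is in use, and map the SMB status to an errno.
 */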
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signs its responses */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

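/* SMB1 request setup: validate the iov layout, allocate and queue a mid, then sign. */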
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

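/*
 * Send a request and wait for the response. On success the caller owns
 * the response buffer described by @resp_iov and must free it according
 * to @resp_buf_type (unless CIFS_NO_RESP was set).
 */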
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

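/*
 * Legacy wrapper around cifs_send_recv(): prepend a separate iov for the
 * 4-byte RFC1001 length so the first caller-supplied iov can be split.
 */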
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec *new_iov;
	int rc;

	new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
	if (!new_iov)
		return -ENOMEM;

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	kfree(new_iov);
	return rc;
}

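/*
 * Synchronous send/receive for a complete SMB1 buffer. The response is
 * copied into @out_buf and its byte count returned via @pbytes_returned.
 */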
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK | LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

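/*
 * Send a blocking lock request and wait, interruptibly, for the reply.
 * If the caller is signalled, cancel the lock on the server (NT_CANCEL
 * for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows locks) and restart
 * the system call if the cancelled lock comes back with -EACCES.
 */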
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}