/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

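/* Default mid callback: wake the task waiting synchronously on this mid. */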
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

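/*
 * Allocate a mid (multiplex id) entry to track an SMB request on the given
 * TCP connection. The caller is responsible for adding it to pending_mid_q.
 */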
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

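/*
 * Release a mid and its response buffer. With CONFIG_CIFS_STATS2, also log
 * requests that took longer than one second to complete.
 */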
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second are indications that
	 * something is wrong, unless it is quite a slow link or server
	 */
	if (time_after(now, midEntry->when_alloc + HZ)) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 le16_to_cpu(midEntry->command), midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

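/* Unlink a mid from the pending queue and free it. */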
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * should never happen, letting socket clear before
			 * retrying is our only obvious option here
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

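/* Total length of the request: the kvec array plus any trailing page data. */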
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

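/*
 * Send an already-built request on the socket: cork it, send the kvec array,
 * then send each page in the page array. If only part of the request made it
 * onto the wire, mark the session for reconnect so the server discards the
 * partial SMB. Caller must hold srv_mutex.
 */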
static int
__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
		     send_length, smb_buf_length);
		return -EIO;
	}

	if (n_vec < 2)
		return -EIO;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);
	dump_smb(iov[1].iov_base, iov[1].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

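/*
 * Send a request, transforming (e.g. encrypting) it first if
 * CIFS_TRANSFORM_REQ is set and the server provides transform callbacks.
 */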
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
{
	struct smb_rqst cur_rqst;
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, rqst);

	if (!server->ops->init_transform_rq ||
	    !server->ops->free_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callbacks are missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, &cur_rqst);
	server->ops->free_transform_rq(&cur_rqst);
	return rc;
}

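/*
 * Convenience wrapper: send a single SMB buffer, splitting the 4-byte RFC1001
 * length header and the SMB itself into the two kvecs __smb_send_rqst expects.
 */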
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, &rqst);
}

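/*
 * Take one send credit, blocking until one is available unless this is an
 * async op (oplock breaks must never be held up). Blocking lock ops consume
 * no credit, since they are allowed to block on the server indefinitely.
 */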
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

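/* Pick the right credits field for this op type and wait on it. */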
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

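/*
 * SMB1 has no MTU credits: grant the full requested size without consuming
 * anything. The SMB2 version of this op actually waits for credits.
 */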
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

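/*
 * Allocate a mid for this request and queue it on pending_mid_q, unless the
 * session or TCP connection is in a state where the send cannot proceed.
 */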
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* the only request allowed on a dying session is a logoff */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

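/*
 * Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED,
 * i.e. a response arrived or the connection went down.
 */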
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

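/*
 * Build a mid for an async request: verify the iov layout (RFC1001 length in
 * iov[0], SMB immediately after in iov[1]), then allocate and sign.
 */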
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

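/*
 * Convert the final mid state to an errno for a synchronous caller. On
 * success the mid is left alive so the caller can consume resp_buf; on
 * error it is dequeued as needed and freed here.
 */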
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

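/*
 * Verify the signature on a received response (when signing is active) and
 * map the SMB status code to a POSIX error.
 */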
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

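/*
 * Synchronous counterpart of cifs_setup_async_request: validate the iov
 * layout, allocate and queue a mid, and sign the request.
 */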
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

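/*
 * Core synchronous send/receive path: take a credit, send the request under
 * srv_mutex, wait for the response, then check it and hand the response
 * buffer back to the caller via resp_iov.
 */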
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

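/*
 * Legacy wrapper around cifs_send_recv: split the first kvec into a 4-byte
 * RFC1001 length kvec and the SMB body, as the core path expects.
 */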
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec *new_iov;
	int rc;

	new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
	if (!new_iov) {
		/* otherwise cifs_send_recv below sets resp_buf_type */
		*resp_buf_type = CIFS_NO_BUFFER;
		return -ENOMEM;
	}

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	kfree(new_iov);
	return rc;
}

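/*
 * Synchronous send/receive for a single SMB1 buffer; copies the response
 * into out_buf rather than handing back the receive buffer.
 */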
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

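/*
 * Like SendReceive, but for blocking lock requests: no credit is consumed
 * while waiting, the wait is interruptible, and on a signal we first try to
 * cancel the lock on the server before giving up.
 */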
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}