/*
 * net/9p/trans_fd.c
 *
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR 2
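/* MAXPOLLWADDR: the read and write sides may be distinct files, so at most
 * two wait queues are registered per connection. */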

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: whether to bind to a privileged source port (trans=tcp)
 *
 */

struct p9_fd_opts {
        int rfd;
        int wfd;
        u16 port;
        int privport;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
        struct file *rd;
        struct file *wr;
        struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
        /* Options that take integer arguments */
        Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
        /* Options that take no arguments */
        Opt_privport,
};

static const match_table_t tokens = {
        {Opt_port, "port=%u"},
        {Opt_rfdno, "rfdno=%u"},
        {Opt_wfdno, "wfdno=%u"},
        {Opt_privport, "privport"},
        {Opt_err, NULL},
};
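/*
 * Example option strings, for illustration:
 *   trans=tcp: "port=564,privport"
 *   trans=fd:  "rfdno=3,wfdno=4"
 */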

enum {
        Rworksched = 1, /* read work scheduled or running */
        Rpending = 2,   /* can read */
        Wworksched = 4, /* write work scheduled or running */
        Wpending = 8,   /* can write */
};

struct p9_poll_wait {
        struct p9_conn *conn;
        wait_queue_t wait;
        wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: link into the global list of connections with pending poll events
 * @poll_wait: array of wait queue entries registered on the transport's fds
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling state flags (Rworksched, Rpending, Wworksched, Wpending)
 *
 */

struct p9_conn {
        struct list_head mux_list;
        struct p9_client *client;
        int err;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct p9_req_t *req;
        char tmp_buf[7];
        int rsize;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        struct list_head poll_pending_link;
        struct p9_poll_wait poll_wait[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

static void p9_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);

static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;

static void p9_mux_poll_stop(struct p9_conn *m)
{
        unsigned long flags;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                struct p9_poll_wait *pwait = &m->poll_wait[i];

                if (pwait->wait_addr) {
                        remove_wait_queue(pwait->wait_addr, &pwait->wait);
                        pwait->wait_addr = NULL;
                }
        }

        spin_lock_irqsave(&p9_poll_lock, flags);
        list_del_init(&m->poll_pending_link);
        spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
        struct p9_req_t *req, *rtmp;
        unsigned long flags;
        LIST_HEAD(cancel_list);

        p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

        spin_lock_irqsave(&m->client->lock, flags);

        if (m->err) {
                spin_unlock_irqrestore(&m->client->lock, flags);
                return;
        }

        m->err = err;

        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock_irqrestore(&m->client->lock, flags);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
                list_del(&req->req_list);
                if (!req->t_err)
                        req->t_err = err;
                p9_client_cb(m->client, req, REQ_STATUS_ERROR);
        }
}

static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
        int ret, n;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status == Connected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!ts->rd->f_op->poll)
                return -EIO;

        if (!ts->wr->f_op->poll)
                return -EIO;

        ret = ts->rd->f_op->poll(ts->rd, pt);
        if (ret < 0)
                return ret;

        if (ts->rd != ts->wr) {
                n = ts->wr->f_op->poll(ts->wr, pt);
                if (n < 0)
                        return n;
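                /* POLLIN is taken from the read fd only, POLLOUT from the
                 * write fd only; other bits are merged from both. */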
                ret = (ret & ~POLLOUT) | (n & ~POLLIN);
        }

        return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
        int ret;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->rd->f_flags & O_NONBLOCK))
                p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");

        ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        int status = REQ_STATUS_ERROR;

        m = container_of(work, struct p9_conn, rq);

        if (m->err < 0)
                return;

        p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rbuf) {
                m->rbuf = m->tmp_buf;
                m->rpos = 0;
                m->rsize = 7; /* start by reading header */
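                /* 9P message header: size[4] + type[1] + tag[2] = 7 bytes */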
        }

        clear_bit(Rpending, &m->wsched);
        p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
                 m, m->rpos, m->rsize, m->rsize-m->rpos);
        err = p9_fd_read(m->client, m->rbuf + m->rpos,
                         m->rsize - m->rpos);
        p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                goto end_clear;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;

        if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
                u16 tag;
                p9_debug(P9_DEBUG_TRANS, "got new header\n");

                n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
                if (n >= m->client->msize) {
                        p9_debug(P9_DEBUG_ERROR,
                                 "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
                p9_debug(P9_DEBUG_TRANS,
                         "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

                m->req = p9_tag_lookup(m->client, tag);
                if (!m->req || (m->req->status != REQ_STATUS_SENT &&
                                m->req->status != REQ_STATUS_FLSH)) {
                        p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
                                 tag);
                        err = -EIO;
                        goto error;
                }

                if (m->req->rc == NULL) {
                        m->req->rc = kmalloc(sizeof(struct p9_fcall) +
                                             m->client->msize, GFP_NOFS);
                        if (!m->req->rc) {
                                m->req = NULL;
                                err = -ENOMEM;
                                goto error;
                        }
                }
                m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
                memcpy(m->rbuf, m->tmp_buf, m->rsize);
                m->rsize = n;
        }

        /* not an else because some packets (like clunk) have no payload */
        if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
                p9_debug(P9_DEBUG_TRANS, "got new packet\n");
                spin_lock(&m->client->lock);
                if (m->req->status != REQ_STATUS_ERROR)
                        status = REQ_STATUS_RCVD;
                list_del(&m->req->req_list);
                spin_unlock(&m->client->lock);
                p9_client_cb(m->client, m->req, status);
                m->rbuf = NULL;
                m->rpos = 0;
                m->rsize = 0;
                m->req = NULL;
        }

end_clear:
        clear_bit(Rworksched, &m->wsched);

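        /* if more replies are outstanding and data may already be waiting,
         * re-arm the read work */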
        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = p9_fd_poll(m->client, NULL);

                if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                }
        }

        return;
error:
        p9_conn_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
        int ret;
        mm_segment_t oldfs;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->wr->f_flags & O_NONBLOCK))
                p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");

        oldfs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
        set_fs(oldfs);

        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req_t *req;

        m = container_of(work, struct p9_conn, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                spin_lock(&m->client->lock);
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        spin_unlock(&m->client->lock);
                        return;
                }

                req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                                 req_list);
                req->status = REQ_STATUS_SENT;
                p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
                list_move_tail(&req->req_list, &m->req_list);

                m->wbuf = req->tc->sdata;
                m->wsize = req->tc->size;
                m->wpos = 0;
                spin_unlock(&m->client->lock);
        }

        p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
                 m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
        p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN)
                goto end_clear;

        if (err < 0)
                goto error;
        else if (err == 0) {
                err = -EREMOTEIO;
                goto error;
        }

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

end_clear:
        clear_bit(Wworksched, &m->wsched);

        if (m->wsize || !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = p9_fd_poll(m->client, NULL);

                if ((n & POLLOUT) &&
                    !test_and_set_bit(Wworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                }
        }

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
{
        struct p9_poll_wait *pwait =
                container_of(wait, struct p9_poll_wait, wait);
        struct p9_conn *m = pwait->conn;
        unsigned long flags;

        spin_lock_irqsave(&p9_poll_lock, flags);
        if (list_empty(&m->poll_pending_link))
                list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        schedule_work(&p9_poll_work);
        return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by a file's poll operation to add the v9fs poll task to the
 * file's wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
        struct p9_conn *m = container_of(p, struct p9_conn, pt);
        struct p9_poll_wait *pwait = NULL;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                if (m->poll_wait[i].wait_addr == NULL) {
                        pwait = &m->poll_wait[i];
                        break;
                }
        }

        if (!pwait) {
                p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        pwait->conn = m;
        pwait->wait_addr = wait_address;
        init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
        add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
        int n;
        struct p9_conn *m;

        p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
        m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&m->mux_list);
        m->client = client;

        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        INIT_WORK(&m->rq, p9_read_work);
        INIT_WORK(&m->wq, p9_write_work);
        INIT_LIST_HEAD(&m->poll_pending_link);
        init_poll_funcptr(&m->pt, p9_pollwait);

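        /*
         * This initial poll both checks readiness and, via m->pt, registers
         * p9_pollwait() on the underlying files' wait queues.
         */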
        n = p9_fd_poll(client, &m->pt);
        if (n & POLLIN) {
                p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
        int n;

        if (m->err < 0)
                return;

        n = p9_fd_poll(m->client, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                p9_conn_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
                    !test_and_set_bit(Wworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                }
        }
}

/**
 * p9_fd_request - send 9P request
 * @client: client instance
 * @req: request to be sent
 *
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
        int n;
        struct p9_trans_fd *ts = client->trans;
        struct p9_conn *m = ts->conn;

        p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
                 m, current, req->tc, req->tc->id);
        if (m->err < 0)
                return m->err;

        spin_lock(&client->lock);
        req->status = REQ_STATUS_UNSENT;
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&client->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = p9_fd_poll(m->client, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                schedule_work(&m->wq);

        return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
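        /*
         * Return 0 if the request was still unsent and has been removed;
         * return non-zero if it may already be on the wire, in which case
         * the client follows up with a Tflush.
         */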
        int ret = 1;

        p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

        spin_lock(&client->lock);

        if (req->status == REQ_STATUS_UNSENT) {
                list_del(&req->req_list);
                req->status = REQ_STATUS_FLSHD;
                ret = 0;
        } else if (req->status == REQ_STATUS_SENT)
                req->status = REQ_STATUS_FLSH;

        spin_unlock(&client->lock);

        return ret;
}

static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
        p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

        /* we haven't received a response for oldreq,
         * remove it from the list.
         */
        spin_lock(&client->lock);
        list_del(&req->req_list);
        spin_unlock(&client->lock);

        return 0;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
        char *options, *tmp_options;

        opts->port = P9_PORT;
        opts->rfd = ~0;
        opts->wfd = ~0;
        opts->privport = 0;

        if (!params)
                return 0;

        tmp_options = kstrdup(params, GFP_KERNEL);
        if (!tmp_options) {
                p9_debug(P9_DEBUG_ERROR,
                         "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                int r;
                if (!*p)
                        continue;
                token = match_token(p, tokens, args);
                if ((token != Opt_err) && (token != Opt_privport)) {
                        r = match_int(&args[0], &option);
                        if (r < 0) {
                                p9_debug(P9_DEBUG_ERROR,
                                         "integer field, but no integer?\n");
                                continue;
                        }
                }
                switch (token) {
                case Opt_port:
                        opts->port = option;
                        break;
                case Opt_rfdno:
                        opts->rfd = option;
                        break;
                case Opt_wfdno:
                        opts->wfd = option;
                        break;
                case Opt_privport:
                        opts->privport = 1;
                        break;
                default:
                        continue;
                }
        }

        kfree(tmp_options);
        return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
        struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
                                         GFP_KERNEL);
        if (!ts)
                return -ENOMEM;

        ts->rd = fget(rfd);
        ts->wr = fget(wfd);
        if (!ts->rd || !ts->wr) {
                if (ts->rd)
                        fput(ts->rd);
                if (ts->wr)
                        fput(ts->wr);
                kfree(ts);
                return -EIO;
        }

        client->trans = ts;
        client->status = Connected;

        return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
        struct p9_trans_fd *p;
        struct file *file;
        int ret;

        p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        csocket->sk->sk_allocation = GFP_NOIO;
        file = sock_alloc_file(csocket, 0, NULL);
        if (IS_ERR(file)) {
                pr_err("%s (%d): failed to map fd\n",
                       __func__, task_pid_nr(current));
                sock_release(csocket);
                kfree(p);
                return PTR_ERR(file);
        }

        get_file(file);
        p->wr = p->rd = file;
        client->trans = p;
        client->status = Connected;

        p->rd->f_flags |= O_NONBLOCK;

        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                ret = PTR_ERR(p->conn);
                p->conn = NULL;
                kfree(p);
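                /*
                 * Drop both references to the socket's file: the one taken
                 * by sock_alloc_file() and the extra one from get_file()
                 * above.
                 */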
                sockfd_put(csocket);
                sockfd_put(csocket);
                return ret;
        }
        return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
        p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
                 m, m->mux_list.prev, m->mux_list.next);

        p9_mux_poll_stop(m);
        cancel_work_sync(&m->rq);
        cancel_work_sync(&m->wq);

        p9_conn_cancel(m, -ECONNRESET);

        m->client = NULL;
        kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
        struct p9_trans_fd *ts;

        if (!client)
                return;

        ts = client->trans;
        if (!ts)
                return;

        client->status = Disconnected;

        p9_conn_destroy(ts->conn);

        if (ts->rd)
                fput(ts->rd);
        if (ts->wr)
                fput(ts->wr);

        kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
        int rc, count, in[4];

        rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
        if (rc != 4)
                return -EINVAL;
        for (count = 0; count < 4; count++) {
                if (in[count] > 255)
                        return -EINVAL;
        }
        return 0;
}

static int p9_bind_privport(struct socket *sock)
{
        struct sockaddr_in cl;
        int port, err = -EINVAL;

        memset(&cl, 0, sizeof(cl));
        cl.sin_family = AF_INET;
        cl.sin_addr.s_addr = INADDR_ANY;
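        /* scan down from the top of the reserved range until a port binds */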
        for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
                cl.sin_port = htons((ushort)port);
                err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
                if (err != -EADDRINUSE)
                        break;
        }
        return err;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_in sin_server;
        struct p9_fd_opts opts;

        err = parse_opts(args, &opts);
        if (err < 0)
                return err;

        if (valid_ipaddr4(addr) < 0)
                return -EINVAL;

        csocket = NULL;

        sin_server.sin_family = AF_INET;
        sin_server.sin_addr.s_addr = in_aton(addr);
        sin_server.sin_port = htons(opts.port);
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
                            SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
        if (err) {
                pr_err("%s (%d): problem creating socket\n",
                       __func__, task_pid_nr(current));
                return err;
        }

        if (opts.privport) {
                err = p9_bind_privport(csocket);
                if (err < 0) {
                        pr_err("%s (%d): problem binding to privport\n",
                               __func__, task_pid_nr(current));
                        sock_release(csocket);
                        return err;
                }
        }

        err = csocket->ops->connect(csocket,
                                    (struct sockaddr *)&sin_server,
                                    sizeof(struct sockaddr_in), 0);
        if (err < 0) {
                pr_err("%s (%d): problem connecting socket to %s\n",
                       __func__, task_pid_nr(current), addr);
                sock_release(csocket);
                return err;
        }

        return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_un sun_server;

        csocket = NULL;

        if (strlen(addr) >= UNIX_PATH_MAX) {
                pr_err("%s (%d): address too long: %s\n",
                       __func__, task_pid_nr(current), addr);
                return -ENAMETOOLONG;
        }

        sun_server.sun_family = PF_UNIX;
        strcpy(sun_server.sun_path, addr);
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
                            SOCK_STREAM, 0, &csocket, 1);
        if (err < 0) {
                pr_err("%s (%d): problem creating socket\n",
                       __func__, task_pid_nr(current));

                return err;
        }
        err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
                                    sizeof(struct sockaddr_un) - 1, 0);
        if (err < 0) {
                pr_err("%s (%d): problem connecting socket: %s: %d\n",
                       __func__, task_pid_nr(current), addr, err);
                sock_release(csocket);
                return err;
        }

        return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct p9_fd_opts opts;
        struct p9_trans_fd *p;

        parse_opts(args, &opts);

        if (opts.rfd == ~0 || opts.wfd == ~0) {
                pr_err("Insufficient options for proto=fd\n");
                return -ENOPROTOOPT;
        }

        err = p9_fd_open(client, opts.rfd, opts.wfd);
        if (err < 0)
                return err;

        p = (struct p9_trans_fd *) client->trans;
        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                err = PTR_ERR(p->conn);
                p->conn = NULL;
                fput(p->rd);
                fput(p->wr);
                return err;
        }

        return 0;
}

static struct p9_trans_module p9_tcp_trans = {
        .name = "tcp",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create_tcp,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
        .name = "unix",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create_unix,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
        .name = "fd",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
};

/**
 * p9_poll_workfn - poll worker
 * @work: work to be done
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
        unsigned long flags;

        p9_debug(P9_DEBUG_TRANS, "start %p\n", current);

        spin_lock_irqsave(&p9_poll_lock, flags);
        while (!list_empty(&p9_poll_pending_list)) {
                struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
                                                        struct p9_conn,
                                                        poll_pending_link);
                list_del_init(&conn->poll_pending_link);
                spin_unlock_irqrestore(&p9_poll_lock, flags);

                p9_poll_mux(conn);

                spin_lock_irqsave(&p9_poll_lock, flags);
        }
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        p9_debug(P9_DEBUG_TRANS, "finish\n");
}

int p9_trans_fd_init(void)
{
        v9fs_register_trans(&p9_tcp_trans);
        v9fs_register_trans(&p9_unix_trans);
        v9fs_register_trans(&p9_fd_trans);

        return 0;
}

void p9_trans_fd_exit(void)
{
        flush_work(&p9_poll_work);
        v9fs_unregister_trans(&p9_tcp_trans);
        v9fs_unregister_trans(&p9_unix_trans);
        v9fs_unregister_trans(&p9_fd_trans);
}