]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - fs/ncpfs/sock.c
Merge tag 'efi-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi into...
[mirror_ubuntu-artful-kernel.git] / fs / ncpfs / sock.c
1 /*
2 * linux/fs/ncpfs/sock.c
3 *
4 * Copyright (C) 1992, 1993 Rick Sladkey
5 *
6 * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
7 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
8 *
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/time.h>
14 #include <linux/errno.h>
15 #include <linux/socket.h>
16 #include <linux/fcntl.h>
17 #include <linux/stat.h>
18 #include <linux/string.h>
19 #include <linux/sched/signal.h>
20 #include <linux/uaccess.h>
21 #include <linux/in.h>
22 #include <linux/net.h>
23 #include <linux/mm.h>
24 #include <linux/netdevice.h>
25 #include <linux/signal.h>
26 #include <linux/slab.h>
27 #include <net/scm.h>
28 #include <net/sock.h>
29 #include <linux/ipx.h>
30 #include <linux/poll.h>
31 #include <linux/file.h>
32
33 #include "ncp_fs.h"
34
35 #include "ncpsign_kernel.h"
36
37 static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
38 {
39 struct msghdr msg = {NULL, };
40 struct kvec iov = {buf, size};
41 return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
42 }
43
44 static int _send(struct socket *sock, const void *buff, int len)
45 {
46 struct msghdr msg = { .msg_flags = 0 };
47 struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
48 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
49 return sock_sendmsg(sock, &msg);
50 }
51
/*
 * In-flight NCP request. Reference-counted: one ref for the waiter in
 * do_ncp_rpc_call() and one taken by ncp_add_request() for the
 * transmit/receive machinery; freed when the last ref is dropped.
 */
struct ncp_request_reply {
	struct list_head req;		/* link in server->tx.requests queue */
	wait_queue_head_t wq;		/* waiter sleeps here until RQ_DONE */
	atomic_t refs;			/* see ncp_req_get()/ncp_req_put() */
	unsigned char* reply_buf;	/* caller's buffer for the reply payload */
	size_t datalen;			/* max reply size in, actual size out */
	int result;			/* final status reported to the waiter */
	/* RQ_ABANDONED: waiter gave up (signal) while I/O was in progress */
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct iov_iter from;		/* unsent remainder of the request */
	struct kvec tx_iov[3];		/* [0] TCP header, [1] packet, [2] UDP signature */
	u_int16_t tx_type;		/* request type (first 16 bits of packet) */
	u_int32_t sign[6];		/* TCP framing header and/or packet signature */
};
65
66 static inline struct ncp_request_reply* ncp_alloc_req(void)
67 {
68 struct ncp_request_reply *req;
69
70 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
71 if (!req)
72 return NULL;
73
74 init_waitqueue_head(&req->wq);
75 atomic_set(&req->refs, (1));
76 req->status = RQ_IDLE;
77
78 return req;
79 }
80
81 static void ncp_req_get(struct ncp_request_reply *req)
82 {
83 atomic_inc(&req->refs);
84 }
85
86 static void ncp_req_put(struct ncp_request_reply *req)
87 {
88 if (atomic_dec_and_test(&req->refs))
89 kfree(req);
90 }
91
92 void ncp_tcp_data_ready(struct sock *sk)
93 {
94 struct ncp_server *server = sk->sk_user_data;
95
96 server->data_ready(sk);
97 schedule_work(&server->rcv.tq);
98 }
99
100 void ncp_tcp_error_report(struct sock *sk)
101 {
102 struct ncp_server *server = sk->sk_user_data;
103
104 server->error_report(sk);
105 schedule_work(&server->rcv.tq);
106 }
107
108 void ncp_tcp_write_space(struct sock *sk)
109 {
110 struct ncp_server *server = sk->sk_user_data;
111
112 /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
113 not vice versa... */
114 server->write_space(sk);
115 if (server->tx.creq)
116 schedule_work(&server->tx.tq);
117 }
118
119 void ncpdgram_timeout_call(unsigned long v)
120 {
121 struct ncp_server *server = (void*)v;
122
123 schedule_work(&server->timeout_tq);
124 }
125
/*
 * Complete @req with @result: copy the reply payload to the waiter's
 * buffer, mark the request done, wake all waiters and drop the
 * machinery's reference. Caller holds rcv.creq_mutex.
 */
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	/* If the waiter abandoned the request, reply_buf may be gone */
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	/* Set status last so a woken waiter sees a complete request */
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
135
/*
 * Tear down the connection: invalidate it, stop the retransmit timer
 * and fail every queued, receiving and transmitting request with -EIO.
 * Caller holds rcv.creq_mutex.
 */
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	/* Fail everything still waiting in the transmit queue */
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
		
		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	/* Fail the request currently awaiting a reply, reset rcv state */
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	/* Fail the request currently being transmitted (TCP only) */
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
161
162 static inline int get_conn_number(struct ncp_reply_header *rp)
163 {
164 return rp->conn_low | (rp->conn_high << 8);
165 }
166
/*
 * Abort @req with error @err on behalf of its waiter (typically after
 * a signal). What happens depends on how far the request got; caller
 * holds rcv.creq_mutex.
 */
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
	case RQ_IDLE:
	case RQ_DONE:
		break;
	case RQ_QUEUED:
		/* Not yet on the wire: just unlink and fail it */
		list_del_init(&req->req);
		ncp_finish_request(server, req, err);
		break;
	case RQ_INPROGRESS:
		/*
		 * I/O in flight: mark abandoned so ncp_finish_request()
		 * won't copy into the waiter's (soon invalid) reply_buf.
		 */
		req->status = RQ_ABANDONED;
		break;
	case RQ_ABANDONED:
		break;
	}
}
185
186 static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
187 {
188 mutex_lock(&server->rcv.creq_mutex);
189 __ncp_abort_request(server, req, err);
190 mutex_unlock(&server->rcv.creq_mutex);
191 }
192
/* TCP-mode connection abort; alias for __abort_ncp_connection(). */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
197
198 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
199 {
200 struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
201 return sock_sendmsg(sock, &msg);
202 }
203
/*
 * Try to push (more of) the current TCP request onto the wire.
 * Non-blocking; on partial send the remaining iterator is saved in
 * rq->from and ncp_tcp_write_space() will reschedule us. Once the
 * whole request is sent, ownership moves from tx.creq to rcv.creq.
 * Caller holds rcv.creq_mutex (or runs before the request is visible).
 */
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	msg.msg_iter = rq->from;
	result = sock_sendmsg(server->ncp_sock, &msg);

	/* Socket buffer full: wait for the write_space callback */
	if (result == -EAGAIN)
		return;

	if (result < 0) {
		pr_err("tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (!msg_data_left(&msg)) {
		/* Fully sent: hand the request over to the receive side */
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	/* Partial send: remember how far we got */
	rq->from = msg.msg_iter;
}
232
233 static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
234 {
235 req->status = RQ_INPROGRESS;
236 h->conn_low = server->connection;
237 h->conn_high = server->connection >> 8;
238 h->sequence = ++server->sequence;
239 }
240
/*
 * Begin transmitting @req over UDP: stamp the header, optionally
 * append a packet signature, send the first datagram and arm the
 * retransmit timer. Caller holds rcv.creq_mutex.
 */
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;
	
	ncp_init_header(server, req, h);
	/*
	 * Sign everything after the header's type byte; the -1/+1 offsets
	 * include the header's last byte in the signed range.
	 */
	signlen = sign_packet(server,
			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, 
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(len), req->sign);
	if (signlen) {
		/* NCP over UDP appends signature */
		req->tx_iov[2].iov_base = req->sign;
		req->tx_iov[2].iov_len = signlen;
	}
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
264
/* Magic numbers framing NCP packets on a TCP stream ("DmdT"/"tNcP") */
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)

/*
 * Begin transmitting @req over TCP: stamp the header, build the
 * 16-byte stream framing header (plus optional 8-byte signature) in
 * req->sign, prepend it via tx_iov[0] and try a first non-blocking
 * send. Caller holds rcv.creq_mutex.
 */
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	/* Signature (if any) goes after the 16-byte framing header */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(len + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(len + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	/* NCP over TCP prepends signature */
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov, 2, len + signlen);

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
292
/*
 * Snapshot the request packet into the server's private txbuf and
 * dispatch to the TCP or UDP transmit path. Caller holds
 * rcv.creq_mutex.
 */
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   staying alive */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}
305
/*
 * Queue @req for transmission. Takes a reference for the transmit/
 * receive machinery; if the link is busy the request waits in
 * tx.requests, otherwise transmission starts immediately. Returns 0 or
 * -EIO when the connection is already dead.
 */
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	/* Another request in flight (being sent or awaiting reply)? */
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
325
326 static void __ncp_next_request(struct ncp_server *server)
327 {
328 struct ncp_request_reply *req;
329
330 server->rcv.creq = NULL;
331 if (list_empty(&server->tx.requests)) {
332 return;
333 }
334 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
335 list_del_init(&req->req);
336 __ncp_start_request(server, req);
337 }
338
339 static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
340 {
341 if (server->info_sock) {
342 struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
343 __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
344 struct kvec iov[2] = {
345 {.iov_base = hdr, .iov_len = 8},
346 {.iov_base = (void *)data, .iov_len = len},
347 };
348
349 iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
350 iov, 2, len + 8);
351
352 sock_sendmsg(server->info_sock, &msg);
353 }
354 }
355
/*
 * UDP receive worker. Drains the socket: peeks at each datagram's
 * reply header, answers watchdog probes, forwards unexpected packets
 * to the info socket, and matches NCP replies/ACKs against the
 * in-flight request (verifying the packet signature when signing is
 * active). Anything unconsumed is dropped at the bottom of the loop.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;
	
	sock = server->ncp_sock;
	
	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* Peek first so we can route without consuming the datagram */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;
	
			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];
	
				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* Keepalive: echo the packet back with 'Y' */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* Not for us; pass it on to the info socket */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* ALLOC_SLOT replies can't be matched by conn/seq yet */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && 
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* Server is alive but busy: restart the clock */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;
							
							/* Trailing 8 bytes are the signature */
							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;		
		/* Consume and discard the datagram we only peeked at */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
446
/*
 * UDP retransmit logic, run from the timeout workqueue with
 * rcv.creq_mutex held. Resends the in-flight request with exponential
 * backoff (capped at NCP_MAX_RPC_TIMEOUT); on soft mounts the request
 * fails with -ETIMEDOUT once the retry budget is exhausted.
 */
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;
		
		req = server->rcv.creq;
		if (req) {
			int timeout;
			
			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			/* Double the interval, bounded by the RPC maximum */
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
474
475 void ncpdgram_timeout_proc(struct work_struct *work)
476 {
477 struct ncp_server *server =
478 container_of(work, struct ncp_server, timeout_tq);
479 mutex_lock(&server->rcv.creq_mutex);
480 __ncpdgram_timeout_proc(server);
481 mutex_unlock(&server->rcv.creq_mutex);
482 }
483
484 static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
485 {
486 int result;
487
488 if (buffer) {
489 result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
490 } else {
491 static unsigned char dummy[1024];
492
493 if (len > sizeof(dummy)) {
494 len = sizeof(dummy);
495 }
496 result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
497 }
498 if (result < 0) {
499 return result;
500 }
501 if (result > len) {
502 pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
503 return -EIO;
504 }
505 return result;
506 }
507
/*
 * TCP receive state machine, run with rcv.creq_mutex held.
 * The inner loop pulls rcv.len bytes into rcv.ptr (NULL ptr = discard);
 * when the chunk completes, rcv.state decides what was just read:
 *   0 - 10-byte framing header (magic, length, type)
 *   1 - reply payload for the current request
 *   2 - discarding an unwanted packet's payload
 *   3 - discarding an oversized reply (request fails with -EIO)
 *   4 - extra 8 header bytes present when packet signing is active
 *   5 - payload of an unexpected packet, forwarded to the info socket
 * Returns 0 when the socket would block, -EIO on a fatal error.
 */
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		/* Fill the current chunk (header or payload) */
		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				/* Error or EOF: fail the request or the link */
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
		case 0:
			/* Framing header complete: validate magic and length */
			if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
				pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
				__ncptcp_abort(server);
				return -EIO;
			}
			datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
			if (datalen < 10) {
				pr_err("tcp: Unexpected reply len %d\n", datalen);
				__ncptcp_abort(server);
				return -EIO;
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active) {
				/* Signed frames carry 8 extra header bytes */
				if (datalen < 18) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
				server->rcv.buf.len = datalen - 8;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
				server->rcv.len = 8;
				server->rcv.state = 4;
				break;
			}
#endif
			type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
			if (type != NCP_REPLY) {
				/* Not a reply: stage it for the info socket if it fits */
				if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
					*(__u16*)(server->unexpected_packet.data) = htons(type);
					server->unexpected_packet.len = datalen - 8;

					server->rcv.state = 5;
					server->rcv.ptr = server->unexpected_packet.data + 2;
					server->rcv.len = datalen - 10;
					break;
				}					
				ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
				server->rcv.state = 2;
skipdata:;
				server->rcv.ptr = NULL;
				server->rcv.len = datalen - 10;
				break;
			}
			req = server->rcv.creq;
			if (!req) {
				ncp_dbg(1, "Reply without appropriate request\n");
				goto skipdata2;
			}
			if (datalen > req->datalen + 8) {
				pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
				server->rcv.state = 3;
				goto skipdata;
			}
			/* Read the payload into rxbuf (first 2 bytes synthesized) */
			req->datalen = datalen - 8;
			((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
			server->rcv.ptr = server->rxbuf + 2;
			server->rcv.len = datalen - 10;
			server->rcv.state = 1;
			break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
		case 4:
			/* Signed-mode extra header read: recover len/type */
			datalen = server->rcv.buf.len;
			type = ntohs(server->rcv.buf.type2);
			goto cont;
#endif
		case 1:
			/* Reply payload complete: validate and finish the request */
			req = server->rcv.creq;
			if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
				if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
					pr_err("tcp: Bad sequence number\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
				if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
					pr_err("tcp: Connection number mismatch\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
				if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
					pr_err("tcp: Signature violation\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#endif		
			ncp_finish_request(server, req, req->datalen);
nextreq:;
			__ncp_next_request(server);
			/* fall through: rearm for the next framing header */
		case 2:
next:;
			server->rcv.ptr = (unsigned char*)&server->rcv.buf;
			server->rcv.len = 10;
			server->rcv.state = 0;
			break;
		case 3:
			/* Oversized reply fully discarded: fail the request */
			ncp_finish_request(server, server->rcv.creq, -EIO);
			goto nextreq;
		case 5:
			info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
			goto next;
		}
	}
}
653
654 void ncp_tcp_rcv_proc(struct work_struct *work)
655 {
656 struct ncp_server *server =
657 container_of(work, struct ncp_server, rcv.tq);
658
659 mutex_lock(&server->rcv.creq_mutex);
660 __ncptcp_rcv_proc(server);
661 mutex_unlock(&server->rcv.creq_mutex);
662 }
663
664 void ncp_tcp_tx_proc(struct work_struct *work)
665 {
666 struct ncp_server *server =
667 container_of(work, struct ncp_server, tx.tq);
668
669 mutex_lock(&server->rcv.creq_mutex);
670 __ncptcp_try_send(server);
671 mutex_unlock(&server->rcv.creq_mutex);
672 }
673
/*
 * Issue one NCP RPC: queue a request for the @size bytes already
 * staged in server->packet, sleep interruptibly until it completes and
 * return its result (or -EINTR on a signal, -ENOMEM/-EIO on setup
 * failure). The reply payload is written to @reply_buf (at most
 * @max_reply_size bytes).
 */
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		/* Signal: detach from the request; the machinery keeps
		   its own reference until the I/O settles */
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	/* Drop the caller's reference taken in ncp_alloc_req() */
	ncp_req_put(req);

	return result;
}
707
/*
 * We need the server to be locked here, so check!
 */

/*
 * Validate server state, then perform the RPC with most signals
 * blocked: only SIGKILL (and, on interruptible mounts, default-handled
 * SIGINT/SIGQUIT) may interrupt the wait. The caller's signal mask is
 * restored afterwards. Returns the RPC result or -EIO.
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		/* Exiting tasks must not be interruptible at all */
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~  */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		/* Block everything except the signals in mask */
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		
		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		/* Restore the original signal mask */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
761
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	/* Subfunction requests carry a 16-bit payload-length field
	   right after the header */
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	/* Record reply metadata on the server for the caller to inspect */
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
802
803 int ncp_connect(struct ncp_server *server)
804 {
805 struct ncp_request_header *h;
806 int result;
807
808 server->connection = 0xFFFF;
809 server->sequence = 255;
810
811 h = (struct ncp_request_header *) (server->packet);
812 h->type = NCP_ALLOC_SLOT_REQUEST;
813 h->task = 2; /* see above */
814 h->function = 0;
815
816 result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
817 if (result < 0)
818 goto out;
819 server->connection = h->conn_low + (h->conn_high * 256);
820 result = 0;
821 out:
822 return result;
823 }
824
825 int ncp_disconnect(struct ncp_server *server)
826 {
827 struct ncp_request_header *h;
828
829 h = (struct ncp_request_header *) (server->packet);
830 h->type = NCP_DEALLOC_SLOT_REQUEST;
831 h->task = 2; /* see above */
832 h->function = 0;
833
834 return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
835 }
836
837 void ncp_lock_server(struct ncp_server *server)
838 {
839 mutex_lock(&server->mutex);
840 if (server->lock)
841 pr_warn("%s: was locked!\n", __func__);
842 server->lock = 1;
843 }
844
845 void ncp_unlock_server(struct ncp_server *server)
846 {
847 if (!server->lock) {
848 pr_warn("%s: was not locked!\n", __func__);
849 return;
850 }
851 server->lock = 0;
852 mutex_unlock(&server->mutex);
853 }