/*
 * Source: git.proxmox.com web viewer, mirror_ubuntu-bionic-kernel.git,
 * fs/ncpfs/sock.c.  (The page header also carried the unrelated commit
 * subject "mm: shmem.c: Correctly annotate new inodes for lockdep".)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ncpfs/sock.c
4 *
5 * Copyright (C) 1992, 1993 Rick Sladkey
6 *
7 * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
8 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
9 *
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/time.h>
15 #include <linux/errno.h>
16 #include <linux/socket.h>
17 #include <linux/fcntl.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
20 #include <linux/sched/signal.h>
21 #include <linux/uaccess.h>
22 #include <linux/in.h>
23 #include <linux/net.h>
24 #include <linux/mm.h>
25 #include <linux/netdevice.h>
26 #include <linux/signal.h>
27 #include <linux/slab.h>
28 #include <net/scm.h>
29 #include <net/sock.h>
30 #include <linux/ipx.h>
31 #include <linux/poll.h>
32 #include <linux/file.h>
33
34 #include "ncp_fs.h"
35
36 #include "ncpsign_kernel.h"
37
38 static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
39 {
40 struct msghdr msg = {NULL, };
41 struct kvec iov = {buf, size};
42 return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
43 }
44
45 static int _send(struct socket *sock, const void *buff, int len)
46 {
47 struct msghdr msg = { .msg_flags = 0 };
48 struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
49 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
50 return sock_sendmsg(sock, &msg);
51 }
52
/* One outstanding NCP request and the bookkeeping for its reply. */
struct ncp_request_reply {
	struct list_head req;		/* link in server->tx.requests queue */
	wait_queue_head_t wq;		/* waiters sleep here until RQ_DONE */
	atomic_t refs;			/* refcount; last ncp_req_put() frees */
	unsigned char* reply_buf;	/* caller's buffer for the reply */
	size_t datalen;			/* max reply size in, actual size out */
	int result;			/* final result handed to the waiter */
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct iov_iter from;		/* unsent part of the request */
	struct kvec tx_iov[3];		/* [0] TCP prefix, [1] packet, [2] UDP signature */
	u_int16_t tx_type;		/* request type (first 16 bits of packet) */
	u_int32_t sign[6];		/* signing / TCP record header scratch */
};
66
67 static inline struct ncp_request_reply* ncp_alloc_req(void)
68 {
69 struct ncp_request_reply *req;
70
71 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
72 if (!req)
73 return NULL;
74
75 init_waitqueue_head(&req->wq);
76 atomic_set(&req->refs, (1));
77 req->status = RQ_IDLE;
78
79 return req;
80 }
81
/* Take an additional reference on @req. */
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}
86
87 static void ncp_req_put(struct ncp_request_reply *req)
88 {
89 if (atomic_dec_and_test(&req->refs))
90 kfree(req);
91 }
92
/*
 * sk->sk_data_ready callback for the NCP socket: chain to the socket's
 * saved handler, then kick the receive worker.
 */
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
100
/*
 * sk->sk_error_report callback: chain to the saved handler and let the
 * receive worker observe (and clean up after) the socket error.
 */
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
108
/*
 * sk->sk_write_space callback: chain to the saved handler and, when a
 * transmission is in progress, kick the transmit worker to push more.
 */
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
119
/*
 * UDP retransmit timer expired: defer the real work to process context
 * (the timeout handler needs rcv.creq_mutex).
 */
void ncpdgram_timeout_call(struct timer_list *t)
{
	struct ncp_server *server = from_timer(server, t, timeout_tm);

	schedule_work(&server->timeout_tq);
}
126
/*
 * Complete @req with @result: copy the reply out of the server receive
 * buffer into the caller's reply_buf (unless the waiter has abandoned
 * the request), mark it done, wake all waiters and drop the queue's
 * reference.  All callers in this file hold rcv.creq_mutex.
 */
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
136
/*
 * Tear the connection down after a fatal error: invalidate it, stop the
 * retransmit timer and fail every queued and in-flight request with
 * -EIO.  Called with rcv.creq_mutex held.
 */
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	/* Fail everything still waiting in the transmit queue... */
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	/* ...then the request currently awaiting its reply... */
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	/* ...and finally the one still being transmitted, if any. */
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
162
163 static inline int get_conn_number(struct ncp_reply_header *rp)
164 {
165 return rp->conn_low | (rp->conn_high << 8);
166 }
167
/*
 * Cancel @req with error @err.  Caller holds rcv.creq_mutex.
 */
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
	case RQ_IDLE:
	case RQ_DONE:
		break;
	case RQ_QUEUED:
		/* Never hit the wire: just unlink and complete it. */
		list_del_init(&req->req);
		ncp_finish_request(server, req, err);
		break;
	case RQ_INPROGRESS:
		/* Transfer in flight: mark it abandoned so the receive
		   path will not copy the reply into the caller's buffer. */
		req->status = RQ_ABANDONED;
		break;
	case RQ_ABANDONED:
		break;
	}
}
186
/* Locked wrapper around __ncp_abort_request(). */
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
193
/* Abort the whole TCP connection; caller holds rcv.creq_mutex. */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
198
199 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
200 {
201 struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
202 return sock_sendmsg(sock, &msg);
203 }
204
/*
 * Push as much of the current transmit request (tx.creq) down the TCP
 * socket as it accepts without blocking.  Once fully sent, the request
 * moves from tx.creq to rcv.creq to await its reply.  Callers hold
 * rcv.creq_mutex.
 */
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	msg.msg_iter = rq->from;
	result = sock_sendmsg(server->ncp_sock, &msg);

	/* Socket buffer full: the write_space callback reschedules us. */
	if (result == -EAGAIN)
		return;

	if (result < 0) {
		pr_err("tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (!msg_data_left(&msg)) {
		/* Fully transmitted: now wait for the reply. */
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	/* Partial send: remember how far we got. */
	rq->from = msg.msg_iter;
}
233
/*
 * Stamp an outgoing request header with the connection and (bumped)
 * sequence number, and mark the request in progress.
 */
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
241
/*
 * Begin transmitting @req over UDP: stamp the header, append the packet
 * signature when signing is active, send the first datagram and arm the
 * retransmit timer.  Callers hold rcv.creq_mutex.
 */
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	signlen = sign_packet(server,
			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(len), req->sign);
	if (signlen) {
		/* NCP over UDP appends signature */
		req->tx_iov[2].iov_base = req->sign;
		req->tx_iov[2].iov_len = signlen;
	}
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
	/* Datagram requests await their reply immediately. */
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	/* Send errors are ignored; the timeout path retransmits. */
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
265
/* Framing constants for NCP-over-TCP records. */
#define NCP_TCP_XMIT_MAGIC (0x446D6454)
#define NCP_TCP_XMIT_VERSION (1)
#define NCP_TCP_RCVD_MAGIC (0x744E6350)

/*
 * Begin transmitting @req over TCP: stamp the header, build the 16-byte
 * record prefix (magic, length, version, reply-size) plus the optional
 * signature, and start sending.  Callers hold rcv.creq_mutex.
 */
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	/* 16 bytes of record prefix always go out in front; sign_packet()
	   adds the signature length on top (0 when signing is inactive). */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(len + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(len + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	/* NCP over TCP prepends signature */
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov, 2, len + signlen);

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
293
294 static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
295 {
296 /* we copy the data so that we do not depend on the caller
297 staying alive */
298 memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
299 req->tx_iov[1].iov_base = server->txbuf;
300
301 if (server->ncp_sock->type == SOCK_STREAM)
302 ncptcp_start_request(server, req);
303 else
304 ncpdgram_start_request(server, req);
305 }
306
/*
 * Submit @req to the server.  If another request is being transmitted
 * or is awaiting a reply, the new one is queued; otherwise transmission
 * starts immediately.  Takes an extra reference on @req for the queue.
 * Returns 0, or -EIO when the connection is already dead.
 */
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
326
327 static void __ncp_next_request(struct ncp_server *server)
328 {
329 struct ncp_request_reply *req;
330
331 server->rcv.creq = NULL;
332 if (list_empty(&server->tx.requests)) {
333 return;
334 }
335 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
336 list_del_init(&req->req);
337 __ncp_start_request(server, req);
338 }
339
/*
 * Forward @len bytes of out-of-band data to the userspace info socket
 * (if one is attached), framed as a big-endian { len + 8, id } header
 * followed by the payload.  Send errors are ignored.
 */
static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	if (server->info_sock) {
		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
		__be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
		struct kvec iov[2] = {
			{.iov_base = hdr, .iov_len = 8},
			{.iov_base = (void *)data, .iov_len = len},
		};

		iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
				iov, 2, len + 8);

		sock_sendmsg(server->info_sock, &msg);
	}
}
356
/*
 * Receive worker for NCP over UDP.  Drains the socket: answers watchdog
 * probes, routes unexpected packet types to the info socket, and matches
 * replies / positive ACKs against the outstanding request.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* Peek at the header first; the datagram stays queued until
		   we decide how much of it to consume. */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* Answer the keepalive: '?' becomes 'Y'. */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* Unknown type: hand the whole packet to userspace. */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* ALLOC_SLOT replies arrive before we know our
			   connection number, so they match unconditionally. */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* Server alive but slow: restart the clock. */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							/* Signature is the trailing 8 bytes. */
							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		/* Consume (and discard) the datagram we peeked at. */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
447
/*
 * UDP retransmit: when no reply/ACK arrived within the current timeout,
 * resend the outstanding request with exponential backoff capped at
 * NCP_MAX_RPC_TIMEOUT.  Soft mounts give up after m.retry_count tries.
 * Called with rcv.creq_mutex held.
 */
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			/* Double the timeout, up to the cap. */
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
475
/* Workqueue entry for the UDP retransmit timer; takes the creq mutex. */
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);
	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
484
485 static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
486 {
487 int result;
488
489 if (buffer) {
490 result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
491 } else {
492 static unsigned char dummy[1024];
493
494 if (len > sizeof(dummy)) {
495 len = sizeof(dummy);
496 }
497 result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
498 }
499 if (result < 0) {
500 return result;
501 }
502 if (result > len) {
503 pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
504 return -EIO;
505 }
506 return result;
507 }
508
/*
 * TCP receive state machine.  server->rcv.{ptr,len} describe where the
 * next chunk of stream data should land (NULL ptr = discard), and
 * server->rcv.state says what that area holds once len reaches 0:
 *
 *   0 - the 10-byte record header is complete; parse it
 *   1 - a reply body is complete in rxbuf; validate and finish the request
 *   2 - discarding the body of an uninteresting packet
 *   3 - discarding an oversized reply; fail the request afterwards
 *   4 - (signing only) the extra 8 header bytes of a signed record
 *   5 - an unexpected packet body is complete; pass it to userspace
 *
 * Returns 0 when the socket runs dry (-EAGAIN), -EIO on a fatal stream
 * error.  Called with rcv.creq_mutex held.
 */
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		/* Fill the current receive window. */
		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				/* Hard error or EOF: the stream is dead. */
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
		case 0:
			/* Record header complete: validate the magic. */
			if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
				pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
				__ncptcp_abort(server);
				return -EIO;
			}
			datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
			if (datalen < 10) {
				pr_err("tcp: Unexpected reply len %d\n", datalen);
				__ncptcp_abort(server);
				return -EIO;
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active) {
				/* Signed records carry 8 extra header bytes;
				   fetch them before parsing further. */
				if (datalen < 18) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
				server->rcv.buf.len = datalen - 8;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
				server->rcv.len = 8;
				server->rcv.state = 4;
				break;
			}
#endif
			type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
			if (type != NCP_REPLY) {
				/* Not a reply: capture it for the info socket
				   if it fits, otherwise just skip it. */
				if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
					*(__u16*)(server->unexpected_packet.data) = htons(type);
					server->unexpected_packet.len = datalen - 8;

					server->rcv.state = 5;
					server->rcv.ptr = server->unexpected_packet.data + 2;
					server->rcv.len = datalen - 10;
					break;
				}
				ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
				server->rcv.state = 2;
skipdata:;
				server->rcv.ptr = NULL;
				server->rcv.len = datalen - 10;
				break;
			}
			req = server->rcv.creq;
			if (!req) {
				ncp_dbg(1, "Reply without appropriate request\n");
				goto skipdata2;
			}
			if (datalen > req->datalen + 8) {
				pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
				server->rcv.state = 3;
				goto skipdata;
			}
			/* Receive the body into rxbuf.  The first two header
			   bytes were consumed with the record header, so
			   reconstruct the type field here. */
			req->datalen = datalen - 8;
			((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
			server->rcv.ptr = server->rxbuf + 2;
			server->rcv.len = datalen - 10;
			server->rcv.state = 1;
			break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
		case 4:
			/* Signed header tail read; resume parsing above. */
			datalen = server->rcv.buf.len;
			type = ntohs(server->rcv.buf.type2);
			goto cont;
#endif
		case 1:
			/* Complete reply in rxbuf: validate and finish. */
			req = server->rcv.creq;
			if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
				if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
					pr_err("tcp: Bad sequence number\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
				if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
					pr_err("tcp: Connection number mismatch\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
				if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
					pr_err("tcp: Signature violation\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#endif
			ncp_finish_request(server, req, req->datalen);
nextreq:;
			__ncp_next_request(server);
			/* fall through - wait for the next record header */
		case 2:
next:;
			server->rcv.ptr = (unsigned char*)&server->rcv.buf;
			server->rcv.len = 10;
			server->rcv.state = 0;
			break;
		case 3:
			/* Oversized reply fully skipped: fail the request. */
			ncp_finish_request(server, server->rcv.creq, -EIO);
			goto nextreq;
		case 5:
			/* Forward the captured packet to userspace. */
			info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
			goto next;
		}
	}
}
654
/* Workqueue entry for TCP receive; runs the state machine under the mutex. */
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
664
/* Workqueue entry for TCP transmit; pushes more data under the mutex. */
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
674
/*
 * Queue one NCP request (already marshalled in server->packet) and sleep
 * until the reply lands in @reply_buf, or a signal interrupts the wait.
 * Returns the reply length, or a negative errno (-EINTR on signal).
 */
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	/* The first 16 bits of the packet identify the request type. */
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		/* Interrupted: detach the request; once it is marked
		   abandoned the receive path will not touch reply_buf. */
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	ncp_req_put(req);

	return result;
}
708
/*
 * We need the server to be locked here, so check!
 *
 * Runs one RPC with (almost) all signals blocked so that only SIGKILL
 * — plus default-handled SIGINT/SIGQUIT on interruptible mounts — can
 * break the wait.  The caller's signal mask is restored afterwards.
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all. So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves? What about
			   alarms? Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		/* Block everything except the signals chosen above. */
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		/* Restore the caller's signal mask. */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
762
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 *
 * Issues NCP function @function (request already marshalled into
 * server->packet) and decodes the reply header into the server state.
 * Returns the completion code (0 on success) or a negative errno.
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		/* Sub-function requests carry a 16-bit payload length right
		   after the fixed header. */
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
803
/*
 * Establish the NCP connection: send an ALLOC_SLOT request and record
 * the connection number the server assigns.  Returns 0 or an errno.
 */
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;	/* no connection number yet */
	server->sequence = 255;		/* reset sequence counter */

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	/* The reply echoes the assigned connection number in the header. */
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}
825
826 int ncp_disconnect(struct ncp_server *server)
827 {
828 struct ncp_request_header *h;
829
830 h = (struct ncp_request_header *) (server->packet);
831 h->type = NCP_DEALLOC_SLOT_REQUEST;
832 h->task = 2; /* see above */
833 h->function = 0;
834
835 return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
836 }
837
/*
 * Lock the server; ncp_do_request() refuses to run unless this lock is
 * held.  Warns if the lock flag was somehow already set.
 */
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
845
/*
 * Release the server lock taken by ncp_lock_server().  Warns (and does
 * nothing) if the server was not actually locked.
 */
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}