/*
 * linux/net/iucv/af_iucv.c
 *
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright 2006 IBM Corporation
 *
 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

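/*
 * The 16-byte IUCV user data field carries two 8-byte application names:
 * high_nmcpy() fills bytes 0-7, low_nmcpy() fills bytes 8-15.
 */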
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}

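/*
 * Look up a bound socket by its 8-byte application name.  Callers hold
 * iucv_sk_list.lock.
 */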
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

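/*
 * Accept queue handling: not-yet-accepted child sockets are chained on the
 * listening socket's accept_q under accept_q_lock; sk_ack_backlog counts them.
 */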
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

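/*
 * Wait until the socket reaches one of the two given states, the timeout
 * expires or a signal arrives.  Called with the socket locked; the lock is
 * dropped around schedule_timeout().
 */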
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

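/*
 * Send data on a connected socket.  Each message gets a sequential tag
 * (stored in skb->cb) so iucv_callback_txdone() can match the completion
 * to the skb queued on send_skb_q.
 */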
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

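/*
 * Split a large received message into skbs of at most sk_rcvbuf/4 bytes
 * and queue them on backlog_skb_q.
 */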
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

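/*
 * Receive one IUCV message into the given skb and queue it on the socket's
 * receive queue; if the message is too large or the receive queue is full,
 * the data ends up on backlog_skb_q instead.
 */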
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

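/*
 * Process messages that were deferred by iucv_callback_rx() because the
 * receive buffer was full; stops as soon as backlog skbs pile up again.
 */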
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

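/*
 * Receive data.  After copying to the caller, refill sk_receive_queue from
 * backlog_skb_q and, once the backlog is empty, process any deferred
 * messages on message_q.
 */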
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}

	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

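/* A listening socket is readable once a queued child reaches IUCV_CONNECTED. */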
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

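/*
 * message_pending callback: receive the message immediately if the socket's
 * receive buffer has room, otherwise save the message descriptor on
 * message_q for later processing by iucv_process_message_q().
 */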
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += msg->length + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}

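/*
 * message_complete callback: find the skb whose tag matches the completed
 * message, remove it from send_skb_q, and finish a pending close once the
 * queue has drained.
 */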
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (list_skb) {
		spin_lock_irqsave(&list->lock, flags);

		do {
			this = list_skb;
			list_skb = list_skb->next;
		} while (memcmp(&msg->tag, this->cb, 4) && list_skb);

		spin_unlock_irqrestore(&list->lock, flags);

		skb_unlink(this, &iucv_sk(sk)->send_skb_q);
		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}

}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);