/*
 * z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
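
/*
 * Illustrative note (not part of the original source): the message header
 * consists of version (1 byte), type (1 byte) and datalen (2 bytes) in the
 * packed struct above, so MSG_SIZE(s) simply adds these 4 header bytes to
 * the payload size, e.g. MSG_SIZE(256) == 260.
 */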

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
	u8			info_path[16];	/* IUCV path info (dev attr) */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
#define FILTER_WILDCARD_CHAR	'*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 *	 members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful       */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged   */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *			=> no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *			=> no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	  client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 *
 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS indicates to
 * drop the IUCV connection (similar to hang up the modem).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any time.
	 */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* Lowering the DTR/RTS lines disconnects an established IUCV
	 * connection.
	 */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* Sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed.  The function cleans up tty resources.  The clean-up of the IUCV
 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 * control setting.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);
	priv->tty_state = TTY_CLOSED;
	priv->sndbuf_len = 0;
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 * connect, otherwise non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	const char *wildcard, *filter_entry;
	size_t i, len;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++) {
		filter_entry = hvc_iucv_filter + (8 * i);

		/* If a filter entry contains the filter wildcard character,
		 * reduce the length to match the leading portion of the user
		 * ID only (wildcard match).  Characters following the wildcard
		 * are ignored.
		 */
		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
		len = (wildcard) ? wildcard - filter_entry : 8;
		if (0 == memcmp(ipvmid, filter_entry, len))
			return 0;
	}
	return 1;
}
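
/*
 * Illustrative note (not part of the original source): with a filter entry of
 * "LNXSYS*" (stored blank-padded), the memcmp() above compares only the six
 * leading characters, so hypothetical z/VM user IDs such as "LNXSYS01" or
 * "LNXSYS22" are accepted; an exact entry without a wildcard is compared over
 * all eight characters and therefore matches that one user ID only.
 */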

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* First, check if the pending path request is managed by this
	 * IUCV handler:
	 * - find a disconnected device if ipuser contains the wildcard
	 * - find the device that matches the terminal ID in ipuser
	 */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT  */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();			/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue.  Additional delivery information can be found in
 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}

static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	size_t len;

	len = sizeof(priv->srv_name);
	memcpy(buf, priv->srv_name, len);
	EBCASC(buf, len);
	buf[len++] = '\n';
	return len;
}

static ssize_t hvc_iucv_dev_state_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}

static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	char vmid[9], ipuser[9];

	memset(vmid, 0, sizeof(vmid));
	memset(ipuser, 0, sizeof(ipuser));

	spin_lock_bh(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		memcpy(vmid, priv->info_path, 8);
		memcpy(ipuser, priv->info_path + 8, 8);
	}
	spin_unlock_bh(&priv->lock);
	EBCASC(ipuser, 8);

	return sprintf(buf, "%s:%s\n", vmid, ipuser);
}


/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};

/* IUCV HVC device attributes */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};


/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,	/* PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to release
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	/* prohibit filter entries containing the wildcard character only */
	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
		return ERR_PTR(-EINVAL);

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}
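
/*
 * Illustrative note (not part of the original source): for a hypothetical
 * input of "lnx1,lnxterm2\n", the first call stores "LNX1    " (upper case,
 * blank-padded to eight characters) in @dest and returns a pointer to
 * "lnxterm2\n"; the second call strips the trailing newline, stores
 * "LNXTERM2", and returns a pointer to the terminating '\0'.
 */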

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer.  Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			  "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
		       "kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* register IUCV HVC device driver */
	rc = driver_register(&hvc_iucv_driver);
	if (rc)
		goto out_error;

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
			       "reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
			       "z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
			       "z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
			       "failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
		       rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return kstrtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
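
/*
 * Usage note (illustrative addition, not part of the original source): the
 * driver is configured through the two parameters registered above.  The
 * values shown are hypothetical examples:
 *
 *   hvc_iucv=4
 *	allocate four IUCV HVC terminal devices (0 disables the driver,
 *	the maximum is MAX_HVC_IUCV_LINES)
 *
 *   hvc_iucv_allow=lnxsys01,lnxsys02,lnxter*
 *	accept connections only from the listed z/VM user IDs; a trailing
 *	'*' acts as a wildcard for the remaining characters
 *
 * Because hvc_iucv_allow is registered with core_param() and mode 0640, the
 * filter can typically also be read and updated at runtime through the
 * corresponding entry under /sys/module/kernel/parameters/.
 */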