/*
 * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
 *
 * This back-end for HVC provides terminal access via
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008.
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"

#include <linux/types.h>
#include <asm/ebcdic.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* HVC backend for z/VM IUCV */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer) / 4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
struct iucv_tty_msg {
        u8      version;                /* Message version */
        u8      type;                   /* Message type */
#define MSG_MAX_DATALEN		(~(u16)0)
        u16     datalen;                /* Payload length */
        u8      data[];                 /* Payload buffer */
} __attribute__((packed));
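
/*
 * Worked example (illustrative only, derived from the definitions above):
 * the packed header takes 4 bytes (version, type, datalen), so a data
 * message with a 10-byte payload occupies MSG_SIZE(10) ==
 * offsetof(struct iucv_tty_msg, data) + 10 == 14 bytes.  A sender would
 * assemble such a message roughly like this:
 *
 *      struct iucv_tty_msg *m = kmalloc(MSG_SIZE(10), GFP_KERNEL);
 *
 *      m->version = MSG_VERSION;
 *      m->type    = MSG_TYPE_DATA;
 *      m->datalen = 10;
 *      memcpy(m->data, "0123456789", 10);
 */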

enum iucv_state_t {
        IUCV_DISCONN    = 0,
        IUCV_CONNECTED  = 1,
        IUCV_SEVERED    = 2,
};

enum tty_state_t {
        TTY_CLOSED      = 0,
        TTY_OPENED      = 1,
};

struct hvc_iucv_private {
        struct hvc_struct       *hvc;           /* HVC console struct reference */
        u8                      srv_name[8];    /* IUCV service name (ebcdic) */
        enum iucv_state_t       iucv_state;     /* IUCV connection status */
        enum tty_state_t        tty_state;      /* TTY status */
        struct iucv_path        *path;          /* IUCV path pointer */
        spinlock_t              lock;           /* hvc_iucv_private lock */
        struct list_head        tty_outqueue;   /* outgoing IUCV messages */
        struct list_head        tty_inqueue;    /* incoming IUCV messages */
};

struct iucv_tty_buffer {
        struct list_head        list;   /* list pointer */
        struct iucv_message     msg;    /* store an incoming IUCV message */
        size_t                  offset; /* data buffer offset */
        struct iucv_tty_msg     *mbuf;  /* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
        .path_pending     = hvc_iucv_path_pending,
        .path_severed     = hvc_iucv_path_severed,
        .message_complete = hvc_iucv_msg_complete,
        .message_pending  = hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
        if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC >= hvc_iucv_devices))
                return NULL;
        return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}
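
/*
 * Note (illustrative): HVC_IUCV_MAGIC (0xc9e4c3e5) is the string "IUCV" in
 * EBCDIC.  With the default of a single device, the only valid vtermno is
 * therefore 0xc9e4c3e5, which maps to hvc_iucv_table[0]; a second device
 * would use vtermno 0xc9e4c3e6 and hvc_iucv_table[1], and so on (see
 * hvc_iucv_alloc(), which registers each terminal as HVC_IUCV_MAGIC + id).
 */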

/**
 * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * Note: The total message size is the internal buffer size plus the size of
 * the iucv_tty_msg header.
 *
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
        struct iucv_tty_buffer *bufp;

        bufp = mempool_alloc(hvc_iucv_mempool, flags);
        if (!bufp)
                return NULL;
        memset(bufp, 0, sizeof(struct iucv_tty_buffer));

        if (size > 0) {
                bufp->msg.length = MSG_SIZE(size);
                bufp->mbuf = kmalloc(bufp->msg.length, flags);
                if (!bufp->mbuf) {
                        mempool_free(bufp, hvc_iucv_mempool);
                        return NULL;
                }
                bufp->mbuf->version = MSG_VERSION;
                bufp->mbuf->type = MSG_TYPE_DATA;
                bufp->mbuf->datalen = (u16) size;
        }
        return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 *
 * The destroy_tty_buffer() function frees the internal data buffer and returns
 * the struct iucv_tty_buffer element back to the mempool for freeing.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
        kfree(bufp->mbuf);
        mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List head pointer to a list containing struct iucv_tty_buffer
 *		elements.
 *
 * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
 * list @list.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
        struct iucv_tty_buffer *ent, *next;

        list_for_each_entry_safe(ent, next, list, list) {
                list_del(&ent->list);
                destroy_tty_buffer(ent);
        }
}

/**
 * hvc_iucv_write() - Receive IUCV message and write data to HVC console buffer.
 * @priv:		Pointer to hvc_iucv_private structure.
 * @buf:		HVC console buffer for writing received terminal data.
 * @count:		HVC console buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, then the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If the message data has been entirely written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal; zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC console layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
                          char *buf, int count, int *has_more_data)
{
        struct iucv_tty_buffer *rb;
        int written;
        int rc;

        /* Immediately return if there is no IUCV connection */
        if (priv->iucv_state == IUCV_DISCONN)
                return 0;

        /* If the IUCV path has been severed, return -EPIPE to inform the
         * hvc console layer to hang up the tty device. */
        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        /* check if there are pending messages */
        if (list_empty(&priv->tty_inqueue))
                return 0;

        /* receive an iucv message and flip data to the tty (ldisc) */
        rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

        written = 0;
        if (!rb->mbuf) { /* message not yet received ... */
                /* allocate mem to store msg data; if no memory is available
                 * then leave the buffer on the list and re-try later */
                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
                if (!rb->mbuf)
                        return -ENOMEM;

                rc = __iucv_message_receive(priv->path, &rb->msg, 0,
                                            rb->mbuf, rb->msg.length, NULL);
                switch (rc) {
                case 0:         /* Successful       */
                        break;
                case 2:         /* No message found */
                case 9:         /* Message purged   */
                        break;
                default:
                        written = -EIO;
                }
                /* remove buffer if an error has occurred or received data
                 * is not correct */
                if (rc || (rb->mbuf->version != MSG_VERSION) ||
                    (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
                        goto out_remove_buffer;
        }

        switch (rb->mbuf->type) {
        case MSG_TYPE_DATA:
                written = min_t(int, rb->mbuf->datalen - rb->offset, count);
                memcpy(buf, rb->mbuf->data + rb->offset, written);
                if (written < (rb->mbuf->datalen - rb->offset)) {
                        rb->offset += written;
                        *has_more_data = 1;
                        goto out_written;
                }
                break;

        case MSG_TYPE_WINSIZE:
                if (rb->mbuf->datalen != sizeof(struct winsize))
                        break;
                hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
                break;

        case MSG_TYPE_ERROR:    /* ignored ... */
        case MSG_TYPE_TERMENV:  /* ignored ... */
        case MSG_TYPE_TERMIOS:  /* ignored ... */
                break;
        }

out_remove_buffer:
        list_del(&rb->list);
        destroy_tty_buffer(rb);
        *has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
        return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The hvc_console thread calls this method to read characters from
 * the terminal backend. If an IUCV communication path has been established,
 * pending IUCV messages are received and data is copied into buffer @buf
 * up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int written;
        int has_more_data;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        has_more_data = 0;
        written = hvc_iucv_write(priv, buf, count, &has_more_data);
        spin_unlock(&priv->lock);

        /* if there are still messages on the queue... schedule another run */
        if (has_more_data)
                hvc_kick();

        return written;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * If an IUCV communication path is established, the function copies the buffer
 * data to a newly allocated struct iucv_tty_buffer element, sends the data and
 * puts the element to the outqueue.
 *
 * If there is no IUCV communication path established, the function returns 0.
 * If an existing IUCV communication path has been severed, the function returns
 * -EPIPE (can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
                         int count)
{
        struct iucv_tty_buffer *sb;
        int rc;
        u16 len;

        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        if (priv->iucv_state == IUCV_DISCONN)
                return 0;

        len = min_t(u16, MSG_MAX_DATALEN, count);

        /* allocate internal buffer to store msg data and also compute total
         * message length */
        sb = alloc_tty_buffer(len, GFP_ATOMIC);
        if (!sb)
                return -ENOMEM;

        sb->mbuf->datalen = len;
        memcpy(sb->mbuf->data, buf, len);

        list_add_tail(&sb->list, &priv->tty_outqueue);

        rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
                                 (void *) sb->mbuf, sb->msg.length);
        if (rc) {
                list_del(&sb->list);
                destroy_tty_buffer(sb);
                len = 0;
        }

        return len;
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The hvc_console thread calls this method to write characters to the
 * terminal backend.
 * The function calls hvc_iucv_send() under the lock of the
 * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int sent;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        sent = hvc_iucv_send(priv, buf, count);
        spin_unlock(&priv->lock);

        return sent;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 *	hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return 0;

        spin_lock_bh(&priv->lock);
        priv->tty_state = TTY_OPENED;
        spin_unlock_bh(&priv->lock);

        return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * The function destroys the struct iucv_tty_buffer elements on the in- and
 * outqueue, and resets the tty and IUCV states to TTY_CLOSED and IUCV_DISCONN.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
        destroy_tty_buffer_list(&priv->tty_outqueue);
        destroy_tty_buffer_list(&priv->tty_inqueue);

        priv->tty_state = TTY_CLOSED;
        priv->iucv_state = IUCV_DISCONN;
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 *	hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 *
 * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
 * an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 * disable writing to the tty by other applications).
 *
 * If the tty has been opened (e.g. getty) and an established IUCV path has been
 * severed (we caused the tty hangup in that case), then the function invokes
 * hvc_iucv_cleanup() to clean up.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        spin_lock_bh(&priv->lock);
        /* NOTE: If the hangup was scheduled by ourselves (from the iucv
         *       path_severed callback [IUCV_SEVERED]), then we have to
         *       finally clean up the tty backend structure and set state to
         *       TTY_CLOSED.
         *
         *       If the tty was hung up otherwise (e.g. vhangup()), then we
         *       ignore this hangup and keep an established IUCV path open...
         *       (...the reason is that we are not able to connect back to the
         *       client if we disconnect on hang up) */
        priv->tty_state = TTY_CLOSED;

        if (priv->iucv_state == IUCV_SEVERED)
                hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that the last tty device file
 * descriptor has been closed.
 * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
 * instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;
        struct iucv_path *path;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        spin_lock_bh(&priv->lock);
        path = priv->path;              /* save reference to IUCV path */
        priv->path = NULL;
        hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);

        /* sever IUCV path outside of priv->lock due to lock ordering of:
         * priv->lock <--> iucv_table_lock */
        if (path) {
                iucv_path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	Originator z/VM system identifier
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine whether the pending path
 * belongs to a terminal managed by this HVC backend.
 * If the check is successful, then an additional check is done to ensure
 * that a terminal cannot be accessed multiple times (only one connection
 * to a terminal is allowed). In that particular case, the pending path is
 * severed. If it is the first connection, the pending path is accepted and
 * associated to the struct hvc_iucv_private. The iucv state is updated to
 * reflect that a communication path has been established.
 *
 * Returns 0 if the path belongs to a terminal managed by this HVC backend;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        struct hvc_iucv_private *priv;
        u8 nuser_data[16];
        int i, rc;

        priv = NULL;
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i] &&
                    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
                        priv = hvc_iucv_table[i];
                        break;
                }

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);

        /* If the terminal is already connected or being severed, then sever
         * this path to enforce that there is only ONE established communication
         * path per terminal. */
        if (priv->iucv_state != IUCV_DISCONN) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }

        /* accept path */
        memcpy(nuser_data, ipuser + 8, 8);      /* remote service (for af_iucv) */
        memcpy(nuser_data + 8, ipuser, 8);      /* local service  (for af_iucv) */
        path->msglim = 0xffff;                  /* IUCV MSGLIMIT */
        path->flags &= ~IUCV_IPRMDATA;          /* TODO: use IUCV_IPRMDATA */
        rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
        if (rc) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }
        priv->path = path;
        priv->iucv_state = IUCV_CONNECTED;

out_path_handled:
        spin_unlock(&priv->lock);
        return 0;
}
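
/*
 * Illustration (derived from the path_pending() handler above, not from an
 * AF_IUCV specification): for a connection to the first terminal, ipuser[0..7]
 * carries the target service name, e.g. "lnxhvc0 " in EBCDIC (matching
 * priv->srv_name), and ipuser[8..15] the originator's port.  The reply user
 * data (nuser_data) swaps the two halves, so the connecting peer sees its own
 * port first, followed by the terminal's service name.
 */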

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function also severs the path (as required by the IUCV protocol) and
 * sets the iucv state to IUCV_SEVERED for the associated struct
 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
 *
 * If the tty portion of the HVC has been closed, the queued buffers are
 * cleaned up in addition.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
        struct hvc_iucv_private *priv = path->private;

        spin_lock(&priv->lock);
        priv->iucv_state = IUCV_SEVERED;

        /* NOTE: If the tty has not yet been opened by a getty program
         *       (e.g. to see console messages), then cleanup the
         *       hvc_iucv_private structure to allow re-connects.
         *
         *       If the tty has been opened, the get_chars() callback returns
         *       -EPIPE to signal the hvc console layer to hang up the tty. */
        priv->path = NULL;
        if (priv->tty_state == TTY_CLOSED)
                hvc_iucv_cleanup(priv);
        spin_unlock(&priv->lock);

        /* finally sever path (outside of priv->lock due to lock ordering) */
        iucv_path_sever(path, ipuser);
        iucv_path_free(path);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function stores an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * However, if the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *rb;

        spin_lock(&priv->lock);

        /* reject messages if tty has not yet been opened */
        if (priv->tty_state == TTY_CLOSED) {
                iucv_message_reject(path, msg);
                goto unlock_return;
        }

        /* allocate an empty buffer element to store the message reference */
        rb = alloc_tty_buffer(0, GFP_ATOMIC);
        if (!rb) {
                iucv_message_reject(path, msg);
                goto unlock_return;     /* -ENOMEM */
        }
        rb->msg = *msg;

        list_add_tail(&rb->list, &priv->tty_inqueue);

        hvc_kick();     /* wake up hvc console thread */

unlock_return:
        spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery and the
 * message is removed from the outqueue. Additional delivery information
 * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
                                  struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *ent, *next;
        LIST_HEAD(list_remove);

        spin_lock(&priv->lock);
        list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
                if (ent->msg.id == msg->id) {
                        list_move(&ent->list, &list_remove);
                        break;
                }
        spin_unlock(&priv->lock);
        destroy_tty_buffer_list(&list_remove);
}


/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
        .get_chars = hvc_iucv_get_chars,
        .put_chars = hvc_iucv_put_chars,
        .notifier_add = hvc_iucv_notifier_add,
        .notifier_del = hvc_iucv_notifier_del,
        .notifier_hangup = hvc_iucv_notifier_hangup,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:	hvc_iucv_table index
 *
 * This function allocates a new hvc_iucv_private struct and puts the
 * instance into hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id)
{
        struct hvc_iucv_private *priv;
        char name[9];
        int rc;

        priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        spin_lock_init(&priv->lock);
        INIT_LIST_HEAD(&priv->tty_outqueue);
        INIT_LIST_HEAD(&priv->tty_inqueue);

        /* Finally allocate hvc */
        priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
                              HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
        if (IS_ERR(priv->hvc)) {
                rc = PTR_ERR(priv->hvc);
                kfree(priv);
                return rc;
        }

        /* setup iucv related information */
        snprintf(name, 9, "lnxhvc%-2d", id);
        memcpy(priv->srv_name, name, 8);
        ASCEBC(priv->srv_name, 8);

        hvc_iucv_table[id] = priv;
        return 0;
}
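
/*
 * Example (illustrative): for id 0, the snprintf() above produces "lnxhvc0 "
 * (the "%-2d" conversion left-justifies the id in a two-character field, so
 * single-digit ids get a trailing blank).  The first eight characters are
 * then converted to EBCDIC and used as the IUCV service name a z/VM peer
 * connects to (compare hvc_iucv_path_pending(), which matches ipuser against
 * srv_name).
 */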

/**
 * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
 */
static int __init hvc_iucv_init(void)
{
        int rc, i;

        if (!MACHINE_IS_VM) {
                pr_warning("The z/VM IUCV Hypervisor console cannot be "
                           "used without z/VM.\n");
                return -ENODEV;
        }

        if (!hvc_iucv_devices)
                return -ENODEV;

        if (hvc_iucv_devices > MAX_HVC_IUCV_LINES)
                return -EINVAL;

        hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
                                        sizeof(struct iucv_tty_buffer),
                                        0, 0, NULL);
        if (!hvc_iucv_buffer_cache) {
                pr_err("Not enough memory for driver initialization "
                       "(rs=%d).\n", 1);
                return -ENOMEM;
        }

        hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
                                                    hvc_iucv_buffer_cache);
        if (!hvc_iucv_mempool) {
                pr_err("Not enough memory for driver initialization "
                       "(rs=%d).\n", 2);
                kmem_cache_destroy(hvc_iucv_buffer_cache);
                return -ENOMEM;
        }

        /* allocate hvc_iucv_private structs */
        for (i = 0; i < hvc_iucv_devices; i++) {
                rc = hvc_iucv_alloc(i);
                if (rc) {
                        pr_err("Could not create new z/VM IUCV HVC backend "
                               "rc=%d.\n", rc);
                        goto out_error_hvc;
                }
        }

        /* register IUCV callback handler */
        rc = iucv_register(&hvc_iucv_handler, 0);
        if (rc) {
                pr_err("Could not register iucv handler (rc=%d).\n", rc);
                goto out_error_hvc;
        }

        return 0;

out_error_hvc:
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i]) {
                        if (hvc_iucv_table[i]->hvc)
                                hvc_remove(hvc_iucv_table[i]->hvc);
                        kfree(hvc_iucv_table[i]);
                }
        mempool_destroy(hvc_iucv_mempool);
        kmem_cache_destroy(hvc_iucv_buffer_cache);
        return rc;
}

/**
 * hvc_iucv_console_init() - Early console initialization
 */
static int __init hvc_iucv_console_init(void)
{
        if (!MACHINE_IS_VM || !hvc_iucv_devices)
                return -ENODEV;
        return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
        return strict_strtoul(val, 10, &hvc_iucv_devices);
}


module_init(hvc_iucv_init);
console_initcall(hvc_iucv_console_init);
__setup("hvc_iucv=", hvc_iucv_config);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
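
/*
 * Usage sketch (assumption, not taken from this file): with the backend built
 * into the kernel, booting with "hvc_iucv=2" on the kernel command line makes
 * hvc_iucv_init() set up two terminals, reachable through the IUCV service
 * names "lnxhvc0 " and "lnxhvc1 ".  Omitting the parameter keeps the default
 * of one terminal; "hvc_iucv=0" disables the backend (hvc_iucv_init() then
 * returns -ENODEV).
 */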