arch/um/drivers/line.c
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "chan.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096

static irqreturn_t line_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;

	if (line)
		chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
	return IRQ_HANDLED;
}

static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(&line->chan_list, &line->task, line->tty,
			       line->driver->read_irq);
}

/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
	int n;

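	/* One byte is always left unused so that head == tail means "empty" */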
	if (line->buffer == NULL)
		return LINE_BUFSIZE - 1;

	/* This is for the case where the buffer is wrapped! */
	n = line->head - line->tail;

	if (n <= 0)
		n += LINE_BUFSIZE; /* The other case */
	return n - 1;
}

int line_write_room(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int room;

	spin_lock_irqsave(&line->lock, flags);
	room = write_room(line);
	spin_unlock_irqrestore(&line->lock, flags);

	return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&line->lock, flags);
	/* write_room() subtracts 1 for the needed NULL, so we re-add it. */
	ret = LINE_BUFSIZE - (write_room(line) + 1);
	spin_unlock_irqrestore(&line->lock, flags);

	return ret;
}

/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
	int end, room;

	if (line->buffer == NULL) {
		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
		if (line->buffer == NULL) {
			printk(KERN_ERR "buffer_data - atomic allocation "
			       "failed\n");
			return 0;
		}
		line->head = line->buffer;
		line->tail = line->buffer;
	}

	room = write_room(line);
	len = (len > room) ? room : len;

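	/* end: number of bytes between ->tail and the physical end of the buffer */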
	end = line->buffer + LINE_BUFSIZE - line->tail;

	if (len < end) {
		memcpy(line->tail, buf, len);
		line->tail += len;
	}
	else {
		/* The circular buffer is wrapping */
		memcpy(line->tail, buf, end);
		buf += end;
		memcpy(line->buffer, buf, len - end);
		line->tail = line->buffer + len - end;
	}

	return len;
}

/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty on exit,
 * and -errno when an error occurred.
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
	int n, count;

	if ((line->buffer == NULL) || (line->head == line->tail))
		return 1;

	if (line->tail < line->head) {
		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
		count = line->buffer + LINE_BUFSIZE - line->head;

		n = write_chan(&line->chan_list, line->head, count,
			       line->driver->write_irq);
		if (n < 0)
			return n;
		if (n == count) {
			/*
			 * We have flushed from ->head to buffer end, now we
			 * must flush only from the beginning to ->tail.
			 */
			line->head = line->buffer;
		} else {
			line->head += n;
			return 0;
		}
	}

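	/* The remaining data, if any, now lies in [->head, ->tail) without wrapping */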
	count = line->tail - line->head;
	n = write_chan(&line->chan_list, line->head, count,
		       line->driver->write_irq);

	if (n < 0)
		return n;

	line->head += n;
	return line->head == line->tail;
}

void line_flush_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&line->lock, flags);
	flush_buffer(line);
	spin_unlock_irqrestore(&line->lock, flags);
}

/*
 * We map both ->flush_chars and ->put_char (which come as a pair) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
	line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
	return line_write(tty, &ch, sizeof(ch));
}

int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int n, ret = 0;

	spin_lock_irqsave(&line->lock, flags);
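	/*
	 * If data is already queued in the ring buffer, the new bytes must be
	 * appended behind it to preserve ordering; otherwise try a direct
	 * write and buffer whatever the channel did not accept.
	 */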
	if (line->head != line->tail)
		ret = buffer_data(line, buf, len);
	else {
		n = write_chan(&line->chan_list, buf, len,
			       line->driver->write_irq);
		if (n < 0) {
			ret = n;
			goto out_up;
		}

		len -= n;
		ret += n;
		if (len > 0)
			ret += buffer_data(line, buf + n, len);
	}
out_up:
	spin_unlock_irqrestore(&line->lock, flags);
	return ret;
}

void line_set_termios(struct tty_struct *tty, struct ktermios * old)
{
	/* nothing */
}

static const struct {
	int cmd;
	char *level;
	char *name;
} tty_ioctls[] = {
	/* don't print these, they flood the log ... */
	{ TCGETS, NULL, "TCGETS" },
	{ TCSETS, NULL, "TCSETS" },
	{ TCSETSW, NULL, "TCSETSW" },
	{ TCFLSH, NULL, "TCFLSH" },
	{ TCSBRK, NULL, "TCSBRK" },

	/* general tty stuff */
	{ TCSETSF, KERN_DEBUG, "TCSETSF" },
	{ TCGETA, KERN_DEBUG, "TCGETA" },
	{ TIOCMGET, KERN_DEBUG, "TIOCMGET" },
	{ TCSBRKP, KERN_DEBUG, "TCSBRKP" },
	{ TIOCMSET, KERN_DEBUG, "TIOCMSET" },

	/* linux-specific ones */
	{ TIOCLINUX, KERN_INFO, "TIOCLINUX" },
	{ KDGKBMODE, KERN_INFO, "KDGKBMODE" },
	{ KDGKBTYPE, KERN_INFO, "KDGKBTYPE" },
	{ KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
};

int line_ioctl(struct tty_struct *tty, unsigned int cmd,
	       unsigned long arg)
{
	int ret;
	int i;

	ret = 0;
	switch(cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
	case TIOCSETP:
	case TIOCSETN:
#endif
#ifdef TIOCGETC
	case TIOCGETC:
	case TIOCSETC:
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
	case TIOCSLTC:
#endif
	/* Note: these are out of date as we now have TCGETS2 etc but this
	   whole lot should probably go away */
	case TCGETS:
	case TCSETSF:
	case TCSETSW:
	case TCSETS:
	case TCGETA:
	case TCSETAF:
	case TCSETAW:
	case TCSETA:
	case TCXONC:
	case TCFLSH:
	case TIOCOUTQ:
	case TIOCINQ:
	case TIOCGLCKTRMIOS:
	case TIOCSLCKTRMIOS:
	case TIOCPKT:
	case TIOCGSOFTCAR:
	case TIOCSSOFTCAR:
		return -ENOIOCTLCMD;
#if 0
	case TCwhatever:
		/* do something */
		break;
#endif
	default:
		for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
			if (cmd == tty_ioctls[i].cmd)
				break;
		if (i == ARRAY_SIZE(tty_ioctls)) {
			printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
			       __func__, tty->name, cmd);
		}
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

void line_throttle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	deactivate_chan(&line->chan_list, line->driver->read_irq);
	line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	line->throttled = 0;
	chan_interrupt(&line->chan_list, &line->task, tty,
		       line->driver->read_irq);

	/*
	 * Maybe there is enough stuff pending that calling the interrupt
	 * throttles us again. In this case, line->throttled will be 1
	 * again and we shouldn't turn the interrupt back on.
	 */
	if (!line->throttled)
		reactivate_chan(&line->chan_list, line->driver->read_irq);
}

static irqreturn_t line_write_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;
	struct tty_struct *tty = line->tty;
	int err;

	/*
	 * Interrupts are disabled here because genirq keeps irqs disabled when
	 * calling the action handler.
	 */

	spin_lock(&line->lock);
	err = flush_buffer(line);
	if (err == 0) {
		/* line->lock must not be held across the early return */
		spin_unlock(&line->lock);
		return IRQ_NONE;
	} else if (err < 0) {
		line->head = line->buffer;
		line->tail = line->buffer;
	}
	spin_unlock(&line->lock);

	if (tty == NULL)
		return IRQ_NONE;

	tty_wakeup(tty);
	return IRQ_HANDLED;
}

int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
	const struct line_driver *driver = line->driver;
	int err = 0, flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;

	if (input)
		err = um_request_irq(driver->read_irq, fd, IRQ_READ,
				     line_interrupt, flags,
				     driver->read_irq_name, data);
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
				     line_write_interrupt, flags,
				     driver->write_irq_name, data);
	line->have_irq = 1;
	return err;
}

/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration. The checking and modification of the configuration
 * is done under a spinlock. Checking whether the device is in use is done
 * by checking line->tty->count > 1, also under the spinlock.
 *
 * line->count serves to decide whether the device should be enabled or
 * disabled on the host. If it's equal to 0, then we are doing the
 * first open or last close. Otherwise, open and close just return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
	struct line *line = &lines[tty->index];
	int err = -ENODEV;

	mutex_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	err = 0;
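	/* Only the first open attaches the tty and enables the host channels */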
	if (line->count++)
		goto out_unlock;

	BUG_ON(tty->driver_data);
	tty->driver_data = line;
	line->tty = tty;

	err = enable_chan(line);
	if (err) /* line_close() will be called by our caller */
		goto out_unlock;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	if (!line->sigio) {
		chan_enable_winch(&line->chan_list, tty);
		line->sigio = 1;
	}

	chan_window_size(&line->chan_list, &tty->winsize.ws_row,
			 &tty->winsize.ws_col);
out_unlock:
	mutex_unlock(&line->count_lock);
	return err;
}

static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file * filp)
{
	struct line *line = tty->driver_data;

	/*
	 * If line_open fails (and tty->driver_data is never set),
	 * tty_open will call line_close. So just return in this case.
	 */
	if (line == NULL)
		return;

	/* We ignore the error anyway! */
	flush_buffer(line);

	mutex_lock(&line->count_lock);
	BUG_ON(!line->valid);

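	/* Only the last close detaches the tty and drops SIGWINCH handling */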
	if (--line->count)
		goto out_unlock;

	line->tty = NULL;
	tty->driver_data = NULL;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}

out_unlock:
	mutex_unlock(&line->count_lock);
}

void close_lines(struct line *lines, int nlines)
{
	int i;

	for(i = 0; i < nlines; i++)
		close_chan(&lines[i].chan_list, 0);
}

static int setup_one_line(struct line *lines, int n, char *init,
			  const struct chan_opts *opts, char **error_out)
{
	struct line *line = &lines[n];
	int err = -EINVAL;

	mutex_lock(&line->count_lock);

	if (line->count) {
		*error_out = "Device is already open";
		goto out;
	}

	if (!strcmp(init, "none")) {
		if (line->valid) {
			line->valid = 0;
			kfree(line->init_str);
			parse_chan_pair(NULL, line, n, opts, error_out);
			err = 0;
		}
	} else {
		char *new = kstrdup(init, GFP_KERNEL);
		if (!new) {
			*error_out = "Failed to allocate memory";
			/* must not return with count_lock held */
			err = -ENOMEM;
			goto out;
		}
		line->init_str = new;
		line->valid = 1;
		err = parse_chan_pair(new, line, n, opts, error_out);
		if (err) {
			line->init_str = NULL;
			line->valid = 0;
			kfree(new);
		}
	}
out:
	mutex_unlock(&line->count_lock);
	return err;
}

/*
 * Common setup code for both startup command line and mconsole initialization.
 * @conf contains the array (of size @num) of per-device configuration strings;
 * @def receives the default configuration when all devices are configured at
 * once ("con=..." rather than "con#=...");
 * @init is the setup string;
 * @name is the driver name, used in error messages.
 */

int line_setup(char **conf, unsigned int num, char **def,
	       char *init, char *name)
{
	char *error;

	if (*init == '=') {
		/*
		 * We said con=/ssl= instead of con#=, so we are configuring all
		 * consoles at once.
		 */
		*def = init + 1;
	} else {
		char *end;
		unsigned n = simple_strtoul(init, &end, 0);

		if (*end != '=') {
			error = "Couldn't parse device number";
			goto out;
		}
		if (n >= num) {
			error = "Device number out of range";
			goto out;
		}
		conf[n] = end + 1;
	}
	return 0;

out:
	printk(KERN_ERR "Failed to set up %s with "
	       "configuration string \"%s\" : %s\n", name, init, error);
	return -EINVAL;
}

int line_config(struct line *lines, unsigned int num, char *str,
		const struct chan_opts *opts, char **error_out)
{
	char *end;
	int n;

	if (*str == '=') {
		*error_out = "Can't configure all devices from mconsole";
		return -EINVAL;
	}

	n = simple_strtoul(str, &end, 0);
	if (*end++ != '=') {
		*error_out = "Couldn't parse device number";
		return -EINVAL;
	}
	if (n >= num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	}

	return setup_one_line(lines, n, end, opts, error_out);
}

int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
		    int size, char **error_out)
{
	struct line *line;
	char *end;
	int dev, n = 0;

	dev = simple_strtoul(name, &end, 0);
	if ((*end != '\0') || (end == name)) {
		*error_out = "line_get_config failed to parse device number";
		return 0;
	}

	if ((dev < 0) || (dev >= num)) {
		*error_out = "device number out of range";
		return 0;
	}

	line = &lines[dev];

	mutex_lock(&line->count_lock);
	if (!line->valid)
		CONFIG_CHUNK(str, size, n, "none", 1);
	else if (line->tty == NULL)
		CONFIG_CHUNK(str, size, n, line->init_str, 1);
	else n = chan_config_string(&line->chan_list, str, size, error_out);
	mutex_unlock(&line->count_lock);

	return n;
}

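/*
 * Parse a single device number out of *str; since a single device forms a
 * range of one, it is returned as both *start_out and *end_out.
 */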
int line_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*str = end;
	*start_out = n;
	*end_out = n;
	return n;
}

int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
	if (n >= num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	}
	return setup_one_line(lines, n, "none", NULL, error_out);
}

struct tty_driver *register_lines(struct line_driver *line_driver,
				  const struct tty_operations *ops,
				  struct line *lines, int nlines)
{
	int i;
	struct tty_driver *driver = alloc_tty_driver(nlines);

	if (!driver)
		return NULL;

	driver->driver_name = line_driver->name;
	driver->name = line_driver->device_name;
	driver->major = line_driver->major;
	driver->minor_start = line_driver->minor_start;
	driver->type = line_driver->type;
	driver->subtype = line_driver->subtype;
	driver->flags = TTY_DRIVER_REAL_RAW;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, ops);

	if (tty_register_driver(driver)) {
		printk(KERN_ERR "register_lines : can't register %s driver\n",
		       line_driver->name);
		put_tty_driver(driver);
		return NULL;
	}

	for(i = 0; i < nlines; i++) {
		if (!lines[i].valid)
			tty_unregister_device(driver, i);
	}

	mconsole_register_dev(&line_driver->mc);
	return driver;
}

static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);

void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
	struct line *line;
	char *error;
	int i;

	for(i = 0; i < nlines; i++) {
		line = &lines[i];
		INIT_LIST_HEAD(&line->chan_list);

		if (line->init_str == NULL)
			continue;

		if (setup_one_line(lines, i, line->init_str, opts, &error))
			printk(KERN_ERR "setup_one_line failed for "
			       "device %d : %s\n", i, error);
	}
}

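/*
 * Bookkeeping for one SIGWINCH notification source; filled in by
 * register_winch_irq() and consumed by winch_interrupt().
 */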
struct winch {
	struct list_head list;
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	unsigned long stack;
	struct work_struct work;
};

static void __free_winch(struct work_struct *work)
{
	struct winch *winch = container_of(work, struct winch, work);
	free_irq(WINCH_IRQ, winch);

	if (winch->pid != -1)
		os_kill_process(winch->pid, 1);
	if (winch->stack != 0)
		free_stack(winch->stack, 0);
	kfree(winch);
}

static void free_winch(struct winch *winch)
{
	int fd = winch->fd;
	winch->fd = -1;
	if (fd != -1)
		os_close_file(fd);
	list_del(&winch->list);
	__free_winch(&winch->work);
}

static irqreturn_t winch_interrupt(int irq, void *data)
{
	struct winch *winch = data;
	struct tty_struct *tty;
	struct line *line;
	int fd = winch->fd;
	int err;
	char c;

	if (fd != -1) {
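		/* drain the pending byte from the SIGWINCH notification fd */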
		err = generic_read(fd, &c, NULL);
		if (err < 0) {
			if (err != -EAGAIN) {
				winch->fd = -1;
				list_del(&winch->list);
				os_close_file(fd);
				printk(KERN_ERR "winch_interrupt : "
				       "read failed, errno = %d\n", -err);
				printk(KERN_ERR "fd %d is losing SIGWINCH "
				       "support\n", winch->tty_fd);
				INIT_WORK(&winch->work, __free_winch);
				schedule_work(&winch->work);
				return IRQ_HANDLED;
			}
			goto out;
		}
	}
	tty = winch->tty;
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {
			chan_window_size(&line->chan_list, &tty->winsize.ws_row,
					 &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
	}
out:
	if (winch->fd != -1)
		reactivate_fd(winch->fd, WINCH_IRQ);
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
			unsigned long stack)
{
	struct winch *winch;

	winch = kmalloc(sizeof(*winch), GFP_KERNEL);
	if (winch == NULL) {
		printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
		goto cleanup;
	}

	*winch = ((struct winch) { .list	= LIST_HEAD_INIT(winch->list),
				   .fd		= fd,
				   .tty_fd	= tty_fd,
				   .pid		= pid,
				   .tty		= tty,
				   .stack	= stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
			   IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "winch", winch) < 0) {
		printk(KERN_ERR "register_winch_irq - failed to register "
		       "IRQ\n");
		goto out_free;
	}

	spin_lock(&winch_handler_lock);
	list_add(&winch->list, &winch_handlers);
	spin_unlock(&winch_handler_lock);

	return;

out_free:
	kfree(winch);
cleanup:
	os_kill_process(pid, 1);
	os_close_file(fd);
	if (stack != 0)
		free_stack(stack, 0);
}

static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
			free_winch(winch);
			break;
		}
	}
	spin_unlock(&winch_handler_lock);
}

static void winch_cleanup(void)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		free_winch(winch);
	}

	spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);

char *add_xterm_umid(char *base)
{
	char *umid, *title;
	int len;

	umid = get_umid();
	if (*umid == '\0')
		return base;

	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
	title = kmalloc(len, GFP_KERNEL);
	if (title == NULL) {
		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
		return base;
	}

	snprintf(title, len, "%s (%s)", base, umid);
	return title;
}