/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
#include "linux/mutex.h"
#include "linux/slab.h"
#include "linux/workqueue.h"
#include "asm/atomic.h"
#include "init.h"
#include "irq_kern.h"
#include "os.h"
#include "port.h"

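/*
 * State for one listening host port.  Connections accepted on ->fd sit on
 * ->pending until their descriptor arrives over the helper socketpair, then
 * move to ->connections, where port_wait() picks them up once ->done has
 * been completed.
 */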
struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

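/*
 * One accepted connection.  fd starts out as the descriptor returned by
 * port_connection() and is replaced in pipe_interrupt() by the descriptor
 * received from the helper over socket[] (or by a negative error if that
 * hand-over fails).
 */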
struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

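/*
 * IRQ handler for the helper socketpair: receive the connection's file
 * descriptor, move the connection from ->pending to ->connections and wake
 * up any port_wait() caller.  It completes ->done even on failure, so that
 * port_wait() can free the IRQ and discard the broken connection.
 */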
static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int fd;

	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
	if (fd < 0) {
		if (fd == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
		       -fd);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

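/*
 * Pick up one incoming connection via port_connection(), set up a struct
 * connection for it and register pipe_interrupt() on the helper socket.
 * Returns 1 if a connection was picked up, 0 when there is nothing left to
 * accept or setting it up failed.
 */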
static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		 { .list = LIST_HEAD_INIT(conn->list),
		   .fd = fd,
		   .socket = { socket[0], socket[1] },
		   .telnetd_pid = pid,
		   .port = port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "telnetd", conn)) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

 out_free:
	kfree(conn);
 out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
 out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

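/*
 * Deferred from port_interrupt(): for every port that signalled activity,
 * re-enable its IRQ and drain all currently pending connections.
 */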
static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		reactivate_fd(port->fd, ACCEPT_IRQ);
		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);

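/*
 * IRQ handler for the listening descriptor: just note that the port has a
 * connection waiting and defer the actual accept to process context.
 */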
static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

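/*
 * Look up the port_list for port_num, creating it (and starting to listen
 * via port_listen_fd()) if this is the first user, then allocate and return
 * a port_dev referring to it.  Returns NULL on failure.
 */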
void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "port", port)) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		 { .list = LIST_HEAD_INIT(port->list),
		   .wait_count = ATOMIC_INIT(0),
		   .has_connection = 0,
		   .port = port_num,
		   .fd = fd,
		   .pending = LIST_HEAD_INIT(port->pending),
		   .connections = LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

 found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port = port,
				    .helper_pid = -1,
				    .telnetd_pid = -1 });
	goto out;

 out_close:
	os_close_file(fd);
 out_free:
	kfree(port);
 out:
	mutex_unlock(&ports_mutex);
	return dev;
}

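/*
 * Wait until a connection on this device's port is ready.  Connections
 * whose descriptor hand-over failed are discarded and the wait resumes;
 * on success the received descriptor is returned and the helper/telnetd
 * pids are recorded in the port_dev.  Returns -ERESTARTSYS if interrupted.
 */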
int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/* This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always ups
		 * the semaphore regardless of whether it got a successful
		 * connection.  Then we loop here throwing out failed
		 * connections until a good one is found.
		 */
		free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
 out:
	atomic_dec(&port->wait_count);
	return fd;
}

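/*
 * Kill the helper and telnetd host processes attached to this device, if
 * any, and forget their pids.
 */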
void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

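/*
 * Exitcall: release the accept IRQ and close the listening descriptor of
 * every port that was set up.
 */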
static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);