/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be used as simply as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char *msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg) + 1) < 0) {
 *        ... could not connect to <pipename> service
 *        close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details are left as an exercise to the
 *    // reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */


#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* List of bitflags returned in status of CMD_POLL command */
enum PipePollFlags {
	PIPE_POLL_IN	= 1 << 0,
	PIPE_POLL_OUT	= 1 << 1,
	PIPE_POLL_HUP	= 1 << 2
};

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
enum PipeErrors {
	PIPE_ERROR_INVAL	= -1,
	PIPE_ERROR_AGAIN	= -2,
	PIPE_ERROR_NOMEM	= -3,
	PIPE_ERROR_IO		= -4
};

/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
	PIPE_WAKE_CLOSED = 1 << 0,	/* emulator closed pipe */
	PIPE_WAKE_READ = 1 << 1,	/* pipe can now be read from */
	PIPE_WAKE_WRITE = 1 << 2	/* pipe can now be written to */
};

/* Bit flags for the 'flags' field */
enum PipeFlagsBits {
	BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
	BIT_WAKE_ON_WRITE = 1,   /* want to be woken on writes */
	BIT_WAKE_ON_READ = 2,    /* want to be woken on reads */
};

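/*
 * Offsets of the device's I/O registers, relative to the MMIO page mapped
 * at goldfish_pipe_dev::base. Writing a pipe id to PIPE_REG_CMD asks the
 * host to execute the command currently stored in that pipe's command
 * buffer; the SIGNAL/OPEN buffer registers tell the host where the shared
 * buffers declared below live in guest physical memory.
 */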
enum PipeRegs {
	PIPE_REG_CMD = 0,

	PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
	PIPE_REG_SIGNAL_BUFFER = 8,
	PIPE_REG_SIGNAL_BUFFER_COUNT = 12,

	PIPE_REG_OPEN_BUFFER_HIGH = 20,
	PIPE_REG_OPEN_BUFFER = 24,

	PIPE_REG_VERSION = 36,

	PIPE_REG_GET_SIGNALLED = 48,
};

enum PipeCmdCode {
	PIPE_CMD_OPEN = 1,	/* to be used by the pipe device itself */
	PIPE_CMD_CLOSE,
	PIPE_CMD_POLL,
	PIPE_CMD_WRITE,
	PIPE_CMD_WAKE_ON_WRITE,
	PIPE_CMD_READ,
	PIPE_CMD_WAKE_ON_READ,

	/*
	 * TODO(zyy): implement a deferred read/write execution to allow
	 * parallel processing of pipe operations on the host.
	 */
	PIPE_CMD_WAKE_ON_DONE_IO,
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};

/* A single signalled pipe information */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;
	/* The wake flags the pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;
	/* wake flags the host has signalled,
	 *  - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;
	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *                                       in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;
};

static struct goldfish_pipe_dev pipe_dev[1] = {};

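/*
 * Execute a command on the host: the guest fills in the pipe's shared
 * command buffer, then writes the pipe id to PIPE_REG_CMD. That register
 * write traps into the emulator, which is expected to run the command
 * synchronously and store the result in command_buffer->status before the
 * write returns. Callers must hold pipe->lock; goldfish_cmd() below is the
 * locking wrapper.
 */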
static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator in the
 * command buffer's status field into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

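/*
 * Pin the user pages backing [first_page, last_page] with
 * get_user_pages_fast() so the host can access them during the transfer.
 * At most MAX_BUFFERS_PER_COMMAND pages are pinned per call; if the range
 * is clipped, or fewer pages than requested could be pinned, the last
 * pinned page is treated as fully used and *iter_last_page_size is set to
 * PAGE_SIZE. Returns the number of pinned pages or -EFAULT on failure.
 */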
static int pin_user_pages(unsigned long first_page, unsigned long last_page,
	unsigned int last_page_size, int is_write,
	struct page *pages[MAX_BUFFERS_PER_COMMAND],
	unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
			pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;
	return ret;
}

static void release_user_pages(struct page **pages, int pages_count,
	int is_write, s32 consumed_size)
{
	int i;

	for (i = 0; i < pages_count; i++) {
		if (!is_write && consumed_size > 0)
			set_page_dirty(pages[i]);
		put_page(pages[i]);
	}
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(
	struct page **pages, int pages_count,
	unsigned long address, unsigned long address_end,
	unsigned long first_page, unsigned long last_page,
	unsigned int iter_last_page_size, int is_write,
	struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}

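/*
 * Perform one PIPE_CMD_READ/PIPE_CMD_WRITE transaction covering as much of
 * [address, address_end) as fits into MAX_BUFFERS_PER_COMMAND pinned pages:
 * pin the user pages, describe them in the pipe's command buffer, issue the
 * command under pipe->lock, then report how many bytes the host consumed
 * through *consumed_size and the raw command status through *status.
 */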
static int transfer_max_buffers(struct goldfish_pipe *pipe,
	unsigned long address, unsigned long address_end, int is_write,
	unsigned long last_page, unsigned int last_page_size,
	s32 *consumed_size, int *status)
{
	static struct page *pages[MAX_BUFFERS_PER_COMMAND];
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count = pin_user_pages(first_page, last_page,
			last_page_size, is_write,
			pages, &iter_last_page_size);

	if (pages_count < 0)
		return pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	populate_rw_params(pages, pages_count, address, address_end,
		first_page, last_page, iter_last_page_size, is_write,
		pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_cmd_locked(pipe,
			is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);

	return 0;
}

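/*
 * Sleep until the host signals that the pipe can be read from or written to
 * again: the interrupt path clears the corresponding BIT_WAKE_ON_* flag and
 * wakes wake_queue. Returns -EIO if the host closed the pipe meanwhile, or
 * -ERESTARTSYS if the wait was interrupted by a signal.
 */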
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wakeBit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	(void)goldfish_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wakeBit, &pipe->flags)) {
		if (wait_event_interruptible(
				pipe->wake_queue,
				!test_bit(wakeBit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

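/*
 * Common implementation of read() and write(): loop over the user buffer,
 * moving up to MAX_BUFFERS_PER_COMMAND pages per host command. A positive
 * status means more data may follow, zero means EOF, and PIPE_ERROR_AGAIN
 * makes us either return -EAGAIN (in O_NONBLOCK mode) or block in
 * wait_for_host_signal() until the host wakes the pipe up again.
 */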
static ssize_t goldfish_pipe_read_write(struct file *filp,
	char __user *buffer, size_t bufflen, int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
			buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end,
				is_write, last_page, last_page_size,
				&consumed_size, &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
	size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
			/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				const char __user *buffer, size_t bufflen,
				loff_t *ppos)
{
	return goldfish_pipe_read_write(filp,
		/* cast away the const */(char __user *)buffer, bufflen,
		/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

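/*
 * Called with dev->lock held from the IRQ handler: record the wake flags
 * reported for pipe |id| and, unless it is already queued, push the pipe
 * onto the device's doubly linked list of signalled pipes.
 */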
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
	u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled
		|| dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
	struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked(): we want to wake the
		 * sleeping pipe operations as quickly as possible.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

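/*
 * Tasklet bottom half: drain the signalled-pipes list built by the IRQ
 * handler. For every pipe, either mark it closed-on-host or clear the
 * wake-on-read/write bits it was waiting for, then wake up any sleepers
 * on its wake_queue.
 */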
static void goldfish_interrupt_task(unsigned long unused)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}
DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count;
 *     it only resets the IRQ if it has returned all signalled pipes,
 *     otherwise it leaves it raised, so the IRQ handler will be called
 *     again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *     list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev != pipe_dev)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&goldfish_interrupt_tasklet);
	return IRQ_HANDLED;
}

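/*
 * Find a free slot in dev->pipes, growing the array by doubling its
 * capacity when every slot is in use (GFP_ATOMIC, since dev->lock is held).
 * Returns the slot index or -ENOMEM.
 */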
static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/**
 *	goldfish_pipe_open	-	open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 *	right now so this is fine. A move to 64bit will need to revisit
 *	this addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in the host's address space.
	 */
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	(void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

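/*
 * One-time setup for the v2 pipe device: request the IRQ, register the misc
 * device, allocate the pipes array and the page shared with the host, and
 * tell the host where the signalled-pipes and open-command buffers live by
 * writing their physical addresses into the I/O registers.
 */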
static int goldfish_pipe_device_init(struct platform_device *pdev)
{
	char *page;
	struct goldfish_pipe_dev *dev = pipe_dev;
	int err = devm_request_irq(&pdev->dev, dev->irq,
				goldfish_pipe_interrupt,
				IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	err = misc_register(&goldfish_pipe_dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			GFP_KERNEL);
	if (!dev->pipes)
		return -ENOMEM;

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those
	 * buffers needs to be contained in a single physical page. The
	 * easiest choice is to just allocate a page and place the buffers
	 * in it.
	 */
	if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
		return -ENOMEM;

	page = (char *)__get_free_page(GFP_KERNEL);
	if (!page) {
		kfree(dev->pipes);
		return -ENOMEM;
	}
	dev->buffers = (struct goldfish_pipe_dev_buffers *)page;

	/* Send the buffer addresses to the host */
	{
		u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);

		writel((u32)(unsigned long)(paddr >> 32),
			dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
		writel((u32)(unsigned long)paddr,
			dev->base + PIPE_REG_SIGNAL_BUFFER);
		writel((u32)MAX_SIGNALLED_PIPES,
			dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

		paddr = __pa(&dev->buffers->open_command_params);
		writel((u32)(unsigned long)(paddr >> 32),
			dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
		writel((u32)(unsigned long)paddr,
			dev->base + PIPE_REG_OPEN_BUFFER);
	}
	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	misc_deregister(&goldfish_pipe_dev);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
		return -ENOMEM;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: the v1 driver used to not report its version, so we write it
	 * before reading the device version back: this allows the host
	 * implementation to detect the old driver (if there was no version
	 * write before read).
	 */
	writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	err = goldfish_pipe_device_init(pdev);
	if (!err)
		return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	goldfish_pipe_device_deinit(pdev);
	dev->base = NULL;
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");