/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

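/*
 * An event is delivered to userspace in up to two parts: v[0] normally
 * holds the fixed-size fw_cdev_event_* struct and v[1] an optional
 * variable-length payload; dequeue_event() copies both into the read()
 * buffer, in that order, and then frees the event.
 */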
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

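/*
 * Snapshot the client's view of the current bus generation and topology
 * under card->lock, so that generation and node IDs are consistent with
 * each other.
 */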
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;

	spin_unlock_irqrestore(&card->lock, flags);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

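/*
 * FW_CDEV_IOC_GET_INFO: negotiate the ABI version, optionally copy the
 * device's configuration ROM and an initial bus reset event to userspace,
 * and remember the client's bus_reset_closure for later bus reset events.
 */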
static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

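/*
 * Allocate an idr handle for a client resource. idr_get_new() returns
 * -EAGAIN when its preallocated memory has been used up, in which case the
 * idr_pre_get()/idr_get_new() sequence is simply retried. Fails with
 * -ECANCELED once the client has entered shutdown.
 */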
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

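/*
 * Look up a resource by handle, check that its release callback matches the
 * expected one, and detach it from the idr. The caller either takes
 * ownership via *resource or, if resource is NULL, the resource is released
 * immediately.
 */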
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;
	struct fw_cdev_event_response *r = &response->response;

	if (length < r->length)
		r->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(r->data, payload, r->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * If called while in shutdown, the idr tree must be left untouched.
	 * The idr handle will be removed later.
	 */
	if (!client->in_shutdown)
		idr_remove(&client->resource_idr, response->resource.handle);
	spin_unlock_irqrestore(&client->lock, flags);

	r->type = FW_CDEV_EVENT_RESPONSE;
	r->rcode = rcode;

	/*
	 * In the case that sizeof(*r) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
		queue_event(client, &response->event, r, sizeof(*r),
			    r->data, r->length);
	else
		queue_event(client, &response->event, r, sizeof(*r) + r->length,
			    NULL, 0);
}

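/*
 * FW_CDEV_IOC_SEND_REQUEST: validate the transaction code and length,
 * register the outbound transaction as a client resource so that it can be
 * cancelled, and submit it; the result is delivered later as an
 * FW_CDEV_EVENT_RESPONSE event by complete_transaction().
 */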
static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;
	int ret;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		ret = -EINVAL;
		goto failed;
	}

	response->resource.release = release_transaction;
	ret = add_client_resource(client, &response->resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	if (request->data)
		return sizeof(request) + request->length;
	else
		return sizeof(request);
 failed:
	kfree(response);

	return ret;
}

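/*
 * Per-client bookkeeping for an address range allocated with
 * FW_CDEV_IOC_ALLOCATE and for inbound requests that are still waiting for
 * a response from userspace.
 */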
struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

static void handle_request(struct fw_card *card, struct fw_request *r,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;
	int ret;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL)
		goto failed;

	request->request = r;
	request->data = payload;
	request->length = length;

	request->resource.release = release_request;
	ret = add_client_resource(client, &request->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.handle = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(request);
	kfree(e);
	fw_send_response(card, r, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

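/*
 * FW_CDEV_IOC_ALLOCATE: register an address range handler whose inbound
 * requests are forwarded to this client by handle_request().
 */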
static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;
	int ret;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	ret = fw_core_add_address_handler(&handler->handler, &region);
	if (ret < 0) {
		kfree(handler);
		return ret;
	}

	handler->resource.release = release_address_handler;
	ret = add_client_resource(client, &handler->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &handler->resource);
		return ret;
	}
	request->handle = handler->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

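/*
 * FW_CDEV_IOC_ADD_DESCRIPTOR / FW_CDEV_IOC_REMOVE_DESCRIPTOR: descriptors
 * supplied by userspace for the local node's configuration ROM, tracked as
 * client resources so that they are removed again when the client goes away.
 */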
struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int ret;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	ret = fw_core_add_descriptor(&descriptor->d);
	if (ret < 0)
		goto failed;

	descriptor->resource.release = release_descriptor;
	ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&descriptor->d);
		goto failed;
	}
	request->handle = descriptor->resource.handle;

	return 0;
 failed:
	kfree(descriptor);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

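/*
 * Interrupt callback of the client's isochronous context: forward the cycle
 * number and the packet headers accumulated since the last interrupt to
 * userspace as an FW_CDEV_EVENT_ISO_INTERRUPT event.
 */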
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure = client->iso_closure;
	irq->interrupt.cycle = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

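/*
 * FW_CDEV_IOC_QUEUE_ISO: decode the packed control words supplied by
 * userspace and queue the packets, taking payloads from the mmap()'ed iso
 * buffer, until the packet array is exhausted or fw_iso_context_queue()
 * fails. Returns the number of packets queued.
 */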
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly. Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through. In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size -= uptr_to_u64(p) - request->packets;
	request->packets = uptr_to_u64(p);
	request->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

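/*
 * The index into this array is the _IOC_NR() of the corresponding
 * FW_CDEV_IOC_* command, so the order must match the ioctl definitions in
 * linux/firewire-cdev.h.
 */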
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};

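/*
 * Copy the ioctl argument into a stack buffer for _IOC_WRITE commands, call
 * the handler selected by _IOC_NR(cmd), and copy the (possibly updated)
 * buffer back to userspace for _IOC_READ commands.
 */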
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

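/* idr_for_each() callback: release one remaining resource at file release time. */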
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	unsigned long flags;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irqsave(&client->lock, flags);
	client->in_shutdown = true;
	spin_unlock_irqrestore(&client->lock, flags);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	/*
	 * FIXME: client should be reference-counted. It's extremely unlikely
	 * but there may still be transactions being completed at this point.
	 */
	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};