/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};
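
/*
 * Each of the concrete event types below (bus_reset, response,
 * iso_interrupt, request_event) therefore embeds struct event as its
 * first member, so a queued struct event * can be kfree()'d directly.
 */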

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

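/*
 * The cdev ABI carries userspace pointers as __u64 rather than as C
 * pointers, so that 32-bit userland works unchanged on 64-bit kernels;
 * these helpers convert between the two representations.
 */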
static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;

	spin_unlock_irqrestore(&card->lock, flags);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

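/*
 * Handle allocation follows the classic two-step idr pattern of this
 * kernel generation: idr_pre_get() preallocates memory with the caller's
 * gfp_mask, then idr_get_new() performs the actual allocation under
 * client->lock.  idr_get_new() returns -EAGAIN if the preallocation was
 * consumed by someone else in the meantime, in which case we simply
 * preallocate again and retry.
 */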
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;
	struct fw_cdev_event_response *r = &response->response;

	if (length < r->length)
		r->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(r->data, payload, r->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * If called while in shutdown, the idr tree must be left untouched.
	 * The idr handle will be removed later.
	 */
	if (!client->in_shutdown)
		idr_remove(&client->resource_idr, response->resource.handle);
	spin_unlock_irqrestore(&client->lock, flags);

	r->type = FW_CDEV_EVENT_RESPONSE;
	r->rcode = rcode;

	/*
	 * In the case that sizeof(*r) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
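	/*
	 * Example of the arithmetic (assuming an ABI that pads the struct
	 * to 8-byte alignment): sizeof(*r) is 24 while r->data starts at
	 * offset 20, so responses of up to 4 bytes take the two-vector
	 * branch below and the data appears both inside the padded struct
	 * and appended after it.  Exact offsets depend on the ABI.
	 */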
	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
		queue_event(client, &response->event, r, sizeof(*r),
			    r->data, r->length);
	else
		queue_event(client, &response->event, r,
			    sizeof(*r) + r->length, NULL, 0);
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;
	int ret;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		ret = -EINVAL;
		goto failed;
	}

	response->resource.release = release_transaction;
	ret = add_client_resource(client, &response->resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	if (request->data)
		return sizeof(*request) + request->length;
	else
		return sizeof(*request);
 failed:
	kfree(response);

	return ret;
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

static void handle_request(struct fw_card *card, struct fw_request *r,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;
	int ret;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL)
		goto failed;

	request->request = r;
	request->data = payload;
	request->length = length;

	request->resource.release = release_request;
	ret = add_client_resource(client, &request->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.handle = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(request);
	kfree(e);
	fw_send_response(card, r, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;
	int ret;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	ret = fw_core_add_address_handler(&handler->handler, &region);
	if (ret < 0) {
		kfree(handler);
		return ret;
	}

	handler->resource.release = release_address_handler;
	ret = add_client_resource(client, &handler->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &handler->resource);
		return ret;
	}
	request->handle = handler->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int ret;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	ret = fw_core_add_descriptor(&descriptor->d);
	if (ret < 0)
		goto failed;

	descriptor->resource.release = release_descriptor;
	ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&descriptor->d);
		goto failed;
	}
	request->handle = descriptor->resource.handle;

	return 0;
 failed:
	kfree(descriptor);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure = client->iso_closure;
	irq->interrupt.cycle = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
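/*
 * These decode the control word that userspace builds with the
 * FW_CDEV_ISO_* macros from <linux/firewire-cdev.h>, e.g.
 * FW_CDEV_ISO_PAYLOAD_LENGTH(len) | FW_CDEV_ISO_INTERRUPT |
 * FW_CDEV_ISO_TAG(tag) | FW_CDEV_ISO_SY(sy) |
 * FW_CDEV_ISO_HEADER_LENGTH(hlen).
 */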

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length %
				   ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user(u.packet.header,
				     p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

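	/*
	 * Report how far we got.  The updated size/packets/data fields
	 * are copied back to userspace by dispatch_ioctl() (QUEUE_ISO is
	 * an _IOWR ioctl), so the caller can resubmit any packets that
	 * did not fit.
	 */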
	request->size -= uptr_to_u64(p) - request->packets;
	request->packets = uptr_to_u64(p);
	request->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};
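/*
 * The table is indexed by _IOC_NR(cmd) and must stay in sync with the
 * FW_CDEV_IOC_* numbers in <linux/firewire-cdev.h>, which run from
 * 0x00 (FW_CDEV_IOC_GET_INFO) through 0x0c (FW_CDEV_IOC_GET_CYCLE_TIMER).
 */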

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

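	/*
	 * A writable mapping implies a transmit buffer (the device reads
	 * what userspace wrote), hence DMA_TO_DEVICE; a read-only mapping
	 * implies a receive buffer, hence DMA_FROM_DEVICE.
	 */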
	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	unsigned long flags;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irqsave(&client->lock, flags);
	client->in_shutdown = true;
	spin_unlock_irqrestore(&client->lock, flags);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	/*
	 * FIXME: client should be reference-counted.  It's extremely unlikely
	 * but there may still be transactions being completed at this point.
	 */
	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};