1 | /* | |
2 | * Char device for device raw access | |
3 | * | |
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software Foundation, | |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
19 | */ | |
20 | ||
21 | #include <linux/bug.h> | |
22 | #include <linux/compat.h> | |
23 | #include <linux/delay.h> | |
24 | #include <linux/device.h> | |
25 | #include <linux/dma-mapping.h> | |
26 | #include <linux/errno.h> | |
27 | #include <linux/firewire.h> | |
28 | #include <linux/firewire-cdev.h> | |
29 | #include <linux/idr.h> | |
30 | #include <linux/irqflags.h> | |
31 | #include <linux/jiffies.h> | |
32 | #include <linux/kernel.h> | |
33 | #include <linux/kref.h> | |
34 | #include <linux/mm.h> | |
35 | #include <linux/module.h> | |
36 | #include <linux/mutex.h> | |
37 | #include <linux/poll.h> | |
38 | #include <linux/sched.h> /* required for linux/wait.h */ | |
39 | #include <linux/slab.h> | |
40 | #include <linux/spinlock.h> | |
41 | #include <linux/string.h> | |
42 | #include <linux/time.h> | |
43 | #include <linux/uaccess.h> | |
44 | #include <linux/vmalloc.h> | |
45 | #include <linux/wait.h> | |
46 | #include <linux/workqueue.h> | |
47 | ||
48 | ||
49 | #include "core.h" | |
50 | ||
51 | /* | |
52 | * ABI version history is documented in linux/firewire-cdev.h. | |
53 | */ | |
54 | #define FW_CDEV_KERNEL_VERSION 5 | |
55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | |
56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | |
57 | #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 | |
58 | ||
59 | struct client { | |
60 | u32 version; | |
61 | struct fw_device *device; | |
62 | ||
63 | spinlock_t lock; | |
64 | bool in_shutdown; | |
65 | struct idr resource_idr; | |
66 | struct list_head event_list; | |
67 | wait_queue_head_t wait; | |
68 | wait_queue_head_t tx_flush_wait; | |
69 | u64 bus_reset_closure; | |
70 | ||
71 | struct fw_iso_context *iso_context; | |
72 | u64 iso_closure; | |
73 | struct fw_iso_buffer buffer; | |
74 | unsigned long vm_start; | |
75 | bool buffer_is_mapped; | |
76 | ||
77 | struct list_head phy_receiver_link; | |
78 | u64 phy_receiver_closure; | |
79 | ||
80 | struct list_head link; | |
81 | struct kref kref; | |
82 | }; | |
83 | ||
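/*
 * Client lifetime is reference counted: the open file holds one
 * reference, and each resource stored in the idr holds another (taken
 * in add_client_resource(), dropped when the resource is released).
 * client_release() runs once the last reference is dropped.
 */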
84 | static inline void client_get(struct client *client) | |
85 | { | |
86 | kref_get(&client->kref); | |
87 | } | |
88 | ||
89 | static void client_release(struct kref *kref) | |
90 | { | |
91 | struct client *client = container_of(kref, struct client, kref); | |
92 | ||
93 | fw_device_put(client->device); | |
94 | kfree(client); | |
95 | } | |
96 | ||
97 | static void client_put(struct client *client) | |
98 | { | |
99 | kref_put(&client->kref, client_release); | |
100 | } | |
101 | ||
102 | struct client_resource; | |
103 | typedef void (*client_resource_release_fn_t)(struct client *, | |
104 | struct client_resource *); | |
105 | struct client_resource { | |
106 | client_resource_release_fn_t release; | |
107 | int handle; | |
108 | }; | |
109 | ||
110 | struct address_handler_resource { | |
111 | struct client_resource resource; | |
112 | struct fw_address_handler handler; | |
113 | __u64 closure; | |
114 | struct client *client; | |
115 | }; | |
116 | ||
117 | struct outbound_transaction_resource { | |
118 | struct client_resource resource; | |
119 | struct fw_transaction transaction; | |
120 | }; | |
121 | ||
122 | struct inbound_transaction_resource { | |
123 | struct client_resource resource; | |
124 | struct fw_card *card; | |
125 | struct fw_request *request; | |
126 | void *data; | |
127 | size_t length; | |
128 | }; | |
129 | ||
130 | struct descriptor_resource { | |
131 | struct client_resource resource; | |
132 | struct fw_descriptor descriptor; | |
133 | u32 data[]; | |
134 | }; | |
135 | ||
136 | struct iso_resource { | |
137 | struct client_resource resource; | |
138 | struct client *client; | |
139 | /* Schedule work and access todo only with client->lock held. */ | |
140 | struct delayed_work work; | |
141 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, | |
142 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; | |
143 | int generation; | |
144 | u64 channels; | |
145 | s32 bandwidth; | |
146 | struct iso_resource_event *e_alloc, *e_dealloc; | |
147 | }; | |
148 | ||
149 | static void release_iso_resource(struct client *, struct client_resource *); | |
150 | ||
151 | static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) | |
152 | { | |
153 | client_get(r->client); | |
154 | if (!queue_delayed_work(fw_workqueue, &r->work, delay)) | |
155 | client_put(r->client); | |
156 | } | |
157 | ||
158 | static void schedule_if_iso_resource(struct client_resource *resource) | |
159 | { | |
160 | if (resource->release == release_iso_resource) | |
161 | schedule_iso_resource(container_of(resource, | |
162 | struct iso_resource, resource), 0); | |
163 | } | |
164 | ||
165 | /* | |
166 | * dequeue_event() just kfree()'s the event, so the event has to be | |
167 | * the first field in a struct XYZ_event. | |
168 | */ | |
169 | struct event { | |
170 | struct { void *data; size_t size; } v[2]; | |
171 | struct list_head link; | |
172 | }; | |
173 | ||
174 | struct bus_reset_event { | |
175 | struct event event; | |
176 | struct fw_cdev_event_bus_reset reset; | |
177 | }; | |
178 | ||
179 | struct outbound_transaction_event { | |
180 | struct event event; | |
181 | struct client *client; | |
182 | struct outbound_transaction_resource r; | |
183 | struct fw_cdev_event_response response; | |
184 | }; | |
185 | ||
186 | struct inbound_transaction_event { | |
187 | struct event event; | |
188 | union { | |
189 | struct fw_cdev_event_request request; | |
190 | struct fw_cdev_event_request2 request2; | |
191 | } req; | |
192 | }; | |
193 | ||
194 | struct iso_interrupt_event { | |
195 | struct event event; | |
196 | struct fw_cdev_event_iso_interrupt interrupt; | |
197 | }; | |
198 | ||
199 | struct iso_interrupt_mc_event { | |
200 | struct event event; | |
201 | struct fw_cdev_event_iso_interrupt_mc interrupt; | |
202 | }; | |
203 | ||
204 | struct iso_resource_event { | |
205 | struct event event; | |
206 | struct fw_cdev_event_iso_resource iso_resource; | |
207 | }; | |
208 | ||
209 | struct outbound_phy_packet_event { | |
210 | struct event event; | |
211 | struct client *client; | |
212 | struct fw_packet p; | |
213 | struct fw_cdev_event_phy_packet phy_packet; | |
214 | }; | |
215 | ||
216 | struct inbound_phy_packet_event { | |
217 | struct event event; | |
218 | struct fw_cdev_event_phy_packet phy_packet; | |
219 | }; | |
220 | ||
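/*
 * The cdev ABI passes user pointers as u64.  These helpers convert
 * between u64 and user pointers; with CONFIG_COMPAT they use
 * compat_ptr()/ptr_to_compat() when called from a 32-bit task.
 */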
221 | #ifdef CONFIG_COMPAT | |
222 | static void __user *u64_to_uptr(u64 value) | |
223 | { | |
224 | if (in_compat_syscall()) | |
225 | return compat_ptr(value); | |
226 | else | |
227 | return (void __user *)(unsigned long)value; | |
228 | } | |
229 | ||
230 | static u64 uptr_to_u64(void __user *ptr) | |
231 | { | |
232 | if (in_compat_syscall()) | |
233 | return ptr_to_compat(ptr); | |
234 | else | |
235 | return (u64)(unsigned long)ptr; | |
236 | } | |
237 | #else | |
238 | static inline void __user *u64_to_uptr(u64 value) | |
239 | { | |
240 | return (void __user *)(unsigned long)value; | |
241 | } | |
242 | ||
243 | static inline u64 uptr_to_u64(void __user *ptr) | |
244 | { | |
245 | return (u64)(unsigned long)ptr; | |
246 | } | |
247 | #endif /* CONFIG_COMPAT */ | |
248 | ||
249 | static int fw_device_op_open(struct inode *inode, struct file *file) | |
250 | { | |
251 | struct fw_device *device; | |
252 | struct client *client; | |
253 | ||
254 | device = fw_device_get_by_devt(inode->i_rdev); | |
255 | if (device == NULL) | |
256 | return -ENODEV; | |
257 | ||
258 | if (fw_device_is_shutdown(device)) { | |
259 | fw_device_put(device); | |
260 | return -ENODEV; | |
261 | } | |
262 | ||
263 | client = kzalloc(sizeof(*client), GFP_KERNEL); | |
264 | if (client == NULL) { | |
265 | fw_device_put(device); | |
266 | return -ENOMEM; | |
267 | } | |
268 | ||
269 | client->device = device; | |
270 | spin_lock_init(&client->lock); | |
271 | idr_init(&client->resource_idr); | |
272 | INIT_LIST_HEAD(&client->event_list); | |
273 | init_waitqueue_head(&client->wait); | |
274 | init_waitqueue_head(&client->tx_flush_wait); | |
275 | INIT_LIST_HEAD(&client->phy_receiver_link); | |
276 | INIT_LIST_HEAD(&client->link); | |
277 | kref_init(&client->kref); | |
278 | ||
279 | file->private_data = client; | |
280 | ||
281 | return nonseekable_open(inode, file); | |
282 | } | |
283 | ||
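/*
 * Append an event to the client's event list (or free it immediately if
 * the client is shutting down) and wake up any sleeping reader.  The
 * two (data, size) vectors let an event carry a fixed-size header plus
 * an optional payload without an extra copy.
 */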
284 | static void queue_event(struct client *client, struct event *event, | |
285 | void *data0, size_t size0, void *data1, size_t size1) | |
286 | { | |
287 | unsigned long flags; | |
288 | ||
289 | event->v[0].data = data0; | |
290 | event->v[0].size = size0; | |
291 | event->v[1].data = data1; | |
292 | event->v[1].size = size1; | |
293 | ||
294 | spin_lock_irqsave(&client->lock, flags); | |
295 | if (client->in_shutdown) | |
296 | kfree(event); | |
297 | else | |
298 | list_add_tail(&event->link, &client->event_list); | |
299 | spin_unlock_irqrestore(&client->lock, flags); | |
300 | ||
301 | wake_up_interruptible(&client->wait); | |
302 | } | |
303 | ||
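/*
 * Block until an event is available or the device goes away, then copy
 * at most 'count' bytes of the oldest event to userspace and free it.
 * A buffer smaller than the event silently truncates it.
 */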
304 | static int dequeue_event(struct client *client, | |
305 | char __user *buffer, size_t count) | |
306 | { | |
307 | struct event *event; | |
308 | size_t size, total; | |
309 | int i, ret; | |
310 | ||
311 | ret = wait_event_interruptible(client->wait, | |
312 | !list_empty(&client->event_list) || | |
313 | fw_device_is_shutdown(client->device)); | |
314 | if (ret < 0) | |
315 | return ret; | |
316 | ||
317 | if (list_empty(&client->event_list) && | |
318 | fw_device_is_shutdown(client->device)) | |
319 | return -ENODEV; | |
320 | ||
321 | spin_lock_irq(&client->lock); | |
322 | event = list_first_entry(&client->event_list, struct event, link); | |
323 | list_del(&event->link); | |
324 | spin_unlock_irq(&client->lock); | |
325 | ||
326 | total = 0; | |
327 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { | |
328 | size = min(event->v[i].size, count - total); | |
329 | if (copy_to_user(buffer + total, event->v[i].data, size)) { | |
330 | ret = -EFAULT; | |
331 | goto out; | |
332 | } | |
333 | total += size; | |
334 | } | |
335 | ret = total; | |
336 | ||
337 | out: | |
338 | kfree(event); | |
339 | ||
340 | return ret; | |
341 | } | |
342 | ||
343 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, | |
344 | size_t count, loff_t *offset) | |
345 | { | |
346 | struct client *client = file->private_data; | |
347 | ||
348 | return dequeue_event(client, buffer, count); | |
349 | } | |
350 | ||
351 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, | |
352 | struct client *client) | |
353 | { | |
354 | struct fw_card *card = client->device->card; | |
355 | ||
356 | spin_lock_irq(&card->lock); | |
357 | ||
358 | event->closure = client->bus_reset_closure; | |
359 | event->type = FW_CDEV_EVENT_BUS_RESET; | |
360 | event->generation = client->device->generation; | |
361 | event->node_id = client->device->node_id; | |
362 | event->local_node_id = card->local_node->node_id; | |
363 | event->bm_node_id = card->bm_node_id; | |
364 | event->irm_node_id = card->irm_node->node_id; | |
365 | event->root_node_id = card->root_node->node_id; | |
366 | ||
367 | spin_unlock_irq(&card->lock); | |
368 | } | |
369 | ||
370 | static void for_each_client(struct fw_device *device, | |
371 | void (*callback)(struct client *client)) | |
372 | { | |
373 | struct client *c; | |
374 | ||
375 | mutex_lock(&device->client_list_mutex); | |
376 | list_for_each_entry(c, &device->client_list, link) | |
377 | callback(c); | |
378 | mutex_unlock(&device->client_list_mutex); | |
379 | } | |
380 | ||
381 | static int schedule_reallocations(int id, void *p, void *data) | |
382 | { | |
383 | schedule_if_iso_resource(p); | |
384 | ||
385 | return 0; | |
386 | } | |
387 | ||
388 | static void queue_bus_reset_event(struct client *client) | |
389 | { | |
390 | struct bus_reset_event *e; | |
391 | ||
392 | e = kzalloc(sizeof(*e), GFP_KERNEL); | |
393 | if (e == NULL) | |
394 | return; | |
395 | ||
396 | fill_bus_reset_event(&e->reset, client); | |
397 | ||
398 | queue_event(client, &e->event, | |
399 | &e->reset, sizeof(e->reset), NULL, 0); | |
400 | ||
401 | spin_lock_irq(&client->lock); | |
402 | idr_for_each(&client->resource_idr, schedule_reallocations, client); | |
403 | spin_unlock_irq(&client->lock); | |
404 | } | |
405 | ||
406 | void fw_device_cdev_update(struct fw_device *device) | |
407 | { | |
408 | for_each_client(device, queue_bus_reset_event); | |
409 | } | |
410 | ||
411 | static void wake_up_client(struct client *client) | |
412 | { | |
413 | wake_up_interruptible(&client->wait); | |
414 | } | |
415 | ||
416 | void fw_device_cdev_remove(struct fw_device *device) | |
417 | { | |
418 | for_each_client(device, wake_up_client); | |
419 | } | |
420 | ||
421 | union ioctl_arg { | |
422 | struct fw_cdev_get_info get_info; | |
423 | struct fw_cdev_send_request send_request; | |
424 | struct fw_cdev_allocate allocate; | |
425 | struct fw_cdev_deallocate deallocate; | |
426 | struct fw_cdev_send_response send_response; | |
427 | struct fw_cdev_initiate_bus_reset initiate_bus_reset; | |
428 | struct fw_cdev_add_descriptor add_descriptor; | |
429 | struct fw_cdev_remove_descriptor remove_descriptor; | |
430 | struct fw_cdev_create_iso_context create_iso_context; | |
431 | struct fw_cdev_queue_iso queue_iso; | |
432 | struct fw_cdev_start_iso start_iso; | |
433 | struct fw_cdev_stop_iso stop_iso; | |
434 | struct fw_cdev_get_cycle_timer get_cycle_timer; | |
435 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; | |
436 | struct fw_cdev_send_stream_packet send_stream_packet; | |
437 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; | |
438 | struct fw_cdev_send_phy_packet send_phy_packet; | |
439 | struct fw_cdev_receive_phy_packets receive_phy_packets; | |
440 | struct fw_cdev_set_iso_channels set_iso_channels; | |
441 | struct fw_cdev_flush_iso flush_iso; | |
442 | }; | |
443 | ||
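/*
 * FW_CDEV_IOC_GET_INFO: negotiate the ABI version, optionally copy the
 * config ROM and the current bus reset state to userspace, and add the
 * client to the device's client list so it receives bus reset events.
 */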
444 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | |
445 | { | |
446 | struct fw_cdev_get_info *a = &arg->get_info; | |
447 | struct fw_cdev_event_bus_reset bus_reset; | |
448 | unsigned long ret = 0; | |
449 | ||
450 | client->version = a->version; | |
451 | a->version = FW_CDEV_KERNEL_VERSION; | |
452 | a->card = client->device->card->index; | |
453 | ||
454 | down_read(&fw_device_rwsem); | |
455 | ||
456 | if (a->rom != 0) { | |
457 | size_t want = a->rom_length; | |
458 | size_t have = client->device->config_rom_length * 4; | |
459 | ||
460 | ret = copy_to_user(u64_to_uptr(a->rom), | |
461 | client->device->config_rom, min(want, have)); | |
462 | } | |
463 | a->rom_length = client->device->config_rom_length * 4; | |
464 | ||
465 | up_read(&fw_device_rwsem); | |
466 | ||
467 | if (ret != 0) | |
468 | return -EFAULT; | |
469 | ||
470 | mutex_lock(&client->device->client_list_mutex); | |
471 | ||
472 | client->bus_reset_closure = a->bus_reset_closure; | |
473 | if (a->bus_reset != 0) { | |
474 | fill_bus_reset_event(&bus_reset, client); | |
475 | /* unaligned size of bus_reset is 36 bytes */ | |
476 | ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36); | |
477 | } | |
478 | if (ret == 0 && list_empty(&client->link)) | |
479 | list_add_tail(&client->link, &client->device->client_list); | |
480 | ||
481 | mutex_unlock(&client->device->client_list_mutex); | |
482 | ||
483 | return ret ? -EFAULT : 0; | |
484 | } | |
485 | ||
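/*
 * Register a resource in the client's idr.  On success the resource is
 * assigned a handle, the client gains a reference, and iso resources
 * are scheduled right away.  Callers in atomic context pass a
 * non-blocking gfp_mask, in which case idr preloading is skipped.
 */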
486 | static int add_client_resource(struct client *client, | |
487 | struct client_resource *resource, gfp_t gfp_mask) | |
488 | { | |
489 | bool preload = gfpflags_allow_blocking(gfp_mask); | |
490 | unsigned long flags; | |
491 | int ret; | |
492 | ||
493 | if (preload) | |
494 | idr_preload(gfp_mask); | |
495 | spin_lock_irqsave(&client->lock, flags); | |
496 | ||
497 | if (client->in_shutdown) | |
498 | ret = -ECANCELED; | |
499 | else | |
500 | ret = idr_alloc(&client->resource_idr, resource, 0, 0, | |
501 | GFP_NOWAIT); | |
502 | if (ret >= 0) { | |
503 | resource->handle = ret; | |
504 | client_get(client); | |
505 | schedule_if_iso_resource(resource); | |
506 | } | |
507 | ||
508 | spin_unlock_irqrestore(&client->lock, flags); | |
509 | if (preload) | |
510 | idr_preload_end(); | |
511 | ||
512 | return ret < 0 ? ret : 0; | |
513 | } | |
514 | ||
515 | static int release_client_resource(struct client *client, u32 handle, | |
516 | client_resource_release_fn_t release, | |
517 | struct client_resource **return_resource) | |
518 | { | |
519 | struct client_resource *resource; | |
520 | ||
521 | spin_lock_irq(&client->lock); | |
522 | if (client->in_shutdown) | |
523 | resource = NULL; | |
524 | else | |
525 | resource = idr_find(&client->resource_idr, handle); | |
526 | if (resource && resource->release == release) | |
527 | idr_remove(&client->resource_idr, handle); | |
528 | spin_unlock_irq(&client->lock); | |
529 | ||
530 | if (!(resource && resource->release == release)) | |
531 | return -EINVAL; | |
532 | ||
533 | if (return_resource) | |
534 | *return_resource = resource; | |
535 | else | |
536 | resource->release(client, resource); | |
537 | ||
538 | client_put(client); | |
539 | ||
540 | return 0; | |
541 | } | |
542 | ||
543 | static void release_transaction(struct client *client, | |
544 | struct client_resource *resource) | |
545 | { | |
546 | } | |
547 | ||
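/*
 * Completion callback of outbound transactions: copy the payload into
 * the response event, remove the resource from the idr (waking up a
 * shutdown in progress if necessary) and queue the event for the
 * reader.
 */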
548 | static void complete_transaction(struct fw_card *card, int rcode, | |
549 | void *payload, size_t length, void *data) | |
550 | { | |
551 | struct outbound_transaction_event *e = data; | |
552 | struct fw_cdev_event_response *rsp = &e->response; | |
553 | struct client *client = e->client; | |
554 | unsigned long flags; | |
555 | ||
556 | if (length < rsp->length) | |
557 | rsp->length = length; | |
558 | if (rcode == RCODE_COMPLETE) | |
559 | memcpy(rsp->data, payload, rsp->length); | |
560 | ||
561 | spin_lock_irqsave(&client->lock, flags); | |
562 | idr_remove(&client->resource_idr, e->r.resource.handle); | |
563 | if (client->in_shutdown) | |
564 | wake_up(&client->tx_flush_wait); | |
565 | spin_unlock_irqrestore(&client->lock, flags); | |
566 | ||
567 | rsp->type = FW_CDEV_EVENT_RESPONSE; | |
568 | rsp->rcode = rcode; | |
569 | ||
570 | /* | |
571 | * In the case that sizeof(*rsp) doesn't align with the position of the | |
572 | * data, and the read is short, preserve an extra copy of the data | |
573 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless | |
574 | * for short reads and some apps depended on it, this is both safe | |
575 | * and prudent for compatibility. | |
576 | */ | |
577 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) | |
578 | queue_event(client, &e->event, rsp, sizeof(*rsp), | |
579 | rsp->data, rsp->length); | |
580 | else | |
581 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, | |
582 | NULL, 0); | |
583 | ||
584 | /* Drop the idr's reference */ | |
585 | client_put(client); | |
586 | } | |
587 | ||
588 | static int init_request(struct client *client, | |
589 | struct fw_cdev_send_request *request, | |
590 | int destination_id, int speed) | |
591 | { | |
592 | struct outbound_transaction_event *e; | |
593 | int ret; | |
594 | ||
595 | if (request->tcode != TCODE_STREAM_DATA && | |
596 | (request->length > 4096 || request->length > 512 << speed)) | |
597 | return -EIO; | |
598 | ||
599 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && | |
600 | request->length < 4) | |
601 | return -EINVAL; | |
602 | ||
603 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); | |
604 | if (e == NULL) | |
605 | return -ENOMEM; | |
606 | ||
607 | e->client = client; | |
608 | e->response.length = request->length; | |
609 | e->response.closure = request->closure; | |
610 | ||
611 | if (request->data && | |
612 | copy_from_user(e->response.data, | |
613 | u64_to_uptr(request->data), request->length)) { | |
614 | ret = -EFAULT; | |
615 | goto failed; | |
616 | } | |
617 | ||
618 | e->r.resource.release = release_transaction; | |
619 | ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); | |
620 | if (ret < 0) | |
621 | goto failed; | |
622 | ||
623 | fw_send_request(client->device->card, &e->r.transaction, | |
624 | request->tcode, destination_id, request->generation, | |
625 | speed, request->offset, e->response.data, | |
626 | request->length, complete_transaction, e); | |
627 | return 0; | |
628 | ||
629 | failed: | |
630 | kfree(e); | |
631 | ||
632 | return ret; | |
633 | } | |
634 | ||
635 | static int ioctl_send_request(struct client *client, union ioctl_arg *arg) | |
636 | { | |
637 | switch (arg->send_request.tcode) { | |
638 | case TCODE_WRITE_QUADLET_REQUEST: | |
639 | case TCODE_WRITE_BLOCK_REQUEST: | |
640 | case TCODE_READ_QUADLET_REQUEST: | |
641 | case TCODE_READ_BLOCK_REQUEST: | |
642 | case TCODE_LOCK_MASK_SWAP: | |
643 | case TCODE_LOCK_COMPARE_SWAP: | |
644 | case TCODE_LOCK_FETCH_ADD: | |
645 | case TCODE_LOCK_LITTLE_ADD: | |
646 | case TCODE_LOCK_BOUNDED_ADD: | |
647 | case TCODE_LOCK_WRAP_ADD: | |
648 | case TCODE_LOCK_VENDOR_DEPENDENT: | |
649 | break; | |
650 | default: | |
651 | return -EINVAL; | |
652 | } | |
653 | ||
654 | return init_request(client, &arg->send_request, client->device->node_id, | |
655 | client->device->max_speed); | |
656 | } | |
657 | ||
658 | static inline bool is_fcp_request(struct fw_request *request) | |
659 | { | |
660 | return request == NULL; | |
661 | } | |
662 | ||
663 | static void release_request(struct client *client, | |
664 | struct client_resource *resource) | |
665 | { | |
666 | struct inbound_transaction_resource *r = container_of(resource, | |
667 | struct inbound_transaction_resource, resource); | |
668 | ||
669 | if (is_fcp_request(r->request)) | |
670 | kfree(r->data); | |
671 | else | |
672 | fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); | |
673 | ||
674 | fw_card_put(r->card); | |
675 | kfree(r); | |
676 | } | |
677 | ||
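/*
 * Address handler callback for inbound requests.  The request (and, for
 * FCP requests, a copy of the payload) is kept as a resource so that
 * userspace can answer it later via FW_CDEV_IOC_SEND_RESPONSE.  If
 * anything fails here, non-FCP requests are answered with
 * RCODE_CONFLICT_ERROR.
 */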
678 | static void handle_request(struct fw_card *card, struct fw_request *request, | |
679 | int tcode, int destination, int source, | |
680 | int generation, unsigned long long offset, | |
681 | void *payload, size_t length, void *callback_data) | |
682 | { | |
683 | struct address_handler_resource *handler = callback_data; | |
684 | struct inbound_transaction_resource *r; | |
685 | struct inbound_transaction_event *e; | |
686 | size_t event_size0; | |
687 | void *fcp_frame = NULL; | |
688 | int ret; | |
689 | ||
690 | /* card may be different from handler->client->device->card */ | |
691 | fw_card_get(card); | |
692 | ||
693 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | |
694 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | |
695 | if (r == NULL || e == NULL) | |
696 | goto failed; | |
697 | ||
698 | r->card = card; | |
699 | r->request = request; | |
700 | r->data = payload; | |
701 | r->length = length; | |
702 | ||
703 | if (is_fcp_request(request)) { | |
704 | /* | |
705 | * FIXME: Let core-transaction.c manage a | |
706 | * single reference-counted copy? | |
707 | */ | |
708 | fcp_frame = kmemdup(payload, length, GFP_ATOMIC); | |
709 | if (fcp_frame == NULL) | |
710 | goto failed; | |
711 | ||
712 | r->data = fcp_frame; | |
713 | } | |
714 | ||
715 | r->resource.release = release_request; | |
716 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); | |
717 | if (ret < 0) | |
718 | goto failed; | |
719 | ||
720 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { | |
721 | struct fw_cdev_event_request *req = &e->req.request; | |
722 | ||
723 | if (tcode & 0x10) | |
724 | tcode = TCODE_LOCK_REQUEST; | |
725 | ||
726 | req->type = FW_CDEV_EVENT_REQUEST; | |
727 | req->tcode = tcode; | |
728 | req->offset = offset; | |
729 | req->length = length; | |
730 | req->handle = r->resource.handle; | |
731 | req->closure = handler->closure; | |
732 | event_size0 = sizeof(*req); | |
733 | } else { | |
734 | struct fw_cdev_event_request2 *req = &e->req.request2; | |
735 | ||
736 | req->type = FW_CDEV_EVENT_REQUEST2; | |
737 | req->tcode = tcode; | |
738 | req->offset = offset; | |
739 | req->source_node_id = source; | |
740 | req->destination_node_id = destination; | |
741 | req->card = card->index; | |
742 | req->generation = generation; | |
743 | req->length = length; | |
744 | req->handle = r->resource.handle; | |
745 | req->closure = handler->closure; | |
746 | event_size0 = sizeof(*req); | |
747 | } | |
748 | ||
749 | queue_event(handler->client, &e->event, | |
750 | &e->req, event_size0, r->data, length); | |
751 | return; | |
752 | ||
753 | failed: | |
754 | kfree(r); | |
755 | kfree(e); | |
756 | kfree(fcp_frame); | |
757 | ||
758 | if (!is_fcp_request(request)) | |
759 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | |
760 | ||
761 | fw_card_put(card); | |
762 | } | |
763 | ||
764 | static void release_address_handler(struct client *client, | |
765 | struct client_resource *resource) | |
766 | { | |
767 | struct address_handler_resource *r = | |
768 | container_of(resource, struct address_handler_resource, resource); | |
769 | ||
770 | fw_core_remove_address_handler(&r->handler); | |
771 | kfree(r); | |
772 | } | |
773 | ||
774 | static int ioctl_allocate(struct client *client, union ioctl_arg *arg) | |
775 | { | |
776 | struct fw_cdev_allocate *a = &arg->allocate; | |
777 | struct address_handler_resource *r; | |
778 | struct fw_address_region region; | |
779 | int ret; | |
780 | ||
781 | r = kmalloc(sizeof(*r), GFP_KERNEL); | |
782 | if (r == NULL) | |
783 | return -ENOMEM; | |
784 | ||
785 | region.start = a->offset; | |
786 | if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) | |
787 | region.end = a->offset + a->length; | |
788 | else | |
789 | region.end = a->region_end; | |
790 | ||
791 | r->handler.length = a->length; | |
792 | r->handler.address_callback = handle_request; | |
793 | r->handler.callback_data = r; | |
794 | r->closure = a->closure; | |
795 | r->client = client; | |
796 | ||
797 | ret = fw_core_add_address_handler(&r->handler, ®ion); | |
798 | if (ret < 0) { | |
799 | kfree(r); | |
800 | return ret; | |
801 | } | |
802 | a->offset = r->handler.offset; | |
803 | ||
804 | r->resource.release = release_address_handler; | |
805 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | |
806 | if (ret < 0) { | |
807 | release_address_handler(client, &r->resource); | |
808 | return ret; | |
809 | } | |
810 | a->handle = r->resource.handle; | |
811 | ||
812 | return 0; | |
813 | } | |
814 | ||
815 | static int ioctl_deallocate(struct client *client, union ioctl_arg *arg) | |
816 | { | |
817 | return release_client_resource(client, arg->deallocate.handle, | |
818 | release_address_handler, NULL); | |
819 | } | |
820 | ||
821 | static int ioctl_send_response(struct client *client, union ioctl_arg *arg) | |
822 | { | |
823 | struct fw_cdev_send_response *a = &arg->send_response; | |
824 | struct client_resource *resource; | |
825 | struct inbound_transaction_resource *r; | |
826 | int ret = 0; | |
827 | ||
828 | if (release_client_resource(client, a->handle, | |
829 | release_request, &resource) < 0) | |
830 | return -EINVAL; | |
831 | ||
832 | r = container_of(resource, struct inbound_transaction_resource, | |
833 | resource); | |
834 | if (is_fcp_request(r->request)) | |
835 | goto out; | |
836 | ||
837 | if (a->length != fw_get_response_length(r->request)) { | |
838 | ret = -EINVAL; | |
839 | kfree(r->request); | |
840 | goto out; | |
841 | } | |
842 | if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) { | |
843 | ret = -EFAULT; | |
844 | kfree(r->request); | |
845 | goto out; | |
846 | } | |
847 | fw_send_response(r->card, r->request, a->rcode); | |
848 | out: | |
849 | fw_card_put(r->card); | |
850 | kfree(r); | |
851 | ||
852 | return ret; | |
853 | } | |
854 | ||
855 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) | |
856 | { | |
857 | fw_schedule_bus_reset(client->device->card, true, | |
858 | arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); | |
859 | return 0; | |
860 | } | |
861 | ||
862 | static void release_descriptor(struct client *client, | |
863 | struct client_resource *resource) | |
864 | { | |
865 | struct descriptor_resource *r = | |
866 | container_of(resource, struct descriptor_resource, resource); | |
867 | ||
868 | fw_core_remove_descriptor(&r->descriptor); | |
869 | kfree(r); | |
870 | } | |
871 | ||
872 | static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg) | |
873 | { | |
874 | struct fw_cdev_add_descriptor *a = &arg->add_descriptor; | |
875 | struct descriptor_resource *r; | |
876 | int ret; | |
877 | ||
878 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | |
879 | if (!client->device->is_local) | |
880 | return -ENOSYS; | |
881 | ||
882 | if (a->length > 256) | |
883 | return -EINVAL; | |
884 | ||
885 | r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL); | |
886 | if (r == NULL) | |
887 | return -ENOMEM; | |
888 | ||
889 | if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) { | |
890 | ret = -EFAULT; | |
891 | goto failed; | |
892 | } | |
893 | ||
894 | r->descriptor.length = a->length; | |
895 | r->descriptor.immediate = a->immediate; | |
896 | r->descriptor.key = a->key; | |
897 | r->descriptor.data = r->data; | |
898 | ||
899 | ret = fw_core_add_descriptor(&r->descriptor); | |
900 | if (ret < 0) | |
901 | goto failed; | |
902 | ||
903 | r->resource.release = release_descriptor; | |
904 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | |
905 | if (ret < 0) { | |
906 | fw_core_remove_descriptor(&r->descriptor); | |
907 | goto failed; | |
908 | } | |
909 | a->handle = r->resource.handle; | |
910 | ||
911 | return 0; | |
912 | failed: | |
913 | kfree(r); | |
914 | ||
915 | return ret; | |
916 | } | |
917 | ||
918 | static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg) | |
919 | { | |
920 | return release_client_resource(client, arg->remove_descriptor.handle, | |
921 | release_descriptor, NULL); | |
922 | } | |
923 | ||
924 | static void iso_callback(struct fw_iso_context *context, u32 cycle, | |
925 | size_t header_length, void *header, void *data) | |
926 | { | |
927 | struct client *client = data; | |
928 | struct iso_interrupt_event *e; | |
929 | ||
930 | e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); | |
931 | if (e == NULL) | |
932 | return; | |
933 | ||
934 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | |
935 | e->interrupt.closure = client->iso_closure; | |
936 | e->interrupt.cycle = cycle; | |
937 | e->interrupt.header_length = header_length; | |
938 | memcpy(e->interrupt.header, header, header_length); | |
939 | queue_event(client, &e->event, &e->interrupt, | |
940 | sizeof(e->interrupt) + header_length, NULL, 0); | |
941 | } | |
942 | ||
943 | static void iso_mc_callback(struct fw_iso_context *context, | |
944 | dma_addr_t completed, void *data) | |
945 | { | |
946 | struct client *client = data; | |
947 | struct iso_interrupt_mc_event *e; | |
948 | ||
949 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | |
950 | if (e == NULL) | |
951 | return; | |
952 | ||
953 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; | |
954 | e->interrupt.closure = client->iso_closure; | |
955 | e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, | |
956 | completed); | |
957 | queue_event(client, &e->event, &e->interrupt, | |
958 | sizeof(e->interrupt), NULL, 0); | |
959 | } | |
960 | ||
961 | static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context) | |
962 | { | |
963 | if (context->type == FW_ISO_CONTEXT_TRANSMIT) | |
964 | return DMA_TO_DEVICE; | |
965 | else | |
966 | return DMA_FROM_DEVICE; | |
967 | } | |
968 | ||
969 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) | |
970 | { | |
971 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; | |
972 | struct fw_iso_context *context; | |
973 | fw_iso_callback_t cb; | |
974 | int ret; | |
975 | ||
976 | BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || | |
977 | FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || | |
978 | FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != | |
979 | FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); | |
980 | ||
981 | switch (a->type) { | |
982 | case FW_ISO_CONTEXT_TRANSMIT: | |
983 | if (a->speed > SCODE_3200 || a->channel > 63) | |
984 | return -EINVAL; | |
985 | ||
986 | cb = iso_callback; | |
987 | break; | |
988 | ||
989 | case FW_ISO_CONTEXT_RECEIVE: | |
990 | if (a->header_size < 4 || (a->header_size & 3) || | |
991 | a->channel > 63) | |
992 | return -EINVAL; | |
993 | ||
994 | cb = iso_callback; | |
995 | break; | |
996 | ||
997 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | |
998 | cb = (fw_iso_callback_t)iso_mc_callback; | |
999 | break; | |
1000 | ||
1001 | default: | |
1002 | return -EINVAL; | |
1003 | } | |
1004 | ||
1005 | context = fw_iso_context_create(client->device->card, a->type, | |
1006 | a->channel, a->speed, a->header_size, cb, client); | |
1007 | if (IS_ERR(context)) | |
1008 | return PTR_ERR(context); | |
1009 | if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) | |
1010 | context->drop_overflow_headers = true; | |
1011 | ||
1012 | /* We only support one context at this time. */ | |
1013 | spin_lock_irq(&client->lock); | |
1014 | if (client->iso_context != NULL) { | |
1015 | spin_unlock_irq(&client->lock); | |
1016 | fw_iso_context_destroy(context); | |
1017 | ||
1018 | return -EBUSY; | |
1019 | } | |
1020 | if (!client->buffer_is_mapped) { | |
1021 | ret = fw_iso_buffer_map_dma(&client->buffer, | |
1022 | client->device->card, | |
1023 | iso_dma_direction(context)); | |
1024 | if (ret < 0) { | |
1025 | spin_unlock_irq(&client->lock); | |
1026 | fw_iso_context_destroy(context); | |
1027 | ||
1028 | return ret; | |
1029 | } | |
1030 | client->buffer_is_mapped = true; | |
1031 | } | |
1032 | client->iso_closure = a->closure; | |
1033 | client->iso_context = context; | |
1034 | spin_unlock_irq(&client->lock); | |
1035 | ||
1036 | a->handle = 0; | |
1037 | ||
1038 | return 0; | |
1039 | } | |
1040 | ||
1041 | static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) | |
1042 | { | |
1043 | struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; | |
1044 | struct fw_iso_context *ctx = client->iso_context; | |
1045 | ||
1046 | if (ctx == NULL || a->handle != 0) | |
1047 | return -EINVAL; | |
1048 | ||
1049 | return fw_iso_context_set_channels(ctx, &a->channels); | |
1050 | } | |
1051 | ||
1052 | /* Macros for decoding the iso packet control header. */ | |
1053 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) | |
1054 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) | |
1055 | #define GET_SKIP(v) (((v) >> 17) & 0x01) | |
1056 | #define GET_TAG(v) (((v) >> 18) & 0x03) | |
1057 | #define GET_SY(v) (((v) >> 20) & 0x0f) | |
1058 | #define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) | |
1059 | ||
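/*
 * FW_CDEV_IOC_QUEUE_ISO: parse an array of fw_cdev_iso_packet control
 * words from userspace and queue the packets on the iso context.
 * Returns the number of packets queued and updates a->packets, a->size
 * and a->data so that userspace can resubmit the remainder.
 */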
1060 | static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |
1061 | { | |
1062 | struct fw_cdev_queue_iso *a = &arg->queue_iso; | |
1063 | struct fw_cdev_iso_packet __user *p, *end, *next; | |
1064 | struct fw_iso_context *ctx = client->iso_context; | |
1065 | unsigned long payload, buffer_end, transmit_header_bytes = 0; | |
1066 | u32 control; | |
1067 | int count; | |
1068 | struct { | |
1069 | struct fw_iso_packet packet; | |
1070 | u8 header[256]; | |
1071 | } u; | |
1072 | ||
1073 | if (ctx == NULL || a->handle != 0) | |
1074 | return -EINVAL; | |
1075 | ||
1076 | /* | |
1077 | * If the user passes a non-NULL data pointer, has mmap()'ed | |
1078 | * the iso buffer, and the pointer points inside the buffer, | |
1079 | * we setup the payload pointers accordingly. Otherwise we | |
1080 | * set them both to 0, which will still let packets with | |
1081 | * payload_length == 0 through. In other words, if no packets | |
1082 | * use the indirect payload, the iso buffer need not be mapped | |
1083 | * and the a->data pointer is ignored. | |
1084 | */ | |
1085 | payload = (unsigned long)a->data - client->vm_start; | |
1086 | buffer_end = client->buffer.page_count << PAGE_SHIFT; | |
1087 | if (a->data == 0 || client->buffer.pages == NULL || | |
1088 | payload >= buffer_end) { | |
1089 | payload = 0; | |
1090 | buffer_end = 0; | |
1091 | } | |
1092 | ||
1093 | if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) | |
1094 | return -EINVAL; | |
1095 | ||
1096 | p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); | |
1097 | if (!access_ok(VERIFY_READ, p, a->size)) | |
1098 | return -EFAULT; | |
1099 | ||
1100 | end = (void __user *)p + a->size; | |
1101 | count = 0; | |
1102 | while (p < end) { | |
1103 | if (get_user(control, &p->control)) | |
1104 | return -EFAULT; | |
1105 | u.packet.payload_length = GET_PAYLOAD_LENGTH(control); | |
1106 | u.packet.interrupt = GET_INTERRUPT(control); | |
1107 | u.packet.skip = GET_SKIP(control); | |
1108 | u.packet.tag = GET_TAG(control); | |
1109 | u.packet.sy = GET_SY(control); | |
1110 | u.packet.header_length = GET_HEADER_LENGTH(control); | |
1111 | ||
1112 | switch (ctx->type) { | |
1113 | case FW_ISO_CONTEXT_TRANSMIT: | |
1114 | if (u.packet.header_length & 3) | |
1115 | return -EINVAL; | |
1116 | transmit_header_bytes = u.packet.header_length; | |
1117 | break; | |
1118 | ||
1119 | case FW_ISO_CONTEXT_RECEIVE: | |
1120 | if (u.packet.header_length == 0 || | |
1121 | u.packet.header_length % ctx->header_size != 0) | |
1122 | return -EINVAL; | |
1123 | break; | |
1124 | ||
1125 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | |
1126 | if (u.packet.payload_length == 0 || | |
1127 | u.packet.payload_length & 3) | |
1128 | return -EINVAL; | |
1129 | break; | |
1130 | } | |
1131 | ||
1132 | next = (struct fw_cdev_iso_packet __user *) | |
1133 | &p->header[transmit_header_bytes / 4]; | |
1134 | if (next > end) | |
1135 | return -EINVAL; | |
1136 | if (__copy_from_user | |
1137 | (u.packet.header, p->header, transmit_header_bytes)) | |
1138 | return -EFAULT; | |
1139 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && | |
1140 | u.packet.header_length + u.packet.payload_length > 0) | |
1141 | return -EINVAL; | |
1142 | if (payload + u.packet.payload_length > buffer_end) | |
1143 | return -EINVAL; | |
1144 | ||
1145 | if (fw_iso_context_queue(ctx, &u.packet, | |
1146 | &client->buffer, payload)) | |
1147 | break; | |
1148 | ||
1149 | p = next; | |
1150 | payload += u.packet.payload_length; | |
1151 | count++; | |
1152 | } | |
1153 | fw_iso_context_queue_flush(ctx); | |
1154 | ||
1155 | a->size -= uptr_to_u64(p) - a->packets; | |
1156 | a->packets = uptr_to_u64(p); | |
1157 | a->data = client->vm_start + payload; | |
1158 | ||
1159 | return count; | |
1160 | } | |
1161 | ||
1162 | static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) | |
1163 | { | |
1164 | struct fw_cdev_start_iso *a = &arg->start_iso; | |
1165 | ||
1166 | BUILD_BUG_ON( | |
1167 | FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || | |
1168 | FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || | |
1169 | FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || | |
1170 | FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || | |
1171 | FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); | |
1172 | ||
1173 | if (client->iso_context == NULL || a->handle != 0) | |
1174 | return -EINVAL; | |
1175 | ||
1176 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE && | |
1177 | (a->tags == 0 || a->tags > 15 || a->sync > 15)) | |
1178 | return -EINVAL; | |
1179 | ||
1180 | return fw_iso_context_start(client->iso_context, | |
1181 | a->cycle, a->sync, a->tags); | |
1182 | } | |
1183 | ||
1184 | static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg) | |
1185 | { | |
1186 | struct fw_cdev_stop_iso *a = &arg->stop_iso; | |
1187 | ||
1188 | if (client->iso_context == NULL || a->handle != 0) | |
1189 | return -EINVAL; | |
1190 | ||
1191 | return fw_iso_context_stop(client->iso_context); | |
1192 | } | |
1193 | ||
1194 | static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg) | |
1195 | { | |
1196 | struct fw_cdev_flush_iso *a = &arg->flush_iso; | |
1197 | ||
1198 | if (client->iso_context == NULL || a->handle != 0) | |
1199 | return -EINVAL; | |
1200 | ||
1201 | return fw_iso_context_flush_completions(client->iso_context); | |
1202 | } | |
1203 | ||
1204 | static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) | |
1205 | { | |
1206 | struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; | |
1207 | struct fw_card *card = client->device->card; | |
1208 | struct timespec ts = {0, 0}; | |
1209 | u32 cycle_time; | |
1210 | int ret = 0; | |
1211 | ||
1212 | local_irq_disable(); | |
1213 | ||
1214 | cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); | |
1215 | ||
1216 | switch (a->clk_id) { | |
1217 | case CLOCK_REALTIME: getnstimeofday(&ts); break; | |
1218 | case CLOCK_MONOTONIC: ktime_get_ts(&ts); break; | |
1219 | case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; | |
1220 | default: | |
1221 | ret = -EINVAL; | |
1222 | } | |
1223 | ||
1224 | local_irq_enable(); | |
1225 | ||
1226 | a->tv_sec = ts.tv_sec; | |
1227 | a->tv_nsec = ts.tv_nsec; | |
1228 | a->cycle_timer = cycle_time; | |
1229 | ||
1230 | return ret; | |
1231 | } | |
1232 | ||
1233 | static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) | |
1234 | { | |
1235 | struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer; | |
1236 | struct fw_cdev_get_cycle_timer2 ct2; | |
1237 | ||
1238 | ct2.clk_id = CLOCK_REALTIME; | |
1239 | ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2); | |
1240 | ||
1241 | a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC; | |
1242 | a->cycle_timer = ct2.cycle_timer; | |
1243 | ||
1244 | return 0; | |
1245 | } | |
1246 | ||
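/*
 * Worker for iso resource management.  Depending on r->todo it
 * allocates, reallocates after a bus reset, or deallocates channel and
 * bandwidth via fw_iso_resource_manage(), and queues an allocation or
 * deallocation event to the client unless a reallocation simply
 * succeeded.
 */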
1247 | static void iso_resource_work(struct work_struct *work) | |
1248 | { | |
1249 | struct iso_resource_event *e; | |
1250 | struct iso_resource *r = | |
1251 | container_of(work, struct iso_resource, work.work); | |
1252 | struct client *client = r->client; | |
1253 | int generation, channel, bandwidth, todo; | |
1254 | bool skip, free, success; | |
1255 | ||
1256 | spin_lock_irq(&client->lock); | |
1257 | generation = client->device->generation; | |
1258 | todo = r->todo; | |
1259 | /* Allow 1000ms grace period for other reallocations. */ | |
1260 | if (todo == ISO_RES_ALLOC && | |
1261 | time_before64(get_jiffies_64(), | |
1262 | client->device->card->reset_jiffies + HZ)) { | |
1263 | schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); | |
1264 | skip = true; | |
1265 | } else { | |
1266 | /* We could be called twice within the same generation. */ | |
1267 | skip = todo == ISO_RES_REALLOC && | |
1268 | r->generation == generation; | |
1269 | } | |
1270 | free = todo == ISO_RES_DEALLOC || | |
1271 | todo == ISO_RES_ALLOC_ONCE || | |
1272 | todo == ISO_RES_DEALLOC_ONCE; | |
1273 | r->generation = generation; | |
1274 | spin_unlock_irq(&client->lock); | |
1275 | ||
1276 | if (skip) | |
1277 | goto out; | |
1278 | ||
1279 | bandwidth = r->bandwidth; | |
1280 | ||
1281 | fw_iso_resource_manage(client->device->card, generation, | |
1282 | r->channels, &channel, &bandwidth, | |
1283 | todo == ISO_RES_ALLOC || | |
1284 | todo == ISO_RES_REALLOC || | |
1285 | todo == ISO_RES_ALLOC_ONCE); | |
1286 | /* | |
1287 | * Is this generation outdated already? As long as this resource sticks | |
1288 | * in the idr, it will be scheduled again for a newer generation or at | |
1289 | * shutdown. | |
1290 | */ | |
1291 | if (channel == -EAGAIN && | |
1292 | (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) | |
1293 | goto out; | |
1294 | ||
1295 | success = channel >= 0 || bandwidth > 0; | |
1296 | ||
1297 | spin_lock_irq(&client->lock); | |
1298 | /* | |
1299 | * Transit from allocation to reallocation, except if the client | |
1300 | * requested deallocation in the meantime. | |
1301 | */ | |
1302 | if (r->todo == ISO_RES_ALLOC) | |
1303 | r->todo = ISO_RES_REALLOC; | |
1304 | /* | |
1305 | * Allocation or reallocation failure? Pull this resource out of the | |
1306 | * idr and prepare for deletion, unless the client is shutting down. | |
1307 | */ | |
1308 | if (r->todo == ISO_RES_REALLOC && !success && | |
1309 | !client->in_shutdown && | |
1310 | idr_remove(&client->resource_idr, r->resource.handle)) { | |
1311 | client_put(client); | |
1312 | free = true; | |
1313 | } | |
1314 | spin_unlock_irq(&client->lock); | |
1315 | ||
1316 | if (todo == ISO_RES_ALLOC && channel >= 0) | |
1317 | r->channels = 1ULL << channel; | |
1318 | ||
1319 | if (todo == ISO_RES_REALLOC && success) | |
1320 | goto out; | |
1321 | ||
1322 | if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { | |
1323 | e = r->e_alloc; | |
1324 | r->e_alloc = NULL; | |
1325 | } else { | |
1326 | e = r->e_dealloc; | |
1327 | r->e_dealloc = NULL; | |
1328 | } | |
1329 | e->iso_resource.handle = r->resource.handle; | |
1330 | e->iso_resource.channel = channel; | |
1331 | e->iso_resource.bandwidth = bandwidth; | |
1332 | ||
1333 | queue_event(client, &e->event, | |
1334 | &e->iso_resource, sizeof(e->iso_resource), NULL, 0); | |
1335 | ||
1336 | if (free) { | |
1337 | cancel_delayed_work(&r->work); | |
1338 | kfree(r->e_alloc); | |
1339 | kfree(r->e_dealloc); | |
1340 | kfree(r); | |
1341 | } | |
1342 | out: | |
1343 | client_put(client); | |
1344 | } | |
1345 | ||
1346 | static void release_iso_resource(struct client *client, | |
1347 | struct client_resource *resource) | |
1348 | { | |
1349 | struct iso_resource *r = | |
1350 | container_of(resource, struct iso_resource, resource); | |
1351 | ||
1352 | spin_lock_irq(&client->lock); | |
1353 | r->todo = ISO_RES_DEALLOC; | |
1354 | schedule_iso_resource(r, 0); | |
1355 | spin_unlock_irq(&client->lock); | |
1356 | } | |
1357 | ||
1358 | static int init_iso_resource(struct client *client, | |
1359 | struct fw_cdev_allocate_iso_resource *request, int todo) | |
1360 | { | |
1361 | struct iso_resource_event *e1, *e2; | |
1362 | struct iso_resource *r; | |
1363 | int ret; | |
1364 | ||
1365 | if ((request->channels == 0 && request->bandwidth == 0) || | |
1366 | request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) | |
1367 | return -EINVAL; | |
1368 | ||
1369 | r = kmalloc(sizeof(*r), GFP_KERNEL); | |
1370 | e1 = kmalloc(sizeof(*e1), GFP_KERNEL); | |
1371 | e2 = kmalloc(sizeof(*e2), GFP_KERNEL); | |
1372 | if (r == NULL || e1 == NULL || e2 == NULL) { | |
1373 | ret = -ENOMEM; | |
1374 | goto fail; | |
1375 | } | |
1376 | ||
1377 | INIT_DELAYED_WORK(&r->work, iso_resource_work); | |
1378 | r->client = client; | |
1379 | r->todo = todo; | |
1380 | r->generation = -1; | |
1381 | r->channels = request->channels; | |
1382 | r->bandwidth = request->bandwidth; | |
1383 | r->e_alloc = e1; | |
1384 | r->e_dealloc = e2; | |
1385 | ||
1386 | e1->iso_resource.closure = request->closure; | |
1387 | e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; | |
1388 | e2->iso_resource.closure = request->closure; | |
1389 | e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; | |
1390 | ||
1391 | if (todo == ISO_RES_ALLOC) { | |
1392 | r->resource.release = release_iso_resource; | |
1393 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | |
1394 | if (ret < 0) | |
1395 | goto fail; | |
1396 | } else { | |
1397 | r->resource.release = NULL; | |
1398 | r->resource.handle = -1; | |
1399 | schedule_iso_resource(r, 0); | |
1400 | } | |
1401 | request->handle = r->resource.handle; | |
1402 | ||
1403 | return 0; | |
1404 | fail: | |
1405 | kfree(r); | |
1406 | kfree(e1); | |
1407 | kfree(e2); | |
1408 | ||
1409 | return ret; | |
1410 | } | |
1411 | ||
1412 | static int ioctl_allocate_iso_resource(struct client *client, | |
1413 | union ioctl_arg *arg) | |
1414 | { | |
1415 | return init_iso_resource(client, | |
1416 | &arg->allocate_iso_resource, ISO_RES_ALLOC); | |
1417 | } | |
1418 | ||
1419 | static int ioctl_deallocate_iso_resource(struct client *client, | |
1420 | union ioctl_arg *arg) | |
1421 | { | |
1422 | return release_client_resource(client, | |
1423 | arg->deallocate.handle, release_iso_resource, NULL); | |
1424 | } | |
1425 | ||
1426 | static int ioctl_allocate_iso_resource_once(struct client *client, | |
1427 | union ioctl_arg *arg) | |
1428 | { | |
1429 | return init_iso_resource(client, | |
1430 | &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE); | |
1431 | } | |
1432 | ||
1433 | static int ioctl_deallocate_iso_resource_once(struct client *client, | |
1434 | union ioctl_arg *arg) | |
1435 | { | |
1436 | return init_iso_resource(client, | |
1437 | &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE); | |
1438 | } | |
1439 | ||
1440 | /* | |
1441 | * Returns a speed code: Maximum speed to or from this device, | |
1442 | * limited by the device's link speed, the local node's link speed, | |
1443 | * and all PHY port speeds between the two links. | |
1444 | */ | |
1445 | static int ioctl_get_speed(struct client *client, union ioctl_arg *arg) | |
1446 | { | |
1447 | return client->device->max_speed; | |
1448 | } | |
1449 | ||
1450 | static int ioctl_send_broadcast_request(struct client *client, | |
1451 | union ioctl_arg *arg) | |
1452 | { | |
1453 | struct fw_cdev_send_request *a = &arg->send_request; | |
1454 | ||
1455 | switch (a->tcode) { | |
1456 | case TCODE_WRITE_QUADLET_REQUEST: | |
1457 | case TCODE_WRITE_BLOCK_REQUEST: | |
1458 | break; | |
1459 | default: | |
1460 | return -EINVAL; | |
1461 | } | |
1462 | ||
1463 | /* Security policy: Only allow accesses to Units Space. */ | |
1464 | if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) | |
1465 | return -EACCES; | |
1466 | ||
1467 | return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100); | |
1468 | } | |
1469 | ||
1470 | static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg) | |
1471 | { | |
1472 | struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet; | |
1473 | struct fw_cdev_send_request request; | |
1474 | int dest; | |
1475 | ||
1476 | if (a->speed > client->device->card->link_speed || | |
1477 | a->length > 1024 << a->speed) | |
1478 | return -EIO; | |
1479 | ||
1480 | if (a->tag > 3 || a->channel > 63 || a->sy > 15) | |
1481 | return -EINVAL; | |
1482 | ||
1483 | dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy); | |
1484 | request.tcode = TCODE_STREAM_DATA; | |
1485 | request.length = a->length; | |
1486 | request.closure = a->closure; | |
1487 | request.data = a->data; | |
1488 | request.generation = a->generation; | |
1489 | ||
1490 | return init_request(client, &request, dest, a->speed); | |
1491 | } | |
1492 | ||
1493 | static void outbound_phy_packet_callback(struct fw_packet *packet, | |
1494 | struct fw_card *card, int status) | |
1495 | { | |
1496 | struct outbound_phy_packet_event *e = | |
1497 | container_of(packet, struct outbound_phy_packet_event, p); | |
1498 | ||
1499 | switch (status) { | |
1500 | /* expected: */ | |
1501 | case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; | |
1502 | /* should never happen with PHY packets: */ | |
1503 | case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; | |
1504 | case ACK_BUSY_X: | |
1505 | case ACK_BUSY_A: | |
1506 | case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; | |
1507 | case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; | |
1508 | case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; | |
1509 | /* stale generation; cancelled; on certain controllers: no ack */ | |
1510 | default: e->phy_packet.rcode = status; break; | |
1511 | } | |
1512 | e->phy_packet.data[0] = packet->timestamp; | |
1513 | ||
1514 | queue_event(e->client, &e->event, &e->phy_packet, | |
1515 | sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); | |
1516 | client_put(e->client); | |
1517 | } | |
1518 | ||
1519 | static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) | |
1520 | { | |
1521 | struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; | |
1522 | struct fw_card *card = client->device->card; | |
1523 | struct outbound_phy_packet_event *e; | |
1524 | ||
1525 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | |
1526 | if (!client->device->is_local) | |
1527 | return -ENOSYS; | |
1528 | ||
1529 | e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); | |
1530 | if (e == NULL) | |
1531 | return -ENOMEM; | |
1532 | ||
1533 | client_get(client); | |
1534 | e->client = client; | |
1535 | e->p.speed = SCODE_100; | |
1536 | e->p.generation = a->generation; | |
1537 | e->p.header[0] = TCODE_LINK_INTERNAL << 4; | |
1538 | e->p.header[1] = a->data[0]; | |
1539 | e->p.header[2] = a->data[1]; | |
1540 | e->p.header_length = 12; | |
1541 | e->p.callback = outbound_phy_packet_callback; | |
1542 | e->phy_packet.closure = a->closure; | |
1543 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; | |
1544 | if (is_ping_packet(a->data)) | |
1545 | e->phy_packet.length = 4; | |
1546 | ||
1547 | card->driver->send_request(card, &e->p); | |
1548 | ||
1549 | return 0; | |
1550 | } | |
1551 | ||
1552 | static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) | |
1553 | { | |
1554 | struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; | |
1555 | struct fw_card *card = client->device->card; | |
1556 | ||
1557 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | |
1558 | if (!client->device->is_local) | |
1559 | return -ENOSYS; | |
1560 | ||
1561 | spin_lock_irq(&card->lock); | |
1562 | ||
1563 | list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); | |
1564 | client->phy_receiver_closure = a->closure; | |
1565 | ||
1566 | spin_unlock_irq(&card->lock); | |
1567 | ||
1568 | return 0; | |
1569 | } | |
1570 | ||
1571 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) | |
1572 | { | |
1573 | struct client *client; | |
1574 | struct inbound_phy_packet_event *e; | |
1575 | unsigned long flags; | |
1576 | ||
1577 | spin_lock_irqsave(&card->lock, flags); | |
1578 | ||
1579 | list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { | |
1580 | e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); | |
1581 | if (e == NULL) | |
1582 | break; | |
1583 | ||
1584 | e->phy_packet.closure = client->phy_receiver_closure; | |
1585 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; | |
1586 | e->phy_packet.rcode = RCODE_COMPLETE; | |
1587 | e->phy_packet.length = 8; | |
1588 | e->phy_packet.data[0] = p->header[1]; | |
1589 | e->phy_packet.data[1] = p->header[2]; | |
1590 | queue_event(client, &e->event, | |
1591 | &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); | |
1592 | } | |
1593 | ||
1594 | spin_unlock_irqrestore(&card->lock, flags); | |
1595 | } | |
1596 | ||
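/*
 * Dispatch table, indexed by _IOC_NR() of the command.  The slot
 * numbers correspond to the FW_CDEV_IOC_* definitions in
 * linux/firewire-cdev.h and must not be reordered.
 */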
1597 | static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { | |
1598 | [0x00] = ioctl_get_info, | |
1599 | [0x01] = ioctl_send_request, | |
1600 | [0x02] = ioctl_allocate, | |
1601 | [0x03] = ioctl_deallocate, | |
1602 | [0x04] = ioctl_send_response, | |
1603 | [0x05] = ioctl_initiate_bus_reset, | |
1604 | [0x06] = ioctl_add_descriptor, | |
1605 | [0x07] = ioctl_remove_descriptor, | |
1606 | [0x08] = ioctl_create_iso_context, | |
1607 | [0x09] = ioctl_queue_iso, | |
1608 | [0x0a] = ioctl_start_iso, | |
1609 | [0x0b] = ioctl_stop_iso, | |
1610 | [0x0c] = ioctl_get_cycle_timer, | |
1611 | [0x0d] = ioctl_allocate_iso_resource, | |
1612 | [0x0e] = ioctl_deallocate_iso_resource, | |
1613 | [0x0f] = ioctl_allocate_iso_resource_once, | |
1614 | [0x10] = ioctl_deallocate_iso_resource_once, | |
1615 | [0x11] = ioctl_get_speed, | |
1616 | [0x12] = ioctl_send_broadcast_request, | |
1617 | [0x13] = ioctl_send_stream_packet, | |
1618 | [0x14] = ioctl_get_cycle_timer2, | |
1619 | [0x15] = ioctl_send_phy_packet, | |
1620 | [0x16] = ioctl_receive_phy_packets, | |
1621 | [0x17] = ioctl_set_iso_channels, | |
1622 | [0x18] = ioctl_flush_iso, | |
1623 | }; | |
1624 | ||
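/*
 * Common ioctl path: validate the command, copy the argument struct in
 * from userspace if the command has a write direction, call the
 * handler, and copy the struct back out if the command has a read
 * direction.
 */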
1625 | static int dispatch_ioctl(struct client *client, | |
1626 | unsigned int cmd, void __user *arg) | |
1627 | { | |
1628 | union ioctl_arg buffer; | |
1629 | int ret; | |
1630 | ||
1631 | if (fw_device_is_shutdown(client->device)) | |
1632 | return -ENODEV; | |
1633 | ||
1634 | if (_IOC_TYPE(cmd) != '#' || | |
1635 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) || | |
1636 | _IOC_SIZE(cmd) > sizeof(buffer)) | |
1637 | return -ENOTTY; | |
1638 | ||
1639 | memset(&buffer, 0, sizeof(buffer)); | |
1640 | ||
1641 | if (_IOC_DIR(cmd) & _IOC_WRITE) | |
1642 | if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) | |
1643 | return -EFAULT; | |
1644 | ||
1645 | ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); | |
1646 | if (ret < 0) | |
1647 | return ret; | |
1648 | ||
1649 | if (_IOC_DIR(cmd) & _IOC_READ) | |
1650 | if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) | |
1651 | return -EFAULT; | |
1652 | ||
1653 | return ret; | |
1654 | } | |
1655 | ||
1656 | static long fw_device_op_ioctl(struct file *file, | |
1657 | unsigned int cmd, unsigned long arg) | |
1658 | { | |
1659 | return dispatch_ioctl(file->private_data, cmd, (void __user *)arg); | |
1660 | } | |
1661 | ||
1662 | #ifdef CONFIG_COMPAT | |
1663 | static long fw_device_op_compat_ioctl(struct file *file, | |
1664 | unsigned int cmd, unsigned long arg) | |
1665 | { | |
1666 | return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg)); | |
1667 | } | |
1668 | #endif | |
1669 | ||
1670 | static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) | |
1671 | { | |
1672 | struct client *client = file->private_data; | |
1673 | unsigned long size; | |
1674 | int page_count, ret; | |
1675 | ||
1676 | if (fw_device_is_shutdown(client->device)) | |
1677 | return -ENODEV; | |
1678 | ||
1679 | /* FIXME: We could support multiple buffers, but we don't. */ | |
1680 | if (client->buffer.pages != NULL) | |
1681 | return -EBUSY; | |
1682 | ||
1683 | if (!(vma->vm_flags & VM_SHARED)) | |
1684 | return -EINVAL; | |
1685 | ||
1686 | if (vma->vm_start & ~PAGE_MASK) | |
1687 | return -EINVAL; | |
1688 | ||
1689 | client->vm_start = vma->vm_start; | |
1690 | size = vma->vm_end - vma->vm_start; | |
1691 | page_count = size >> PAGE_SHIFT; | |
1692 | if (size & ~PAGE_MASK) | |
1693 | return -EINVAL; | |
1694 | ||
1695 | ret = fw_iso_buffer_alloc(&client->buffer, page_count); | |
1696 | if (ret < 0) | |
1697 | return ret; | |
1698 | ||
1699 | spin_lock_irq(&client->lock); | |
1700 | if (client->iso_context) { | |
1701 | ret = fw_iso_buffer_map_dma(&client->buffer, | |
1702 | client->device->card, | |
1703 | iso_dma_direction(client->iso_context)); | |
1704 | client->buffer_is_mapped = (ret == 0); | |
1705 | } | |
1706 | spin_unlock_irq(&client->lock); | |
1707 | if (ret < 0) | |
1708 | goto fail; | |
1709 | ||
1710 | ret = fw_iso_buffer_map_vma(&client->buffer, vma); | |
1711 | if (ret < 0) | |
1712 | goto fail; | |
1713 | ||
1714 | return 0; | |
1715 | fail: | |
1716 | fw_iso_buffer_destroy(&client->buffer, client->device->card); | |
1717 | return ret; | |
1718 | } | |
1719 | ||
1720 | static int is_outbound_transaction_resource(int id, void *p, void *data) | |
1721 | { | |
1722 | struct client_resource *resource = p; | |
1723 | ||
1724 | return resource->release == release_transaction; | |
1725 | } | |
1726 | ||
1727 | static int has_outbound_transactions(struct client *client) | |
1728 | { | |
1729 | int ret; | |
1730 | ||
1731 | spin_lock_irq(&client->lock); | |
1732 | ret = idr_for_each(&client->resource_idr, | |
1733 | is_outbound_transaction_resource, NULL); | |
1734 | spin_unlock_irq(&client->lock); | |
1735 | ||
1736 | return ret; | |
1737 | } | |
1738 | ||
1739 | static int shutdown_resource(int id, void *p, void *data) | |
1740 | { | |
1741 | struct client_resource *resource = p; | |
1742 | struct client *client = data; | |
1743 | ||
1744 | resource->release(client, resource); | |
1745 | client_put(client); | |
1746 | ||
1747 | return 0; | |
1748 | } | |
1749 | ||
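/*
 * Tear the client down on final close: unlink it from card and device
 * lists, destroy the iso context and buffer, wait until all outbound
 * transactions have completed, release the remaining resources, free
 * any queued events and drop the file's reference on the client.
 */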
1750 | static int fw_device_op_release(struct inode *inode, struct file *file) | |
1751 | { | |
1752 | struct client *client = file->private_data; | |
1753 | struct event *event, *next_event; | |
1754 | ||
1755 | spin_lock_irq(&client->device->card->lock); | |
1756 | list_del(&client->phy_receiver_link); | |
1757 | spin_unlock_irq(&client->device->card->lock); | |
1758 | ||
1759 | mutex_lock(&client->device->client_list_mutex); | |
1760 | list_del(&client->link); | |
1761 | mutex_unlock(&client->device->client_list_mutex); | |
1762 | ||
1763 | if (client->iso_context) | |
1764 | fw_iso_context_destroy(client->iso_context); | |
1765 | ||
1766 | if (client->buffer.pages) | |
1767 | fw_iso_buffer_destroy(&client->buffer, client->device->card); | |
1768 | ||
1769 | /* Freeze client->resource_idr and client->event_list */ | |
1770 | spin_lock_irq(&client->lock); | |
1771 | client->in_shutdown = true; | |
1772 | spin_unlock_irq(&client->lock); | |
1773 | ||
1774 | wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); | |
1775 | ||
1776 | idr_for_each(&client->resource_idr, shutdown_resource, client); | |
1777 | idr_destroy(&client->resource_idr); | |
1778 | ||
1779 | list_for_each_entry_safe(event, next_event, &client->event_list, link) | |
1780 | kfree(event); | |
1781 | ||
1782 | client_put(client); | |
1783 | ||
1784 | return 0; | |
1785 | } | |
1786 | ||
1787 | static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) | |
1788 | { | |
1789 | struct client *client = file->private_data; | |
1790 | unsigned int mask = 0; | |
1791 | ||
1792 | poll_wait(file, &client->wait, pt); | |
1793 | ||
1794 | if (fw_device_is_shutdown(client->device)) | |
1795 | mask |= POLLHUP | POLLERR; | |
1796 | if (!list_empty(&client->event_list)) | |
1797 | mask |= POLLIN | POLLRDNORM; | |
1798 | ||
1799 | return mask; | |
1800 | } | |
1801 | ||
1802 | const struct file_operations fw_device_ops = { | |
1803 | .owner = THIS_MODULE, | |
1804 | .llseek = no_llseek, | |
1805 | .open = fw_device_op_open, | |
1806 | .read = fw_device_op_read, | |
1807 | .unlocked_ioctl = fw_device_op_ioctl, | |
1808 | .mmap = fw_device_op_mmap, | |
1809 | .release = fw_device_op_release, | |
1810 | .poll = fw_device_op_poll, | |
1811 | #ifdef CONFIG_COMPAT | |
1812 | .compat_ioctl = fw_device_op_compat_ioctl, | |
1813 | #endif | |
1814 | }; |