drivers/macintosh/adb.c
/*
 * Device driver for the Apple Desktop Bus
 * and the /dev/adb device on Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Modified to declare controllers as structures, add client
 * notification of bus reset and handle PowerBook sleep,
 * by Benjamin Herrenschmidt.
 *
 * To do:
 *
 * - /sys/bus/adb to list the devices and their info
 * - extend /dev/adb to allow userland to receive the
 *   flow of auto-polling data from a given device.
 * - move bus probe to a kernel thread
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#endif

EXPORT_SYMBOL(adb_controller);
EXPORT_SYMBOL(adb_client_list);

extern struct adb_driver via_macii_driver;
extern struct adb_driver via_maciisi_driver;
extern struct adb_driver via_cuda_driver;
extern struct adb_driver adb_iop_driver;
extern struct adb_driver via_pmu_driver;
extern struct adb_driver macio_adb_driver;

static struct adb_driver *adb_driver_list[] = {
#ifdef CONFIG_ADB_MACII
        &via_macii_driver,
#endif
#ifdef CONFIG_ADB_MACIISI
        &via_maciisi_driver,
#endif
#ifdef CONFIG_ADB_CUDA
        &via_cuda_driver,
#endif
#ifdef CONFIG_ADB_IOP
        &adb_iop_driver,
#endif
#if defined(CONFIG_ADB_PMU) || defined(CONFIG_ADB_PMU68K)
        &via_pmu_driver,
#endif
#ifdef CONFIG_ADB_MACIO
        &macio_adb_driver,
#endif
        NULL
};

static struct class *adb_dev_class;

struct adb_driver *adb_controller;
BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
static DECLARE_MUTEX(adb_probe_mutex);
static int sleepy_trackpad;
static int autopoll_devs;
int __adb_probe_sync;

static int adb_scan_bus(void);
static int do_adb_reset_bus(void);
static void adbdev_init(void);
static int try_handler_change(int, int);

static struct adb_handler {
        void (*handler)(unsigned char *, int, int);
        int original_address;
        int handler_id;
        int busy;
} adb_handler[16];

/*
 * The adb_handler_mutex mutex protects all accesses to the original_address
 * and handler_id fields of adb_handler[i] for all i, and changes to the
 * handler field.
 * Accesses to the handler field are protected by the adb_handler_lock
 * rwlock.  It is held across all calls to any handler, so that by the
 * time adb_unregister returns, we know that the old handler isn't being
 * called.
 */
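/*
 * A sketch of how the two locks cooperate (see adb_input() and
 * adb_unregister() below): a reader takes adb_handler_lock, copies the
 * handler pointer and marks the entry busy before calling it, while
 * adb_unregister() takes adb_handler_mutex and then repeatedly drops and
 * retakes the rwlock until the busy flag clears before clearing the
 * handler pointer.
 */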
static DEFINE_MUTEX(adb_handler_mutex);
static DEFINE_RWLOCK(adb_handler_lock);

#if 0
static void printADBreply(struct adb_request *req)
{
        int i;

        printk("adb reply (%d)", req->reply_len);
        for (i = 0; i < req->reply_len; i++)
                printk(" %x", req->reply[i]);
        printk("\n");
}
#endif

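/*
 * Quick reference for the raw command bytes used in the bus scan below
 * (this matches the ADB_READREG()/ADB_WRITEREG() helpers in <linux/adb.h>):
 * the high nibble of a command byte is the device address and the low
 * nibble encodes command plus register, 0xc|reg being "Talk register reg"
 * and 0x8|reg being "Listen register reg".  So (i << 4) | 0xf is "Talk
 * register 3 of device i" and (i << 4) | 0xb is "Listen register 3 of
 * device i".
 */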
static int adb_scan_bus(void)
{
        int i, highFree = 0, noMovement;
        int devmask = 0;
        struct adb_request req;

        /* assumes adb_handler[] is all zeroes at this point */
        for (i = 1; i < 16; i++) {
                /* see if there is anything at address i */
                adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                            (i << 4) | 0xf);
                if (req.reply_len > 1)
                        /* one or more devices at this address */
                        adb_handler[i].original_address = i;
                else if (i > highFree)
                        highFree = i;
        }

        /* Note we reset noMovement to 0 each time we move a device */
        for (noMovement = 1; noMovement < 2 && highFree > 0; noMovement++) {
                for (i = 1; i < 16; i++) {
                        if (adb_handler[i].original_address == 0)
                                continue;
                        /*
                         * Send a "talk register 3" command to address i
                         * to provoke a collision if there is more than
                         * one device at this address.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (i << 4) | 0xf);
                        /*
                         * Move the device(s) which didn't detect a
                         * collision to address `highFree'.  Hopefully
                         * this only moves one device.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC, 3,
                                    (i << 4) | 0xb, (highFree | 0x60), 0xfe);
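                        /*
                         * In the Listen data above, (highFree | 0x60) is the
                         * new address with register 3's flag bits left set,
                         * and the 0xfe handler ID is the special value which,
                         * per Apple's ADB documentation, tells a device to
                         * take the new address only if it did not detect a
                         * collision on the preceding Talk.
                         */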
                        /*
                         * See if anybody actually moved. This is suggested
                         * by HW TechNote 01:
                         *
                         * http://developer.apple.com/technotes/hw/hw_01.html
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (highFree << 4) | 0xf);
                        if (req.reply_len <= 1)
                                continue;
                        /*
                         * Test whether there are any device(s) left
                         * at address i.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (i << 4) | 0xf);
                        if (req.reply_len > 1) {
                                /*
                                 * There are still one or more devices
                                 * left at address i.  Register the one(s)
                                 * we moved to `highFree', and find a new
                                 * value for highFree.
                                 */
                                adb_handler[highFree].original_address =
                                        adb_handler[i].original_address;
                                while (highFree > 0 &&
                                       adb_handler[highFree].original_address)
                                        highFree--;
                                if (highFree <= 0)
                                        break;

                                noMovement = 0;
                        } else {
                                /*
                                 * No devices left at address i; move the
                                 * one(s) we moved to `highFree' back to i.
                                 */
                                adb_request(&req, NULL, ADBREQ_SYNC, 3,
                                            (highFree << 4) | 0xb,
                                            (i | 0x60), 0xfe);
                        }
                }
        }

        /* Now fill in the handler_id field of the adb_handler entries. */
        printk(KERN_DEBUG "adb devices:");
        for (i = 1; i < 16; i++) {
                if (adb_handler[i].original_address == 0)
                        continue;
                adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                            (i << 4) | 0xf);
                adb_handler[i].handler_id = req.reply[2];
                printk(" [%d]: %d %x", i, adb_handler[i].original_address,
                       adb_handler[i].handler_id);
                devmask |= 1 << i;
        }
        printk("\n");
        return devmask;
}

/*
 * This kernel task handles ADB probing. It dies once probing is
 * completed.
 */
static int
adb_probe_task(void *x)
{
        printk(KERN_INFO "adb: starting probe task...\n");
        do_adb_reset_bus();
        printk(KERN_INFO "adb: finished probe task...\n");

        up(&adb_probe_mutex);

        return 0;
}

static void
__adb_probe_task(struct work_struct *bullshit)
{
        kthread_run(adb_probe_task, NULL, "kadbprobe");
}

static DECLARE_WORK(adb_reset_work, __adb_probe_task);

int
adb_reset_bus(void)
{
        if (__adb_probe_sync) {
                do_adb_reset_bus();
                return 0;
        }

        down(&adb_probe_mutex);
        schedule_work(&adb_reset_work);
        return 0;
}

#ifdef CONFIG_PM
/*
 * notify clients before sleep
 */
static int adb_suspend(struct platform_device *dev, pm_message_t state)
{
        adb_got_sleep = 1;
        /* We need to get a lock on the probe thread */
        down(&adb_probe_mutex);
        /* Stop autopoll */
        if (adb_controller->autopoll)
                adb_controller->autopoll(0);
        blocking_notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);

        return 0;
}

/*
 * reset bus after sleep
 */
static int adb_resume(struct platform_device *dev)
{
        adb_got_sleep = 0;
        up(&adb_probe_mutex);
        adb_reset_bus();

        return 0;
}
#endif /* CONFIG_PM */

int __init adb_init(void)
{
        struct adb_driver *driver;
        int i;

#ifdef CONFIG_PPC32
        if (!machine_is(chrp) && !machine_is(powermac))
                return 0;
#endif
#ifdef CONFIG_MAC
        if (!MACH_IS_MAC)
                return 0;
#endif

        /* xmon may do early-init */
        if (adb_inited)
                return 0;
        adb_inited = 1;

        adb_controller = NULL;

        i = 0;
        while ((driver = adb_driver_list[i++]) != NULL) {
                if (!driver->probe()) {
                        adb_controller = driver;
                        break;
                }
        }
        if ((adb_controller == NULL) || adb_controller->init()) {
                printk(KERN_WARNING "Warning: no ADB interface detected\n");
                adb_controller = NULL;
        } else {
#ifdef CONFIG_PPC
                if (machine_is_compatible("AAPL,PowerBook1998") ||
                    machine_is_compatible("PowerBook1,1"))
                        sleepy_trackpad = 1;
#endif /* CONFIG_PPC */

                adbdev_init();
                adb_reset_bus();
        }
        return 0;
}

device_initcall(adb_init);

static int
do_adb_reset_bus(void)
{
        int ret;

        if (adb_controller == NULL)
                return -ENXIO;

        if (adb_controller->autopoll)
                adb_controller->autopoll(0);

        blocking_notifier_call_chain(&adb_client_list,
                                     ADB_MSG_PRE_RESET, NULL);

        if (sleepy_trackpad) {
                /* Let the trackpad settle down */
                msleep(500);
        }

        mutex_lock(&adb_handler_mutex);
        write_lock_irq(&adb_handler_lock);
        memset(adb_handler, 0, sizeof(adb_handler));
        write_unlock_irq(&adb_handler_lock);

        /* That one is still a bit synchronous, oh well... */
        if (adb_controller->reset_bus)
                ret = adb_controller->reset_bus();
        else
                ret = 0;

        if (sleepy_trackpad) {
                /* Let the trackpad settle down */
                msleep(1500);
        }

        if (!ret) {
                autopoll_devs = adb_scan_bus();
                if (adb_controller->autopoll)
                        adb_controller->autopoll(autopoll_devs);
        }
        mutex_unlock(&adb_handler_mutex);

        blocking_notifier_call_chain(&adb_client_list,
                                     ADB_MSG_POST_RESET, NULL);

        return ret;
}

void
adb_poll(void)
{
        if ((adb_controller == NULL) || (adb_controller->poll == NULL))
                return;
        adb_controller->poll();
}

static void adb_sync_req_done(struct adb_request *req)
{
        struct completion *comp = req->arg;

        complete(comp);
}

int
adb_request(struct adb_request *req, void (*done)(struct adb_request *),
            int flags, int nbytes, ...)
{
        va_list list;
        int i;
        int rc;
        struct completion comp;

        if ((adb_controller == NULL) || (adb_controller->send_request == NULL))
                return -ENXIO;
        if (nbytes < 1)
                return -EINVAL;

        req->nbytes = nbytes + 1;
        req->done = done;
        req->reply_expected = flags & ADBREQ_REPLY;
        req->data[0] = ADB_PACKET;
        va_start(list, nbytes);
        for (i = 0; i < nbytes; ++i)
                req->data[i+1] = va_arg(list, int);
        va_end(list);

        if (flags & ADBREQ_NOSEND)
                return 0;

        /* Synchronous requests block using an on-stack completion */
        if (flags & ADBREQ_SYNC) {
                WARN_ON(done);
                req->done = adb_sync_req_done;
                req->arg = &comp;
                init_completion(&comp);
        }

        rc = adb_controller->send_request(req, 0);

        if ((flags & ADBREQ_SYNC) && !rc && !req->complete)
                wait_for_completion(&comp);

        return rc;
}

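/*
 * Typical synchronous use, as in adb_scan_bus() above: the command bytes go
 * in the varargs (adb_request() prepends ADB_PACKET itself), with ADBREQ_SYNC
 * to block until completion and ADBREQ_REPLY if the reply is wanted, e.g.
 *
 *      adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
 *                  (addr << 4) | 0xf);         -- Talk register 3 of addr
 *
 * On return, req.reply_len and req.reply[] hold the device's answer.
 */
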
/* Returns the number of devices with the given default id.
   Note on behaviour: this function registers a handler if default_id
   _and_ handler_id both match, but handler_id can be left as 0 to match
   on default_id only.  When handler_id is set, this function will try
   to change the device's handler_id if it doesn't match. */
int
adb_register(int default_id, int handler_id, struct adb_ids *ids,
             void (*handler)(unsigned char *, int, int))
{
        int i;

        mutex_lock(&adb_handler_mutex);
        ids->nids = 0;
        for (i = 1; i < 16; i++) {
                if ((adb_handler[i].original_address == default_id) &&
                    (!handler_id || (handler_id == adb_handler[i].handler_id) ||
                     try_handler_change(i, handler_id))) {
                        if (adb_handler[i].handler != 0) {
                                printk(KERN_ERR
                                       "Two handlers for ADB device %d\n",
                                       default_id);
                                continue;
                        }
                        write_lock_irq(&adb_handler_lock);
                        adb_handler[i].handler = handler;
                        write_unlock_irq(&adb_handler_lock);
                        ids->id[ids->nids++] = i;
                }
        }
        mutex_unlock(&adb_handler_mutex);
        return ids->nids;
}
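
/*
 * Typical caller, loosely modelled on the adbhid input driver (the handler
 * name here is hypothetical):
 *
 *      static void my_kbd_input(unsigned char *buf, int nb, int autopoll);
 *      struct adb_ids kbd_ids;
 *
 *      adb_register(ADB_KEYBOARD, 0, &kbd_ids, my_kbd_input);
 *
 * On return, kbd_ids.nids is the number of matching addresses and
 * kbd_ids.id[] lists the ADB addresses now bound to my_kbd_input().
 */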

int
adb_unregister(int index)
{
        int ret = -ENODEV;

        mutex_lock(&adb_handler_mutex);
        write_lock_irq(&adb_handler_lock);
        if (adb_handler[index].handler) {
                while (adb_handler[index].busy) {
                        write_unlock_irq(&adb_handler_lock);
                        yield();
                        write_lock_irq(&adb_handler_lock);
                }
                ret = 0;
                adb_handler[index].handler = NULL;
        }
        write_unlock_irq(&adb_handler_lock);
        mutex_unlock(&adb_handler_mutex);
        return ret;
}

void
adb_input(unsigned char *buf, int nb, int autopoll)
{
        int i, id;
        static int dump_adb_input = 0;
        unsigned long flags;

        void (*handler)(unsigned char *, int, int);

        /* We skip keystrokes and mouse moves when the sleep process
         * has been started.  Autopoll is stopped as well, but this check
         * is an additional safeguard.
         */
        if (adb_got_sleep)
                return;

        id = buf[0] >> 4;
        if (dump_adb_input) {
                printk(KERN_INFO "adb packet: ");
                for (i = 0; i < nb; ++i)
                        printk(" %x", buf[i]);
                printk(", id = %d\n", id);
        }
        write_lock_irqsave(&adb_handler_lock, flags);
        handler = adb_handler[id].handler;
        if (handler != NULL)
                adb_handler[id].busy = 1;
        write_unlock_irqrestore(&adb_handler_lock, flags);
        if (handler != NULL) {
                (*handler)(buf, nb, autopoll);
                wmb();
                adb_handler[id].busy = 0;
        }

}

/* Try to change handler to new_id. Will return 1 if successful. */
static int try_handler_change(int address, int new_id)
{
        struct adb_request req;

        if (adb_handler[address].handler_id == new_id)
                return 1;
        adb_request(&req, NULL, ADBREQ_SYNC, 3,
                    ADB_WRITEREG(address, 3), address | 0x20, new_id);
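        /*
         * The first data byte written to register 3 above is understood to
         * keep the device at the same address (low nibble) with the service
         * request enable flag set (0x20); the second byte is the new handler
         * ID being requested.
         */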
        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                    ADB_READREG(address, 3));
        if (req.reply_len < 2)
                return 0;
        if (req.reply[2] != new_id)
                return 0;
        adb_handler[address].handler_id = req.reply[2];

        return 1;
}

int
adb_try_handler_change(int address, int new_id)
{
        int ret;

        mutex_lock(&adb_handler_mutex);
        ret = try_handler_change(address, new_id);
        mutex_unlock(&adb_handler_mutex);
        return ret;
}

int
adb_get_infos(int address, int *original_address, int *handler_id)
{
        mutex_lock(&adb_handler_mutex);
        *original_address = adb_handler[address].original_address;
        *handler_id = adb_handler[address].handler_id;
        mutex_unlock(&adb_handler_mutex);

        return (*original_address != 0);
}


/*
 * /dev/adb device driver.
 */

#define ADB_MAJOR 56            /* major number for /dev/adb */

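/*
 * Rough picture of the character-device protocol implemented below: a
 * write() supplies a raw packet whose first byte is either ADB_PACKET
 * (the remaining bytes are sent to the controller as an ADB command) or
 * ADB_QUERY (handled locally by do_adb_query()); each completed request
 * can then be read() back, the bytes returned being that request's reply
 * buffer.
 */
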
struct adbdev_state {
        spinlock_t      lock;
        atomic_t        n_pending;
        struct adb_request *completed;
        wait_queue_head_t wait_queue;
        int             inuse;
};

static void adb_write_done(struct adb_request *req)
{
        struct adbdev_state *state = (struct adbdev_state *) req->arg;
        unsigned long flags;

        if (!req->complete) {
                req->reply_len = 0;
                req->complete = 1;
        }
        spin_lock_irqsave(&state->lock, flags);
        atomic_dec(&state->n_pending);
        if (!state->inuse) {
                kfree(req);
                if (atomic_read(&state->n_pending) == 0) {
                        spin_unlock_irqrestore(&state->lock, flags);
                        kfree(state);
                        return;
                }
        } else {
                struct adb_request **ap = &state->completed;
                while (*ap != NULL)
                        ap = &(*ap)->next;
                req->next = NULL;
                *ap = req;
                wake_up_interruptible(&state->wait_queue);
        }
        spin_unlock_irqrestore(&state->lock, flags);
}

static int
do_adb_query(struct adb_request *req)
{
        int ret = -EINVAL;

        switch (req->data[1]) {
        case ADB_QUERY_GETDEVINFO:
                if (req->nbytes < 3)
                        break;
                mutex_lock(&adb_handler_mutex);
                req->reply[0] = adb_handler[req->data[2]].original_address;
                req->reply[1] = adb_handler[req->data[2]].handler_id;
                mutex_unlock(&adb_handler_mutex);
                req->complete = 1;
                req->reply_len = 2;
                adb_write_done(req);
                ret = 0;
                break;
        }
        return ret;
}
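
/*
 * Example of the ADB_QUERY path above as seen from userspace (a sketch,
 * error handling omitted): writing the three bytes
 * { ADB_QUERY, ADB_QUERY_GETDEVINFO, addr } to /dev/adb queues a
 * pseudo-request whose two-byte reply, returned by the next read(), is
 * { original_address, handler_id } for ADB address addr.
 */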

static int adb_open(struct inode *inode, struct file *file)
{
        struct adbdev_state *state;
        int ret = 0;

        lock_kernel();
        if (iminor(inode) > 0 || adb_controller == NULL) {
                ret = -ENXIO;
                goto out;
        }
        state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL);
        if (state == 0) {
                ret = -ENOMEM;
                goto out;
        }
        file->private_data = state;
        spin_lock_init(&state->lock);
        atomic_set(&state->n_pending, 0);
        state->completed = NULL;
        init_waitqueue_head(&state->wait_queue);
        state->inuse = 1;

out:
        unlock_kernel();
        return ret;
}

static int adb_release(struct inode *inode, struct file *file)
{
        struct adbdev_state *state = file->private_data;
        unsigned long flags;

        lock_kernel();
        if (state) {
                file->private_data = NULL;
                spin_lock_irqsave(&state->lock, flags);
                if (atomic_read(&state->n_pending) == 0
                    && state->completed == NULL) {
                        spin_unlock_irqrestore(&state->lock, flags);
                        kfree(state);
                } else {
                        state->inuse = 0;
                        spin_unlock_irqrestore(&state->lock, flags);
                }
        }
        unlock_kernel();
        return 0;
}

static ssize_t adb_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        int ret = 0;
        struct adbdev_state *state = file->private_data;
        struct adb_request *req;
        wait_queue_t wait = __WAITQUEUE_INITIALIZER(wait, current);
        unsigned long flags;

        if (count < 2)
                return -EINVAL;
        if (count > sizeof(req->reply))
                count = sizeof(req->reply);
        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        req = NULL;
        spin_lock_irqsave(&state->lock, flags);
        add_wait_queue(&state->wait_queue, &wait);
        current->state = TASK_INTERRUPTIBLE;

        for (;;) {
                req = state->completed;
                if (req != NULL)
                        state->completed = req->next;
                else if (atomic_read(&state->n_pending) == 0)
                        ret = -EIO;
                if (req != NULL || ret != 0)
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                spin_unlock_irqrestore(&state->lock, flags);
                schedule();
                spin_lock_irqsave(&state->lock, flags);
        }

        current->state = TASK_RUNNING;
        remove_wait_queue(&state->wait_queue, &wait);
        spin_unlock_irqrestore(&state->lock, flags);

        if (ret)
                return ret;

        ret = req->reply_len;
        if (ret > count)
                ret = count;
        if (ret > 0 && copy_to_user(buf, req->reply, ret))
                ret = -EFAULT;

        kfree(req);
        return ret;
}

static ssize_t adb_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        int ret/*, i*/;
        struct adbdev_state *state = file->private_data;
        struct adb_request *req;

        if (count < 2 || count > sizeof(req->data))
                return -EINVAL;
        if (adb_controller == NULL)
                return -ENXIO;
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;

        req = kmalloc(sizeof(struct adb_request), GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;

        req->nbytes = count;
        req->done = adb_write_done;
        req->arg = (void *) state;
        req->complete = 0;

        ret = -EFAULT;
        if (copy_from_user(req->data, buf, count))
                goto out;

        atomic_inc(&state->n_pending);

        /* If a probe is in progress or we are sleeping, wait for it to complete */
        down(&adb_probe_mutex);

        /* Queries are special requests sent to the ADB driver itself */
        if (req->data[0] == ADB_QUERY) {
                if (count > 1)
                        ret = do_adb_query(req);
                else
                        ret = -EINVAL;
                up(&adb_probe_mutex);
        }
        /* Special case for ADB_BUSRESET request, all others are sent to
           the controller */
        else if ((req->data[0] == ADB_PACKET) && (count > 1)
                 && (req->data[1] == ADB_BUSRESET)) {
                ret = do_adb_reset_bus();
                up(&adb_probe_mutex);
                atomic_dec(&state->n_pending);
                if (ret == 0)
                        ret = count;
                goto out;
        } else {
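                /* Talk commands (low nibble 0xc-0xf) are the ones that
                   expect a reply from the device. */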
                req->reply_expected = ((req->data[1] & 0xc) == 0xc);
                if (adb_controller && adb_controller->send_request)
                        ret = adb_controller->send_request(req, 0);
                else
                        ret = -ENXIO;
                up(&adb_probe_mutex);
        }

        if (ret != 0) {
                atomic_dec(&state->n_pending);
                goto out;
        }
        return count;

out:
        kfree(req);
        return ret;
}

static const struct file_operations adb_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = adb_read,
        .write          = adb_write,
        .open           = adb_open,
        .release        = adb_release,
};

static struct platform_driver adb_pfdrv = {
        .driver = {
                .name = "adb",
        },
#ifdef CONFIG_PM
        .suspend = adb_suspend,
        .resume = adb_resume,
#endif
};

static struct platform_device adb_pfdev = {
        .name = "adb",
};

static int __init
adb_dummy_probe(struct platform_device *dev)
{
        if (dev == &adb_pfdev)
                return 0;
        return -ENODEV;
}

static void __init
adbdev_init(void)
{
        if (register_chrdev(ADB_MAJOR, "adb", &adb_fops)) {
                printk(KERN_ERR "adb: unable to get major %d\n", ADB_MAJOR);
                return;
        }

        adb_dev_class = class_create(THIS_MODULE, "adb");
        if (IS_ERR(adb_dev_class))
                return;
        device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), "adb");

        platform_device_register(&adb_pfdev);
        platform_driver_probe(&adb_pfdrv, adb_dummy_probe);
}