/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dln2.h>
#include <linux/rculist.h>

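/*
 * Every message exchanged with the device starts with this header. @size is
 * the total message length including the header, @id identifies the command,
 * @handle selects the module (see enum dln2_handle) and @echo is returned
 * unchanged by the device; this driver uses it to carry the RX slot index so
 * that responses can be matched to the request waiting for them.
 */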
struct dln2_header {
	__le16 size;
	__le16 id;
	__le16 echo;
	__le16 handle;
};

struct dln2_response {
	struct dln2_header hdr;
	__le16 result;
};

#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512

enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLES
};

/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. While sending a request the transfer routine will look for a free
 * receive context and use it to wait for a response and to receive the URB and
 * thus the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};

/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
	spinlock_t lock;
};

struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;
	u8 ep_out;

	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};

struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};

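/*
 * Register a callback for unsolicited events with the given event id. Only one
 * callback may be registered per id. Entries are kept in an RCU protected list
 * and the callback is invoked from the URB completion path, so it must not
 * sleep.
 */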
int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i, *entry;
	unsigned long flags;
	int ret = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (ret)
		kfree(entry);

	return ret;
}
EXPORT_SYMBOL(dln2_register_event_cb);

void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			list_del_rcu(&i->list);
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(i);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);

/*
 * Returns true if a valid transfer slot is found. In this case the URB must not
 * be resubmitted immediately in dln2_rx as we need the data when dln2_transfer
 * is woken up. It will be resubmitted there.
 */
static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
				   u16 handle, u16 rx_slot)
{
	struct device *dev = &dln2->interface->dev;
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	struct dln2_rx_context *rxc;
	bool valid_slot = false;

	if (rx_slot >= DLN2_MAX_RX_SLOTS)
		goto out;

	rxc = &rxs->slots[rx_slot];

	/*
	 * No need to disable interrupts as this lock is not taken in interrupt
	 * context elsewhere in this driver. Neither this function nor its
	 * callers are exported to other modules.
	 */
	spin_lock(&rxs->lock);
	if (rxc->in_use && !rxc->urb) {
		rxc->urb = urb;
		complete(&rxc->done);
		valid_slot = true;
	}
	spin_unlock(&rxs->lock);

out:
	if (!valid_slot)
		dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);

	return valid_slot;
}

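/* Dispatch an event to the callback registered for its event id, if any. */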
static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
				     void *data, int len)
{
	struct dln2_event_cb_entry *i;

	rcu_read_lock();

	list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			i->callback(i->pdev, echo, data, len);
			break;
		}
	}

	rcu_read_unlock();
}

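/*
 * Completion handler for the RX URBs. It validates the DLN2 header, dispatches
 * events to the registered callbacks and hands responses over to the transfer
 * routine waiting on the matching RX slot. Unless a waiter took ownership of
 * the URB, it is resubmitted here.
 */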
static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		dln2_run_event_callbacks(dln2, id, echo, data, len);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}

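/*
 * Allocate a transfer buffer with a DLN2 header followed by the payload. On
 * success *obuf_len is updated to the total buffer length (header included).
 */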
static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	int len;
	void *buf;
	struct dln2_header *hdr;

	len = *obuf_len + sizeof(*hdr);
	buf = kmalloc(len, gfp);
	if (!buf)
		return NULL;

	hdr = (struct dln2_header *)buf;
	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(len);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	memcpy(buf + sizeof(*hdr), obuf, *obuf_len);

	*obuf_len = len;

	return buf;
}

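/*
 * Build the request message and send it synchronously on the bulk OUT
 * endpoint. The response arrives asynchronously via the RX URBs and is picked
 * up by the caller through its RX slot.
 */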
static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	int ret = 0;
	int len = obuf_len;
	void *buf;
	int actual;

	buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   buf, len, &actual, DLN2_USB_TIMEOUT);

	kfree(buf);

	return ret;
}

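/*
 * Try to claim a free RX slot for the given module handle. Also returns true
 * on disconnect, with *slot set to -ENODEV, so that waiters in alloc_rx_slot
 * are woken up and can bail out.
 */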
static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
{
	struct dln2_mod_rx_slots *rxs;
	unsigned long flags;

	if (dln2->disconnect) {
		*slot = -ENODEV;
		return true;
	}

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);

	if (*slot < DLN2_MAX_RX_SLOTS) {
		struct dln2_rx_context *rxc = &rxs->slots[*slot];

		set_bit(*slot, rxs->bmap);
		rxc->in_use = true;
	}

	spin_unlock_irqrestore(&rxs->lock, flags);

	return *slot < DLN2_MAX_RX_SLOTS;
}

static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int ret;
	int slot;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));
	if (ret < 0)
		return ret;

	return slot;
}

static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}

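/*
 * Perform a complete request/response transfer: claim an RX slot, send the
 * request with the slot index in the echo field, wait for the response to
 * complete the slot, check the result code and copy the response payload to
 * ibuf (updating *ibuf_len to the number of bytes copied).
 */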
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}

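/*
 * Transfer entry point for the MFD cell drivers (dln2-gpio, dln2-i2c,
 * dln2-spi); the module handle is taken from the cell's platform data. A
 * typical call looks like this (command name and payload layouts are
 * illustrative only):
 *
 *	unsigned len = sizeof(rsp);
 *
 *	ret = dln2_transfer(pdev, DLN2_EXAMPLE_CMD, &req, sizeof(req),
 *			    &rsp, &len);
 *
 * On success, len is updated to the number of response payload bytes copied
 * into rsp.
 */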
int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_platform_data *dln2_pdata;
	struct dln2_dev *dln2;
	u16 handle;

	dln2 = dev_get_drvdata(pdev->dev.parent);
	dln2_pdata = dev_get_platdata(&pdev->dev);
	handle = dln2_pdata->handle;

	return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
			      ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);

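/*
 * Device identification, used once during probe: read the hardware type and
 * the serial number through the control handle and reject unknown hardware.
 */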
static int dln2_check_hw(struct dln2_dev *dln2)
{
	int ret;
	__le32 hw_type;
	int len = sizeof(hw_type);

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
			     NULL, 0, &hw_type, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(hw_type))
		return -EREMOTEIO;

	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
			le32_to_cpu(hw_type));
		return -ENODEV;
	}

	return 0;
}

static int dln2_print_serialno(struct dln2_dev *dln2)
{
	int ret;
	__le32 serial_no;
	int len = sizeof(serial_no);
	struct device *dev = &dln2->interface->dev;

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
			     &serial_no, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(serial_no))
		return -EREMOTEIO;

	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));

	return 0;
}

static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret;

	ret = dln2_check_hw(dln2);
	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}

static void dln2_free_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		usb_free_urb(dln2->rx_urb[i]);
		kfree(dln2->rx_buf[i]);
	}
}

static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++)
		usb_kill_urb(dln2->rx_urb[i]);
}

static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}

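/*
 * Allocate the RX buffers and bulk IN URBs. On partial failure the caller is
 * expected to clean up via dln2_free(), which copes with NULL entries.
 */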
static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
			      struct usb_host_interface *hostif)
{
	int i;
	const int rx_max_size = DLN2_RX_BUF_SIZE;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
		if (!dln2->rx_buf[i])
			return -ENOMEM;

		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dln2->rx_urb[i])
			return -ENOMEM;

		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
	}

	return 0;
}

static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int ret;
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};

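/*
 * Stop all traffic: mark the device as disconnected so no new transfers start,
 * wake up any transfer still waiting for a response, wait for in-flight
 * transfers to drain and finally kill the RX URBs. Used by both disconnect and
 * suspend.
 */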
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}

static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}

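/*
 * Probe sequence: check that this is interface 0 with at least two endpoints
 * (the first is used as the OUT endpoint, the second as the IN endpoint),
 * allocate and initialize the per-device state, set up and submit the RX URBs,
 * verify the hardware ID and finally register the MFD cells for the GPIO, I2C
 * and SPI sub-drivers.
 */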
static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	if (hostif->desc.bInterfaceNumber != 0 ||
	    hostif->desc.bNumEndpoints < 2)
		return -ENODEV;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
	dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}

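/*
 * Power management: suspend stops all traffic via dln2_stop(); resume clears
 * the disconnect flag and resubmits the RX URBs with GFP_NOIO since it runs on
 * the resume path.
 */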
static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}

static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}

static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);

static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");