]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/staging/greybus/gpio.c
HID: usbhid: Add HID_QUIRK_NOGET for Aten CS-1758 KVM switch
[mirror_ubuntu-artful-kernel.git] / drivers / staging / greybus / gpio.c
1 /*
2 * GPIO Greybus driver.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/gpio.h>
14 #include <linux/irq.h>
15 #include <linux/irqdomain.h>
16 #include <linux/mutex.h>
17
18 #include "greybus.h"
19 #include "gbphy.h"
20
/*
 * Cached per-line state for one GPIO line on the remote module.
 * One entry exists per line (line_max + 1 entries total, see
 * gb_gpio_controller_setup()).
 */
struct gb_gpio_line {
	/* The following has to be an array of line_max entries */
	/* --> make them just a flags field */
	u8 active: 1,
	direction: 1,	/* 0 = output, 1 = input */
	value: 1;	/* 0 = low, 1 = high */
	u16 debounce_usec;	/* last debounce period sent to the module */

	/*
	 * irq state is cached here while the irq bus lock is held and
	 * pushed to the module in gb_gpio_irq_bus_sync_unlock().
	 */
	u8 irq_type;
	bool irq_type_pending;	/* irq_type not yet sent to the module */
	bool masked;
	bool masked_pending;	/* masked state not yet sent to the module */
};
34
/*
 * Driver state for one Greybus GPIO controller: the connection used to
 * talk to the module, the cached line states, and the gpiolib/irqchip
 * glue objects.
 */
struct gb_gpio_controller {
	struct gbphy_device *gbphy_dev;
	struct gb_connection *connection;
	u8 line_max;	/* max line number */
	struct gb_gpio_line *lines;	/* array of line_max + 1 entries */

	struct gpio_chip chip;
	struct irq_chip irqc;
	struct irq_chip *irqchip;	/* non-NULL once irqchip is added */
	struct irq_domain *irqdomain;
	unsigned int irq_base;	/* virq mapped to line 0 */
	irq_flow_handler_t irq_handler;
	unsigned int irq_default_type;
	struct mutex irq_lock;	/* serializes irqchip bus operations */
};
/* Map the embedded gpio_chip back to its containing controller. */
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
/*
 * The irqdomain's host_data is the struct gpio_chip (set up in
 * gb_gpio_irqchip_add()).  Parenthesize the macro argument so an
 * expression argument expands safely.
 */
#define irq_data_to_gpio_chip(d) ((d)->domain->host_data)
53
54 static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
55 {
56 struct gb_gpio_line_count_response response;
57 int ret;
58
59 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
60 NULL, 0, &response, sizeof(response));
61 if (!ret)
62 ggc->line_max = response.count;
63 return ret;
64 }
65
66 static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
67 {
68 struct gb_gpio_activate_request request;
69 struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
70 int ret;
71
72 ret = gbphy_runtime_get_sync(gbphy_dev);
73 if (ret)
74 return ret;
75
76 request.which = which;
77 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
78 &request, sizeof(request), NULL, 0);
79 if (ret) {
80 gbphy_runtime_put_autosuspend(gbphy_dev);
81 return ret;
82 }
83
84 ggc->lines[which].active = true;
85
86 return 0;
87 }
88
89 static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
90 u8 which)
91 {
92 struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
93 struct device *dev = &gbphy_dev->dev;
94 struct gb_gpio_deactivate_request request;
95 int ret;
96
97 request.which = which;
98 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
99 &request, sizeof(request), NULL, 0);
100 if (ret) {
101 dev_err(dev, "failed to deactivate gpio %u\n", which);
102 goto out_pm_put;
103 }
104
105 ggc->lines[which].active = false;
106
107 out_pm_put:
108 gbphy_runtime_put_autosuspend(gbphy_dev);
109 }
110
111 static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
112 u8 which)
113 {
114 struct device *dev = &ggc->gbphy_dev->dev;
115 struct gb_gpio_get_direction_request request;
116 struct gb_gpio_get_direction_response response;
117 int ret;
118 u8 direction;
119
120 request.which = which;
121 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
122 &request, sizeof(request),
123 &response, sizeof(response));
124 if (ret)
125 return ret;
126
127 direction = response.direction;
128 if (direction && direction != 1) {
129 dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
130 which, direction);
131 }
132 ggc->lines[which].direction = direction ? 1 : 0;
133 return 0;
134 }
135
136 static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
137 u8 which)
138 {
139 struct gb_gpio_direction_in_request request;
140 int ret;
141
142 request.which = which;
143 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
144 &request, sizeof(request), NULL, 0);
145 if (!ret)
146 ggc->lines[which].direction = 1;
147 return ret;
148 }
149
150 static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
151 u8 which, bool value_high)
152 {
153 struct gb_gpio_direction_out_request request;
154 int ret;
155
156 request.which = which;
157 request.value = value_high ? 1 : 0;
158 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
159 &request, sizeof(request), NULL, 0);
160 if (!ret)
161 ggc->lines[which].direction = 0;
162 return ret;
163 }
164
165 static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
166 u8 which)
167 {
168 struct device *dev = &ggc->gbphy_dev->dev;
169 struct gb_gpio_get_value_request request;
170 struct gb_gpio_get_value_response response;
171 int ret;
172 u8 value;
173
174 request.which = which;
175 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
176 &request, sizeof(request),
177 &response, sizeof(response));
178 if (ret) {
179 dev_err(dev, "failed to get value of gpio %u\n", which);
180 return ret;
181 }
182
183 value = response.value;
184 if (value && value != 1) {
185 dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
186 which, value);
187 }
188 ggc->lines[which].value = value ? 1 : 0;
189 return 0;
190 }
191
192 static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
193 u8 which, bool value_high)
194 {
195 struct device *dev = &ggc->gbphy_dev->dev;
196 struct gb_gpio_set_value_request request;
197 int ret;
198
199 if (ggc->lines[which].direction == 1) {
200 dev_warn(dev, "refusing to set value of input gpio %u\n",
201 which);
202 return;
203 }
204
205 request.which = which;
206 request.value = value_high ? 1 : 0;
207 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
208 &request, sizeof(request), NULL, 0);
209 if (ret) {
210 dev_err(dev, "failed to set value of gpio %u\n", which);
211 return;
212 }
213
214 ggc->lines[which].value = request.value;
215 }
216
217 static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
218 u8 which, u16 debounce_usec)
219 {
220 struct gb_gpio_set_debounce_request request;
221 int ret;
222
223 request.which = which;
224 request.usec = cpu_to_le16(debounce_usec);
225 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
226 &request, sizeof(request), NULL, 0);
227 if (!ret)
228 ggc->lines[which].debounce_usec = debounce_usec;
229 return ret;
230 }
231
232 static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
233 {
234 struct device *dev = &ggc->gbphy_dev->dev;
235 struct gb_gpio_irq_mask_request request;
236 int ret;
237
238 request.which = hwirq;
239 ret = gb_operation_sync(ggc->connection,
240 GB_GPIO_TYPE_IRQ_MASK,
241 &request, sizeof(request), NULL, 0);
242 if (ret)
243 dev_err(dev, "failed to mask irq: %d\n", ret);
244 }
245
246 static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
247 {
248 struct device *dev = &ggc->gbphy_dev->dev;
249 struct gb_gpio_irq_unmask_request request;
250 int ret;
251
252 request.which = hwirq;
253 ret = gb_operation_sync(ggc->connection,
254 GB_GPIO_TYPE_IRQ_UNMASK,
255 &request, sizeof(request), NULL, 0);
256 if (ret)
257 dev_err(dev, "failed to unmask irq: %d\n", ret);
258 }
259
260 static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
261 u8 hwirq, u8 type)
262 {
263 struct device *dev = &ggc->gbphy_dev->dev;
264 struct gb_gpio_irq_type_request request;
265 int ret;
266
267 request.which = hwirq;
268 request.type = type;
269
270 ret = gb_operation_sync(ggc->connection,
271 GB_GPIO_TYPE_IRQ_TYPE,
272 &request, sizeof(request), NULL, 0);
273 if (ret)
274 dev_err(dev, "failed to set irq type: %d\n", ret);
275 }
276
277 static void gb_gpio_irq_mask(struct irq_data *d)
278 {
279 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
280 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
281 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
282
283 line->masked = true;
284 line->masked_pending = true;
285 }
286
287 static void gb_gpio_irq_unmask(struct irq_data *d)
288 {
289 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
290 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
291 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
292
293 line->masked = false;
294 line->masked_pending = true;
295 }
296
297 static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
298 {
299 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
300 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
301 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
302 struct device *dev = &ggc->gbphy_dev->dev;
303 u8 irq_type;
304
305 switch (type) {
306 case IRQ_TYPE_NONE:
307 irq_type = GB_GPIO_IRQ_TYPE_NONE;
308 break;
309 case IRQ_TYPE_EDGE_RISING:
310 irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
311 break;
312 case IRQ_TYPE_EDGE_FALLING:
313 irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
314 break;
315 case IRQ_TYPE_EDGE_BOTH:
316 irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
317 break;
318 case IRQ_TYPE_LEVEL_LOW:
319 irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
320 break;
321 case IRQ_TYPE_LEVEL_HIGH:
322 irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
323 break;
324 default:
325 dev_err(dev, "unsupported irq type: %u\n", type);
326 return -EINVAL;
327 }
328
329 line->irq_type = irq_type;
330 line->irq_type_pending = true;
331
332 return 0;
333 }
334
335 static void gb_gpio_irq_bus_lock(struct irq_data *d)
336 {
337 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
338 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
339
340 mutex_lock(&ggc->irq_lock);
341 }
342
343 static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
344 {
345 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
346 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
347 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
348
349 if (line->irq_type_pending) {
350 _gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
351 line->irq_type_pending = false;
352 }
353
354 if (line->masked_pending) {
355 if (line->masked)
356 _gb_gpio_irq_mask(ggc, d->hwirq);
357 else
358 _gb_gpio_irq_unmask(ggc, d->hwirq);
359 line->masked_pending = false;
360 }
361
362 mutex_unlock(&ggc->irq_lock);
363 }
364
/*
 * Unsolicited-request handler: the module sends GB_GPIO_TYPE_IRQ_EVENT
 * requests when a line interrupt fires.  Validate the (externally
 * supplied) payload, map the hardware line offset to its Linux virq,
 * and run the irq's flow handler.
 *
 * Returns 0 on success or -EINVAL on a malformed or unknown request.
 */
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq;
	struct irq_desc *desc;

	/* IRQ events are the only unsolicited request this driver accepts. */
	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	/* Bounds-check the payload before dereferencing it. */
	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->irqdomain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	/*
	 * Run the flow handler with local interrupts disabled, mimicking
	 * the hard-irq context the handler expects.
	 */
	local_irq_disable();
	generic_handle_irq_desc(desc);
	local_irq_enable();

	return 0;
}
412
413 static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
414 {
415 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
416
417 return gb_gpio_activate_operation(ggc, (u8)offset);
418 }
419
420 static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
421 {
422 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
423
424 gb_gpio_deactivate_operation(ggc, (u8)offset);
425 }
426
427 static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
428 {
429 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
430 u8 which;
431 int ret;
432
433 which = (u8)offset;
434 ret = gb_gpio_get_direction_operation(ggc, which);
435 if (ret)
436 return ret;
437
438 return ggc->lines[which].direction ? 1 : 0;
439 }
440
441 static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
442 {
443 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
444
445 return gb_gpio_direction_in_operation(ggc, (u8)offset);
446 }
447
448 static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
449 int value)
450 {
451 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
452
453 return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
454 }
455
456 static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
457 {
458 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
459 u8 which;
460 int ret;
461
462 which = (u8)offset;
463 ret = gb_gpio_get_value_operation(ggc, which);
464 if (ret)
465 return ret;
466
467 return ggc->lines[which].value;
468 }
469
470 static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
471 {
472 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
473
474 gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
475 }
476
477 static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
478 unsigned debounce)
479 {
480 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
481 u16 usec;
482
483 if (debounce > U16_MAX)
484 return -EINVAL;
485 usec = (u16)debounce;
486
487 return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
488 }
489
490 static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
491 {
492 int ret;
493
494 /* Now find out how many lines there are */
495 ret = gb_gpio_line_count_operation(ggc);
496 if (ret)
497 return ret;
498
499 ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
500 GFP_KERNEL);
501 if (!ggc->lines)
502 return -ENOMEM;
503
504 return ret;
505 }
506
/**
 * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
 * @domain: the irqdomain used by this irqchip
 * @irq: the global irq number used by this GB gpio irqchip irq
 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
 *
 * This function will set up the mapping for a certain IRQ line on a
 * GB gpio by assigning the GB gpio as chip data, and using the irqchip
 * stored inside the GB gpio.
 */
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	/* host_data was set to the gpio_chip in gb_gpio_irqchip_add() */
	struct gpio_chip *chip = domain->host_data;
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	irq_set_chip_data(irq, ggc);
	irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
	irq_set_noprobe(irq);
	/*
	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
	 * is passed as default type.
	 */
	if (ggc->irq_default_type != IRQ_TYPE_NONE)
		irq_set_irq_type(irq, ggc->irq_default_type);

	return 0;
}
535
/* Undo what gb_gpio_irq_map() set up for this virq. */
static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
541
/* hwirq (line offset) <-> Linux virq mapping lifecycle callbacks. */
static const struct irq_domain_ops gb_gpio_domain_ops = {
	.map = gb_gpio_irq_map,
	.unmap = gb_gpio_irq_unmap,
};
546
547 /**
548 * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
549 * @ggc: the gb_gpio_controller to remove the irqchip from
550 *
551 * This is called only from gb_gpio_remove()
552 */
553 static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
554 {
555 unsigned int offset;
556
557 /* Remove all IRQ mappings and delete the domain */
558 if (ggc->irqdomain) {
559 for (offset = 0; offset < (ggc->line_max + 1); offset++)
560 irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
561 irq_domain_remove(ggc->irqdomain);
562 }
563
564 if (ggc->irqchip)
565 ggc->irqchip = NULL;
566 }
567
/**
 * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
 * @chip: the gpio chip to add the irqchip to
 * @irqchip: the irqchip to add to the adapter
 * @first_irq: if not dynamically assigned, the base (first) IRQ to
 * allocate gpio irqs from
 * @handler: the irq handler to use (often a predefined irq core function)
 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
 * to have the core avoid setting up any default type in the hardware.
 *
 * This function closely associates a certain irqchip with a certain
 * gpio chip, providing an irq domain to translate the local IRQs to
 * global irqs, and making sure that the gpio chip
 * is passed as chip data to all related functions. Driver callbacks
 * need to use container_of() to get their local state containers back
 * from the gpio chip passed as chip data. An irqdomain will be stored
 * in the gpio chip that shall be used by the driver to handle IRQ number
 * translation. The gpio chip will need to be initialized and registered
 * before calling this function.
 */
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
			       struct irq_chip *irqchip,
			       unsigned int first_irq,
			       irq_flow_handler_t handler,
			       unsigned int type)
{
	struct gb_gpio_controller *ggc;
	unsigned int offset;
	unsigned irq_base;

	if (!chip || !irqchip)
		return -EINVAL;

	ggc = gpio_chip_to_gb_gpio_controller(chip);

	/* Stash the handler/type before the domain exists: the .map
	 * callback reads them for every mapping created below. */
	ggc->irqchip = irqchip;
	ggc->irq_handler = handler;
	ggc->irq_default_type = type;
	/* chip becomes the domain's host_data (see gb_gpio_irq_map()). */
	ggc->irqdomain = irq_domain_add_simple(NULL,
					       ggc->line_max + 1, first_irq,
					       &gb_gpio_domain_ops, chip);
	if (!ggc->irqdomain) {
		ggc->irqchip = NULL;
		return -EINVAL;
	}

	/*
	 * Prepare the mapping since the irqchip shall be orthogonal to
	 * any gpio calls. If the first_irq was zero, this is
	 * necessary to allocate descriptors for all IRQs.
	 */
	for (offset = 0; offset < (ggc->line_max + 1); offset++) {
		irq_base = irq_create_mapping(ggc->irqdomain, offset);
		if (offset == 0)
			ggc->irq_base = irq_base;
	}

	return 0;
}
627
628 static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
629 {
630 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
631
632 return irq_find_mapping(ggc->irqdomain, offset);
633 }
634
/*
 * Probe: create the Greybus connection, query the module for its line
 * count, register the irqchip and the gpiolib chip, then enable runtime
 * PM autosuspend.  The error-path labels unwind in strict reverse order
 * of the setup steps above them.
 */
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	/*
	 * Enable only the transmit path for now; incoming IRQ events are
	 * accepted once gb_connection_enable() is called further down.
	 */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	/* Needs tx enabled: queries the module for its line count. */
	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_debounce = gb_gpio_set_debounce;
	gpio->to_irq = gb_gpio_to_irq;
	gpio->base = -1;	/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;	/* operations go over the (sleeping) connection */

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gb_gpio_irqchip_add(gpio, irqc, 0,
				  handle_level_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
		goto exit_line_free;
	}

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_gpio_irqchip_remove;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_gpio_irqchip_remove:
	gb_gpio_irqchip_remove(ggc);
exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}
730
/*
 * Remove: resume the device, stop incoming IRQ events first so no event
 * races the teardown, then unregister the gpio chip, irqchip and
 * connection before freeing the controller state.
 */
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		/* Resume failed: still balance the usage count. */
		gbphy_runtime_get_noresume(gbphy_dev);

	/* Block unsolicited IRQ events before tearing down the chips. */
	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_gpio_irqchip_remove(ggc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}
749
/* Bind to any gbphy device speaking the Greybus GPIO protocol. */
static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");