/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <asm/gpio.h>
#include <asm/mach/irq.h>

#define OFF_MODE	1

static LIST_HEAD(omap_gpio_list);

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	u16 irq;
	int irq_base;
	struct irq_domain *domain;
	u32 suspend_wakeup;
	u32 saved_wakeup;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

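/*
 * The helpers below take a global GPIO number and reduce it to the bit
 * position within its bank (e.g. GPIO 34 on 32-bit-wide banks is bit 2 of
 * its bank); GPIO_BIT() turns that position into a one-hot register mask.
 */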
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
#define GPIO_MOD_CTRL_BIT	BIT(0)

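/* Map a bank-relative IRQ number back to the corresponding global GPIO number. */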
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
	return gpio_irq - bank->irq_base + bank->chip.base;
}

static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
	bank->context.oe = l;
}

/* set data out value using dedicated set/clear register */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	__raw_writel(l, reg);
}

/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
	bank->context.dataout = l;
}

static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

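/* Read-modify-write helper: set or clear @mask in the register at @base + @reg. */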
static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = __raw_readl(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	__raw_writel(l, base + reg);
}

static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;
	}
}

static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;
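	/*
	 * e.g. a request of 100 us programs (100 / 31) - 1 = 2; the value is
	 * clamped above so it always fits the 8-bit debounce register.
	 */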

	l = GPIO_BIT(bank, gpio);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within
	 * the runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

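	/*
	 * Mirror what was just programmed into the bank->context shadow
	 * registers, so a later context restore reapplies the same triggering.
	 */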
	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the IRQ during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
		unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}

static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio;
	int retval;
	unsigned long flags;

	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = irq_to_gpio(bank, d->irq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (1 << bank->width) - 1;

	reg += bank->regs->irqenable;
	l = __raw_readl(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

/*
 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 * The 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, the system will always wake up on GPIO events. While the
 * system is running, all registered GPIO interrupts need to have wake-up
 * enabled. When the system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->suspend_wakeup |= gpio_bit;
	else
		bank->suspend_wakeup &= ~gpio_bit;

	__raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	return _set_gpio_wakeup(bank, gpio, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before the handler(s) are
		 * called so that we don't miss any interrupts that occur
		 * while they are running */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if only edge sensitive GPIO pin interrupts are configured,
		 * we can unmask the GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		gpio_irq = bank->irq_base;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			int gpio = irq_to_gpio(bank, gpio_irq);

			if (!(isr & 1))
				continue;

			gpio_index = GPIO_INDEX(bank, gpio);

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);

			generic_handle_irq(gpio_irq);
		}
	}
	/* if the bank has any level sensitive GPIO pin interrupts configured,
	 * we must unmask the bank interrupt only after the handler(s) have
	 * run, in order to avoid a spurious bank interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}

static void gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	_clear_gpio_irqstatus(bank, gpio);
}

static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_is_input(struct gpio_bank *bank, int mask)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return __raw_readl(reg) & mask;
}

static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	void __iomem *reg;
	int gpio;
	u32 mask;

	gpio = chip->base + offset;
	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base;
	mask = GPIO_BIT(bank, gpio);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, gpio);
	else
		return _get_gpio_dataout(bank, gpio);
}

static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);
	return bank->irq_base + offset;
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		__raw_writel(l, bank->base + bank->regs->irqenable);
		return;
	}

	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
	_gpio_rmw(base, bank->regs->irqstatus, l,
		  bank->regs->irqenable_inv == false);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
	if (bank->regs->debounce_en)
		_gpio_rmw(base, bank->regs->debounce_en, 0, 1);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		_gpio_rmw(base, bank->regs->ctrl, 0, 1);
}

static __devinit void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;

	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

static const struct of_device_id omap_gpio_match[];

static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

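	/*
	 * Reserve a contiguous range of IRQ descriptors for this bank and map
	 * them 1:1 onto its GPIO lines through a legacy IRQ domain.
	 */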
	bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (bank->irq_base < 0) {
		dev_err(dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
					     0, &irq_domain_simple_ops, NULL);

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM_SLEEP)
static int omap_gpio_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	void __iomem *wakeup_enable;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->suspend_wakeup)
		return 0;

	wakeup_enable = bank->base + bank->regs->wkup_en;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(wakeup_enable);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->saved_wakeup)
		return 0;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
	l2 = __raw_readl(bank->base + bank->regs->risingdetect);

	bank->saved_fallingdetect = l1;
	bank->saved_risingdetect = l2;
	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup. Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count ||
						!context_lost_cnt_after) {
			omap_gpio_restore_context(bank);
		} else {
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	__raw_writel(bank->saved_fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->saved_risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->saved_fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->saved_risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(old0 | gen, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
					bank->regs->leveldetect1);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(old0 | l, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
					bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

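/*
 * These two helpers let the OMAP platform idle code release (and later
 * retake) the runtime PM reference of every bank that can lose context,
 * so that the runtime PM callbacks above get a chance to run around idle.
 */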
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
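/*
 * Reprogram every register that may have been lost while the bank was
 * powered off, from the shadow copies maintained in bank->context.
 */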
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
				bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
#define omap_gpio_suspend NULL
#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
};

#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver registration needs to happen before
 * machine_init functions access the gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);