]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/clocksource/sh_mtu2.c
Merge tag 'locks-v3.16-2' of git://git.samba.org/jlayton/linux
[mirror_ubuntu-bionic-kernel.git] / drivers / clocksource / sh_mtu2.c
CommitLineData
d5ed4c2e
MD
1/*
2 * SuperH Timer Support - MTU2
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
d5ed4c2e
MD
14 */
15
346f5e76
LP
16#include <linux/clk.h>
17#include <linux/clockchips.h>
18#include <linux/delay.h>
19#include <linux/err.h>
d5ed4c2e 20#include <linux/init.h>
d5ed4c2e 21#include <linux/interrupt.h>
d5ed4c2e 22#include <linux/io.h>
346f5e76 23#include <linux/ioport.h>
d5ed4c2e 24#include <linux/irq.h>
7deeab5d 25#include <linux/module.h>
346f5e76 26#include <linux/platform_device.h>
57d13370 27#include <linux/pm_domain.h>
3cb6f10a 28#include <linux/pm_runtime.h>
346f5e76
LP
29#include <linux/sh_timer.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
d5ed4c2e 32
struct sh_mtu2_device;

/*
 * Per-channel state. One instance exists for every MTU2 hardware channel
 * handled by the driver.
 */
struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;	/* backpointer to owning device */
	unsigned int index;		/* channel number; also the TSTR start/stop bit */

	void __iomem *base;		/* base of this channel's register window */
	int irq;			/* TGIA (compare match) interrupt */

	struct clock_event_device ced;	/* clockevent exposed to the kernel */
};

/*
 * Per-device state, shared by all channels of one MTU2 block.
 */
struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* ioremap()ed base of the whole block */
	struct clk *clk;		/* functional clock for the block */

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	/* Legacy = one platform device per channel with sh_timer_config data. */
	bool legacy;
	/* Set when at least one channel was registered as a clockevent. */
	bool has_clockevent;
};
57
/* Serializes read-modify-write of the TSTR register shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);

/* Register identifiers; indices into mtu2_reg_offs[] except for TSTR. */
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */

/* TCR: counter clear source, clock edge and prescaler selection. */
#define TCR_CCLR_NONE (0 << 5)
#define TCR_CCLR_TGRA (1 << 5)
#define TCR_CCLR_TGRB (2 << 5)
#define TCR_CCLR_SYNC (3 << 5)
#define TCR_CCLR_TGRC (5 << 5)
#define TCR_CCLR_TGRD (6 << 5)
#define TCR_CCLR_MASK (7 << 5)
#define TCR_CKEG_RISING (0 << 3)
#define TCR_CKEG_FALLING (1 << 3)
#define TCR_CKEG_BOTH (2 << 3)
#define TCR_CKEG_MASK (3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1 (0 << 0)
#define TCR_TPSC_P4 (1 << 0)
#define TCR_TPSC_P16 (2 << 0)
#define TCR_TPSC_P64 (3 << 0)
#define TCR_TPSC_CH0_TCLKA (4 << 0)
#define TCR_TPSC_CH0_TCLKB (5 << 0)
#define TCR_TPSC_CH0_TCLKC (6 << 0)
#define TCR_TPSC_CH0_TCLKD (7 << 0)
#define TCR_TPSC_CH1_TCLKA (4 << 0)
#define TCR_TPSC_CH1_TCLKB (5 << 0)
#define TCR_TPSC_CH1_P256 (6 << 0)
#define TCR_TPSC_CH1_TCNT2 (7 << 0)
#define TCR_TPSC_CH2_TCLKA (4 << 0)
#define TCR_TPSC_CH2_TCLKB (5 << 0)
#define TCR_TPSC_CH2_TCLKC (6 << 0)
#define TCR_TPSC_CH2_P1024 (7 << 0)
#define TCR_TPSC_CH34_P256 (4 << 0)
#define TCR_TPSC_CH34_P1024 (5 << 0)
#define TCR_TPSC_CH34_TCLKA (6 << 0)
#define TCR_TPSC_CH34_TCLKB (7 << 0)
#define TCR_TPSC_MASK (7 << 0)

/* TMDR: buffer operation and timer operating mode. */
#define TMDR_BFE (1 << 6)
#define TMDR_BFB (1 << 5)
#define TMDR_BFA (1 << 4)
#define TMDR_MD_NORMAL (0 << 0)
#define TMDR_MD_PWM_1 (2 << 0)
#define TMDR_MD_PWM_2 (3 << 0)
#define TMDR_MD_PHASE_1 (4 << 0)
#define TMDR_MD_PHASE_2 (5 << 0)
#define TMDR_MD_PHASE_3 (6 << 0)
#define TMDR_MD_PHASE_4 (7 << 0)
#define TMDR_MD_PWM_SYNC (8 << 0)
#define TMDR_MD_PWM_COMP_CREST (13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
#define TMDR_MD_MASK (15 << 0)

/* TIOR: I/O control for the high (IOCH) and low (IOCL) nibble. */
#define TIOC_IOCH(n) ((n) << 4)
#define TIOC_IOCL(n) ((n) << 0)
#define TIOR_OC_RETAIN (0 << 0)
#define TIOR_OC_0_CLEAR (1 << 0)
#define TIOR_OC_0_SET (2 << 0)
#define TIOR_OC_0_TOGGLE (3 << 0)
#define TIOR_OC_1_CLEAR (5 << 0)
#define TIOR_OC_1_SET (6 << 0)
#define TIOR_OC_1_TOGGLE (7 << 0)
#define TIOR_IC_RISING (8 << 0)
#define TIOR_IC_FALLING (9 << 0)
#define TIOR_IC_BOTH (10 << 0)
#define TIOR_IC_TCNT (12 << 0)
#define TIOR_MASK (15 << 0)

/* TIER: interrupt enable bits. */
#define TIER_TTGE (1 << 7)
#define TIER_TTGE2 (1 << 6)
#define TIER_TCIEU (1 << 5)
#define TIER_TCIEV (1 << 4)
#define TIER_TGIED (1 << 3)
#define TIER_TGIEC (1 << 2)
#define TIER_TGIEB (1 << 1)
#define TIER_TGIEA (1 << 0)

/* TSR: interrupt status flags. */
#define TSR_TCFD (1 << 7)
#define TSR_TCFU (1 << 5)
#define TSR_TCFV (1 << 4)
#define TSR_TGFD (1 << 3)
#define TSR_TGFC (1 << 2)
#define TSR_TGFB (1 << 1)
#define TSR_TGFA (1 << 0)
d5ed4c2e
MD
151static unsigned long mtu2_reg_offs[] = {
152 [TCR] = 0,
153 [TMDR] = 1,
154 [TIOR] = 2,
155 [TIER] = 4,
156 [TSR] = 5,
157 [TCNT] = 6,
158 [TGR] = 8,
159};
160
42752cc6 161static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
d5ed4c2e 162{
d5ed4c2e
MD
163 unsigned long offs;
164
faf3f4f8
LP
165 if (reg_nr == TSTR) {
166 if (ch->mtu->legacy)
167 return ioread8(ch->mtu->mapbase);
168 else
169 return ioread8(ch->mtu->mapbase + 0x280);
170 }
d5ed4c2e
MD
171
172 offs = mtu2_reg_offs[reg_nr];
173
174 if ((reg_nr == TCNT) || (reg_nr == TGR))
da90a1c6 175 return ioread16(ch->base + offs);
d5ed4c2e 176 else
da90a1c6 177 return ioread8(ch->base + offs);
d5ed4c2e
MD
178}
179
42752cc6 180static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
d5ed4c2e
MD
181 unsigned long value)
182{
d5ed4c2e
MD
183 unsigned long offs;
184
185 if (reg_nr == TSTR) {
faf3f4f8
LP
186 if (ch->mtu->legacy)
187 return iowrite8(value, ch->mtu->mapbase);
188 else
189 return iowrite8(value, ch->mtu->mapbase + 0x280);
d5ed4c2e
MD
190 }
191
192 offs = mtu2_reg_offs[reg_nr];
193
194 if ((reg_nr == TCNT) || (reg_nr == TGR))
da90a1c6 195 iowrite16(value, ch->base + offs);
d5ed4c2e 196 else
da90a1c6 197 iowrite8(value, ch->base + offs);
d5ed4c2e
MD
198}
199
42752cc6 200static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
d5ed4c2e 201{
d5ed4c2e
MD
202 unsigned long flags, value;
203
204 /* start stop register shared by multiple timer channels */
50393a92 205 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
42752cc6 206 value = sh_mtu2_read(ch, TSTR);
d5ed4c2e
MD
207
208 if (start)
d2b93177 209 value |= 1 << ch->index;
d5ed4c2e 210 else
d2b93177 211 value &= ~(1 << ch->index);
d5ed4c2e 212
42752cc6 213 sh_mtu2_write(ch, TSTR, value);
50393a92 214 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
d5ed4c2e
MD
215}
216
/*
 * Power up and program a channel for periodic operation at HZ.
 * Returns 0 on success or a negative error code if the functional
 * clock cannot be enabled.
 */
static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
	unsigned long periodic;
	unsigned long rate;
	int ret;

	/* Keep the device powered and usable from syscore (timekeeping) context. */
	pm_runtime_get_sync(&ch->mtu->pdev->dev);
	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->mtu->clk);
	if (ret) {
		/*
		 * NOTE(review): the pm_runtime_get_sync() above is not
		 * balanced on this error path — confirm whether a
		 * pm_runtime_put() is needed here.
		 */
		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(ch, 0);

	/* Counter runs at clk/64; compute ticks per jiffy, rounded to nearest. */
	rate = clk_get_rate(ch->mtu->clk) / 64;
	periodic = (rate + HZ/2) / HZ;

	/*
	 * "Periodic Counter Operation"
	 * Clear on TGRA compare match, divide clock by 64.
	 */
	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
		      TIOC_IOCL(TIOR_OC_0_CLEAR));
	sh_mtu2_write(ch, TGR, periodic);	/* compare match value */
	sh_mtu2_write(ch, TCNT, 0);		/* start counting from zero */
	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
	sh_mtu2_write(ch, TIER, TIER_TGIEA);	/* interrupt on TGRA match */

	/* enable channel */
	sh_mtu2_start_stop_ch(ch, 1);

	return 0;
}
257
/*
 * Stop a channel and release the power/clock references taken by
 * sh_mtu2_enable().
 */
static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	/* Drop the syscore marking and the runtime PM reference. */
	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}
269
/*
 * TGIA interrupt handler: acknowledge the compare-match flag and kick
 * the clockevent layer.
 */
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/*
	 * Acknowledge the interrupt: TSR flags are cleared by writing 0
	 * after reading them as 1, so read TSR and write back the mask
	 * with only TGFA cleared.
	 */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}
282
/* Map a clock_event_device back to its embedding sh_mtu2_channel. */
static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}
287
/*
 * Clockevent set_mode callback. Tears down the previous mode first so
 * that the channel is not enabled twice, then applies the new one.
 * Only periodic mode actually programs the hardware.
 */
static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
				     struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sh_mtu2_disable(ch);
		disabled = 1;	/* remember so UNUSED below doesn't disable twice */
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->mtu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_mtu2_enable(ch);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_mtu2_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
319
/* Power the device's PM domain off when the clockevent is suspended. */
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
324
/* Power the device's PM domain back on when the clockevent resumes. */
static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
329
42752cc6 330static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
207e21a9 331 const char *name)
d5ed4c2e 332{
42752cc6 333 struct clock_event_device *ced = &ch->ced;
d5ed4c2e
MD
334 int ret;
335
d5ed4c2e
MD
336 ced->name = name;
337 ced->features = CLOCK_EVT_FEAT_PERIODIC;
207e21a9 338 ced->rating = 200;
3cc95047 339 ced->cpumask = cpu_possible_mask;
d5ed4c2e 340 ced->set_mode = sh_mtu2_clock_event_mode;
cc7ad456
RW
341 ced->suspend = sh_mtu2_clock_event_suspend;
342 ced->resume = sh_mtu2_clock_event_resume;
d5ed4c2e 343
d2b93177
LP
344 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
345 ch->index);
da64c2a8
PM
346 clockevents_register_device(ced);
347
42752cc6 348 ret = request_irq(ch->irq, sh_mtu2_interrupt,
276bee05 349 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
42752cc6 350 dev_name(&ch->mtu->pdev->dev), ch);
d5ed4c2e 351 if (ret) {
d2b93177
LP
352 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
353 ch->index, ch->irq);
d5ed4c2e
MD
354 return;
355 }
d5ed4c2e
MD
356}
357
aa83804a 358static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
207e21a9 359 bool clockevent)
d5ed4c2e 360{
faf3f4f8
LP
361 if (clockevent) {
362 ch->mtu->has_clockevent = true;
207e21a9 363 sh_mtu2_register_clockevent(ch, name);
faf3f4f8 364 }
d5ed4c2e
MD
365
366 return 0;
367}
368
faf3f4f8 369static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
2e1a5326
LP
370 struct sh_mtu2_device *mtu)
371{
faf3f4f8
LP
372 static const unsigned int channel_offsets[] = {
373 0x300, 0x380, 0x000,
374 };
375 bool clockevent;
2e1a5326 376
2e1a5326
LP
377 ch->mtu = mtu;
378
faf3f4f8
LP
379 if (mtu->legacy) {
380 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
381
382 clockevent = cfg->clockevent_rating != 0;
383
384 ch->irq = platform_get_irq(mtu->pdev, 0);
385 ch->base = mtu->mapbase - cfg->channel_offset;
386 ch->index = cfg->timer_bit;
387 } else {
388 char name[6];
389
390 clockevent = true;
391
392 sprintf(name, "tgi%ua", index);
393 ch->irq = platform_get_irq_byname(mtu->pdev, name);
394 ch->base = mtu->mapbase + channel_offsets[index];
395 ch->index = index;
396 }
397
2e1a5326 398 if (ch->irq < 0) {
faf3f4f8
LP
399 /* Skip channels with no declared interrupt. */
400 if (!mtu->legacy)
401 return 0;
402
d2b93177
LP
403 dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
404 ch->index);
2e1a5326
LP
405 return ch->irq;
406 }
407
faf3f4f8 408 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
2e1a5326
LP
409}
410
/*
 * ioremap the device's memory resource into mtu->mapbase.
 * Returns 0 on success, -ENXIO if the resource is missing or the
 * mapping fails.
 */
static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
	 */
	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
		/* Advance to the device base; undone in sh_mtu2_unmap_memory(). */
		mtu->mapbase += cfg->channel_offset;
	}

	return 0;
}
436
/*
 * Undo sh_mtu2_map_memory(). In legacy mode mapbase was advanced by
 * channel_offset after ioremap, so it must be walked back first to pass
 * the original mapping cookie to iounmap().
 */
static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
{
	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
		mtu->mapbase -= cfg->channel_offset;
	}

	iounmap(mtu->mapbase);
}
446
447static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
448 struct platform_device *pdev)
449{
450 struct sh_timer_config *cfg = pdev->dev.platform_data;
451 const struct platform_device_id *id = pdev->id_entry;
452 unsigned int i;
453 int ret;
454
455 mtu->pdev = pdev;
456 mtu->legacy = id->driver_data;
457
458 if (mtu->legacy && !cfg) {
459 dev_err(&mtu->pdev->dev, "missing platform data\n");
460 return -ENXIO;
461 }
da90a1c6 462
faf3f4f8 463 /* Get hold of clock. */
6dc9693b 464 mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
7dad72de
LP
465 if (IS_ERR(mtu->clk)) {
466 dev_err(&mtu->pdev->dev, "cannot get clock\n");
faf3f4f8 467 return PTR_ERR(mtu->clk);
d5ed4c2e
MD
468 }
469
7dad72de 470 ret = clk_prepare(mtu->clk);
bd754930 471 if (ret < 0)
faf3f4f8 472 goto err_clk_put;
bd754930 473
faf3f4f8
LP
474 /* Map the memory resource. */
475 ret = sh_mtu2_map_memory(mtu);
476 if (ret < 0) {
477 dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
478 goto err_clk_unprepare;
479 }
480
481 /* Allocate and setup the channels. */
482 if (mtu->legacy)
483 mtu->num_channels = 1;
484 else
485 mtu->num_channels = 3;
486
487 mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
488 GFP_KERNEL);
c54ccb43
LP
489 if (mtu->channels == NULL) {
490 ret = -ENOMEM;
faf3f4f8 491 goto err_unmap;
c54ccb43
LP
492 }
493
faf3f4f8
LP
494 if (mtu->legacy) {
495 ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
496 if (ret < 0)
497 goto err_unmap;
498 } else {
499 for (i = 0; i < mtu->num_channels; ++i) {
500 ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
501 if (ret < 0)
502 goto err_unmap;
503 }
504 }
c54ccb43 505
faf3f4f8 506 platform_set_drvdata(pdev, mtu);
a4a5fc3b
LP
507
508 return 0;
faf3f4f8
LP
509
510err_unmap:
c54ccb43 511 kfree(mtu->channels);
faf3f4f8
LP
512 sh_mtu2_unmap_memory(mtu);
513err_clk_unprepare:
7dad72de 514 clk_unprepare(mtu->clk);
faf3f4f8 515err_clk_put:
7dad72de 516 clk_put(mtu->clk);
d5ed4c2e
MD
517 return ret;
518}
519
/*
 * Platform driver probe. The driver may be probed twice: once as an
 * early platform device ("earlytimer") before the driver model is up,
 * and again as a regular platform device. The second probe detects the
 * first via drvdata and only finishes the runtime PM setup.
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is not available during the early probe. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (mtu) {
		/* Already set up during the earlytimer pass. */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * An active clockevent must stay usable with interrupts off, so
	 * mark the device IRQ-safe; otherwise let runtime PM idle it.
	 */
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
556
/*
 * Removal is refused: registered clock event devices cannot be
 * unregistered from the clockevents core in this kernel version.
 */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
561
/*
 * Device ID table. driver_data is the "legacy" flag: "sh_mtu2" is the
 * legacy one-device-per-channel binding, "sh-mtu2" the multi-channel one.
 */
static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh_mtu2", 1 },
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
568
/* Platform driver glue; matched by name or via the ID table above. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
	},
	.id_table	= sh_mtu2_id_table,
};
577
/* Module init: register the platform driver. */
static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}
582
/* Module exit: unregister the platform driver. */
static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}
587
/*
 * Register as an "earlytimer" early platform device so the timer can be
 * probed before the driver model is available; the normal registration
 * happens again at subsys_initcall time.
 */
early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");