/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
	driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
56 struct exynos_tmu_data
{
58 struct exynos_tmu_platform_data
*pdata
;
60 void __iomem
*base_second
;
63 struct work_struct irq_work
;
65 struct clk
*clk
, *clk_sec
;
66 u8 temp_error1
, temp_error2
;
67 struct regulator
*regulator
;
68 struct thermal_sensor_conf
*reg_conf
;
72 * TMU treats temperature as a mapped temperature code.
73 * The temperature is converted differently depending on the calibration type.
75 static int temp_to_code(struct exynos_tmu_data
*data
, u8 temp
)
77 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
80 switch (pdata
->cal_type
) {
81 case TYPE_TWO_POINT_TRIMMING
:
82 temp_code
= (temp
- pdata
->first_point_trim
) *
83 (data
->temp_error2
- data
->temp_error1
) /
84 (pdata
->second_point_trim
- pdata
->first_point_trim
) +
87 case TYPE_ONE_POINT_TRIMMING
:
88 temp_code
= temp
+ data
->temp_error1
- pdata
->first_point_trim
;
91 temp_code
= temp
+ pdata
->default_temp_offset
;
99 * Calculate a temperature value from a temperature code.
100 * The unit of the temperature is degree Celsius.
102 static int code_to_temp(struct exynos_tmu_data
*data
, u8 temp_code
)
104 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
107 switch (pdata
->cal_type
) {
108 case TYPE_TWO_POINT_TRIMMING
:
109 temp
= (temp_code
- data
->temp_error1
) *
110 (pdata
->second_point_trim
- pdata
->first_point_trim
) /
111 (data
->temp_error2
- data
->temp_error1
) +
112 pdata
->first_point_trim
;
114 case TYPE_ONE_POINT_TRIMMING
:
115 temp
= temp_code
- data
->temp_error1
+ pdata
->first_point_trim
;
118 temp
= temp_code
- pdata
->default_temp_offset
;
125 static int exynos_tmu_initialize(struct platform_device
*pdev
)
127 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
128 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
129 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
130 unsigned int status
, trim_info
= 0, con
;
131 unsigned int rising_threshold
= 0, falling_threshold
= 0;
132 int ret
= 0, threshold_code
, i
;
134 mutex_lock(&data
->lock
);
135 clk_enable(data
->clk
);
136 if (!IS_ERR(data
->clk_sec
))
137 clk_enable(data
->clk_sec
);
139 if (TMU_SUPPORTS(pdata
, READY_STATUS
)) {
140 status
= readb(data
->base
+ reg
->tmu_status
);
147 if (TMU_SUPPORTS(pdata
, TRIM_RELOAD
))
148 __raw_writel(1, data
->base
+ reg
->triminfo_ctrl
);
150 /* Save trimming info in order to perform calibration */
151 if (data
->soc
== SOC_ARCH_EXYNOS5440
) {
153 * For exynos5440 soc triminfo value is swapped between TMU0 and
154 * TMU2, so the below logic is needed.
158 trim_info
= readl(data
->base
+
159 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
162 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
165 trim_info
= readl(data
->base
-
166 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
169 /* On exynos5420 the triminfo register is in the shared space */
170 if (data
->soc
== SOC_ARCH_EXYNOS5420_TRIMINFO
)
171 trim_info
= readl(data
->base_second
+
174 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
176 data
->temp_error1
= trim_info
& EXYNOS_TMU_TEMP_MASK
;
177 data
->temp_error2
= ((trim_info
>> EXYNOS_TRIMINFO_85_SHIFT
) &
178 EXYNOS_TMU_TEMP_MASK
);
180 if (!data
->temp_error1
||
181 (pdata
->min_efuse_value
> data
->temp_error1
) ||
182 (data
->temp_error1
> pdata
->max_efuse_value
))
183 data
->temp_error1
= pdata
->efuse_value
& EXYNOS_TMU_TEMP_MASK
;
185 if (!data
->temp_error2
)
187 (pdata
->efuse_value
>> EXYNOS_TRIMINFO_85_SHIFT
) &
188 EXYNOS_TMU_TEMP_MASK
;
190 rising_threshold
= readl(data
->base
+ reg
->threshold_th0
);
192 if (data
->soc
== SOC_ARCH_EXYNOS4210
) {
193 /* Write temperature code for threshold */
194 threshold_code
= temp_to_code(data
, pdata
->threshold
);
195 writeb(threshold_code
,
196 data
->base
+ reg
->threshold_temp
);
197 for (i
= 0; i
< pdata
->non_hw_trigger_levels
; i
++)
198 writeb(pdata
->trigger_levels
[i
], data
->base
+
199 reg
->threshold_th0
+ i
* sizeof(reg
->threshold_th0
));
201 writel(reg
->intclr_rise_mask
, data
->base
+ reg
->tmu_intclear
);
203 /* Write temperature code for rising and falling threshold */
204 for (i
= 0; i
< pdata
->non_hw_trigger_levels
; i
++) {
205 threshold_code
= temp_to_code(data
,
206 pdata
->trigger_levels
[i
]);
207 rising_threshold
&= ~(0xff << 8 * i
);
208 rising_threshold
|= threshold_code
<< 8 * i
;
209 if (pdata
->threshold_falling
) {
210 threshold_code
= temp_to_code(data
,
211 pdata
->trigger_levels
[i
] -
212 pdata
->threshold_falling
);
213 falling_threshold
|= threshold_code
<< 8 * i
;
217 writel(rising_threshold
,
218 data
->base
+ reg
->threshold_th0
);
219 writel(falling_threshold
,
220 data
->base
+ reg
->threshold_th1
);
222 writel((reg
->intclr_rise_mask
<< reg
->intclr_rise_shift
) |
223 (reg
->intclr_fall_mask
<< reg
->intclr_fall_shift
),
224 data
->base
+ reg
->tmu_intclear
);
226 /* if last threshold limit is also present */
227 i
= pdata
->max_trigger_level
- 1;
228 if (pdata
->trigger_levels
[i
] &&
229 (pdata
->trigger_type
[i
] == HW_TRIP
)) {
230 threshold_code
= temp_to_code(data
,
231 pdata
->trigger_levels
[i
]);
232 if (i
== EXYNOS_MAX_TRIGGER_PER_REG
- 1) {
233 /* 1-4 level to be assigned in th0 reg */
234 rising_threshold
&= ~(0xff << 8 * i
);
235 rising_threshold
|= threshold_code
<< 8 * i
;
236 writel(rising_threshold
,
237 data
->base
+ reg
->threshold_th0
);
238 } else if (i
== EXYNOS_MAX_TRIGGER_PER_REG
) {
239 /* 5th level to be assigned in th2 reg */
241 threshold_code
<< reg
->threshold_th3_l0_shift
;
242 writel(rising_threshold
,
243 data
->base
+ reg
->threshold_th2
);
245 con
= readl(data
->base
+ reg
->tmu_ctrl
);
246 con
|= (1 << reg
->therm_trip_en_shift
);
247 writel(con
, data
->base
+ reg
->tmu_ctrl
);
250 /*Clear the PMIN in the common TMU register*/
251 if (reg
->tmu_pmin
&& !data
->id
)
252 writel(0, data
->base_second
+ reg
->tmu_pmin
);
254 clk_disable(data
->clk
);
255 mutex_unlock(&data
->lock
);
256 if (!IS_ERR(data
->clk_sec
))
257 clk_disable(data
->clk_sec
);
262 static void exynos_tmu_control(struct platform_device
*pdev
, bool on
)
264 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
265 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
266 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
267 unsigned int con
, interrupt_en
;
269 mutex_lock(&data
->lock
);
270 clk_enable(data
->clk
);
272 con
= readl(data
->base
+ reg
->tmu_ctrl
);
275 con
|= (pdata
->test_mux
<< reg
->test_mux_addr_shift
);
277 con
&= ~(EXYNOS_TMU_REF_VOLTAGE_MASK
<< EXYNOS_TMU_REF_VOLTAGE_SHIFT
);
278 con
|= pdata
->reference_voltage
<< EXYNOS_TMU_REF_VOLTAGE_SHIFT
;
280 con
&= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK
<< EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT
);
281 con
|= (pdata
->gain
<< EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT
);
283 if (pdata
->noise_cancel_mode
) {
284 con
&= ~(reg
->therm_trip_mode_mask
<<
285 reg
->therm_trip_mode_shift
);
286 con
|= (pdata
->noise_cancel_mode
<< reg
->therm_trip_mode_shift
);
290 con
|= (1 << EXYNOS_TMU_CORE_EN_SHIFT
);
292 pdata
->trigger_enable
[3] << reg
->inten_rise3_shift
|
293 pdata
->trigger_enable
[2] << reg
->inten_rise2_shift
|
294 pdata
->trigger_enable
[1] << reg
->inten_rise1_shift
|
295 pdata
->trigger_enable
[0] << reg
->inten_rise0_shift
;
296 if (TMU_SUPPORTS(pdata
, FALLING_TRIP
))
298 interrupt_en
<< reg
->inten_fall0_shift
;
300 con
&= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT
);
301 interrupt_en
= 0; /* Disable all interrupts */
303 writel(interrupt_en
, data
->base
+ reg
->tmu_inten
);
304 writel(con
, data
->base
+ reg
->tmu_ctrl
);
306 clk_disable(data
->clk
);
307 mutex_unlock(&data
->lock
);
310 static int exynos_tmu_read(struct exynos_tmu_data
*data
)
312 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
313 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
317 mutex_lock(&data
->lock
);
318 clk_enable(data
->clk
);
320 temp_code
= readb(data
->base
+ reg
->tmu_cur_temp
);
322 if (data
->soc
== SOC_ARCH_EXYNOS4210
)
323 /* temp_code should range between 75 and 175 */
324 if (temp_code
< 75 || temp_code
> 175) {
329 temp
= code_to_temp(data
, temp_code
);
331 clk_disable(data
->clk
);
332 mutex_unlock(&data
->lock
);
337 #ifdef CONFIG_THERMAL_EMULATION
338 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
340 struct exynos_tmu_data
*data
= drv_data
;
341 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
342 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
346 if (!TMU_SUPPORTS(pdata
, EMULATION
))
349 if (temp
&& temp
< MCELSIUS
)
352 mutex_lock(&data
->lock
);
353 clk_enable(data
->clk
);
355 val
= readl(data
->base
+ reg
->emul_con
);
360 if (TMU_SUPPORTS(pdata
, EMUL_TIME
)) {
361 val
&= ~(EXYNOS_EMUL_TIME_MASK
<< reg
->emul_time_shift
);
362 val
|= (EXYNOS_EMUL_TIME
<< reg
->emul_time_shift
);
364 val
&= ~(EXYNOS_EMUL_DATA_MASK
<< reg
->emul_temp_shift
);
365 val
|= (temp_to_code(data
, temp
) << reg
->emul_temp_shift
) |
368 val
&= ~EXYNOS_EMUL_ENABLE
;
371 writel(val
, data
->base
+ reg
->emul_con
);
373 clk_disable(data
->clk
);
374 mutex_unlock(&data
->lock
);
380 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
382 #endif/*CONFIG_THERMAL_EMULATION*/
384 static void exynos_tmu_work(struct work_struct
*work
)
386 struct exynos_tmu_data
*data
= container_of(work
,
387 struct exynos_tmu_data
, irq_work
);
388 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
389 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
390 unsigned int val_irq
, val_type
;
392 if (!IS_ERR(data
->clk_sec
))
393 clk_enable(data
->clk_sec
);
394 /* Find which sensor generated this interrupt */
395 if (reg
->tmu_irqstatus
) {
396 val_type
= readl(data
->base_second
+ reg
->tmu_irqstatus
);
397 if (!((val_type
>> data
->id
) & 0x1))
400 if (!IS_ERR(data
->clk_sec
))
401 clk_disable(data
->clk_sec
);
403 exynos_report_trigger(data
->reg_conf
);
404 mutex_lock(&data
->lock
);
405 clk_enable(data
->clk
);
407 /* TODO: take action based on particular interrupt */
408 val_irq
= readl(data
->base
+ reg
->tmu_intstat
);
409 /* clear the interrupts */
410 writel(val_irq
, data
->base
+ reg
->tmu_intclear
);
412 clk_disable(data
->clk
);
413 mutex_unlock(&data
->lock
);
415 enable_irq(data
->irq
);
418 static irqreturn_t
exynos_tmu_irq(int irq
, void *id
)
420 struct exynos_tmu_data
*data
= id
;
422 disable_irq_nosync(irq
);
423 schedule_work(&data
->irq_work
);
428 static const struct of_device_id exynos_tmu_match
[] = {
430 .compatible
= "samsung,exynos3250-tmu",
431 .data
= (void *)EXYNOS3250_TMU_DRV_DATA
,
434 .compatible
= "samsung,exynos4210-tmu",
435 .data
= (void *)EXYNOS4210_TMU_DRV_DATA
,
438 .compatible
= "samsung,exynos4412-tmu",
439 .data
= (void *)EXYNOS4412_TMU_DRV_DATA
,
442 .compatible
= "samsung,exynos5250-tmu",
443 .data
= (void *)EXYNOS5250_TMU_DRV_DATA
,
446 .compatible
= "samsung,exynos5260-tmu",
447 .data
= (void *)EXYNOS5260_TMU_DRV_DATA
,
450 .compatible
= "samsung,exynos5420-tmu",
451 .data
= (void *)EXYNOS5420_TMU_DRV_DATA
,
454 .compatible
= "samsung,exynos5420-tmu-ext-triminfo",
455 .data
= (void *)EXYNOS5420_TMU_DRV_DATA
,
458 .compatible
= "samsung,exynos5440-tmu",
459 .data
= (void *)EXYNOS5440_TMU_DRV_DATA
,
463 MODULE_DEVICE_TABLE(of
, exynos_tmu_match
);
465 static inline struct exynos_tmu_platform_data
*exynos_get_driver_data(
466 struct platform_device
*pdev
, int id
)
468 struct exynos_tmu_init_data
*data_table
;
469 struct exynos_tmu_platform_data
*tmu_data
;
470 const struct of_device_id
*match
;
472 match
= of_match_node(exynos_tmu_match
, pdev
->dev
.of_node
);
475 data_table
= (struct exynos_tmu_init_data
*) match
->data
;
476 if (!data_table
|| id
>= data_table
->tmu_count
)
478 tmu_data
= data_table
->tmu_data
;
479 return (struct exynos_tmu_platform_data
*) (tmu_data
+ id
);
482 static int exynos_map_dt_data(struct platform_device
*pdev
)
484 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
485 struct exynos_tmu_platform_data
*pdata
;
489 if (!data
|| !pdev
->dev
.of_node
)
493 * Try enabling the regulator if found
494 * TODO: Add regulator as an SOC feature, so that regulator enable
495 * is a compulsory call.
497 data
->regulator
= devm_regulator_get(&pdev
->dev
, "vtmu");
498 if (!IS_ERR(data
->regulator
)) {
499 ret
= regulator_enable(data
->regulator
);
501 dev_err(&pdev
->dev
, "failed to enable vtmu\n");
505 dev_info(&pdev
->dev
, "Regulator node (vtmu) not found\n");
508 data
->id
= of_alias_get_id(pdev
->dev
.of_node
, "tmuctrl");
512 data
->irq
= irq_of_parse_and_map(pdev
->dev
.of_node
, 0);
513 if (data
->irq
<= 0) {
514 dev_err(&pdev
->dev
, "failed to get IRQ\n");
518 if (of_address_to_resource(pdev
->dev
.of_node
, 0, &res
)) {
519 dev_err(&pdev
->dev
, "failed to get Resource 0\n");
523 data
->base
= devm_ioremap(&pdev
->dev
, res
.start
, resource_size(&res
));
525 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
526 return -EADDRNOTAVAIL
;
529 pdata
= exynos_get_driver_data(pdev
, data
->id
);
531 dev_err(&pdev
->dev
, "No platform init data supplied.\n");
536 * Check if the TMU shares some registers and then try to map the
537 * memory of common registers.
539 if (!TMU_SUPPORTS(pdata
, ADDRESS_MULTIPLE
))
542 if (of_address_to_resource(pdev
->dev
.of_node
, 1, &res
)) {
543 dev_err(&pdev
->dev
, "failed to get Resource 1\n");
547 data
->base_second
= devm_ioremap(&pdev
->dev
, res
.start
,
548 resource_size(&res
));
549 if (!data
->base_second
) {
550 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
557 static int exynos_tmu_probe(struct platform_device
*pdev
)
559 struct exynos_tmu_data
*data
;
560 struct exynos_tmu_platform_data
*pdata
;
561 struct thermal_sensor_conf
*sensor_conf
;
564 data
= devm_kzalloc(&pdev
->dev
, sizeof(struct exynos_tmu_data
),
569 platform_set_drvdata(pdev
, data
);
570 mutex_init(&data
->lock
);
572 ret
= exynos_map_dt_data(pdev
);
578 INIT_WORK(&data
->irq_work
, exynos_tmu_work
);
580 data
->clk
= devm_clk_get(&pdev
->dev
, "tmu_apbif");
581 if (IS_ERR(data
->clk
)) {
582 dev_err(&pdev
->dev
, "Failed to get clock\n");
583 return PTR_ERR(data
->clk
);
586 data
->clk_sec
= devm_clk_get(&pdev
->dev
, "tmu_triminfo_apbif");
587 if (IS_ERR(data
->clk_sec
)) {
588 if (data
->soc
== SOC_ARCH_EXYNOS5420_TRIMINFO
) {
589 dev_err(&pdev
->dev
, "Failed to get triminfo clock\n");
590 return PTR_ERR(data
->clk_sec
);
593 ret
= clk_prepare(data
->clk_sec
);
595 dev_err(&pdev
->dev
, "Failed to get clock\n");
600 ret
= clk_prepare(data
->clk
);
602 dev_err(&pdev
->dev
, "Failed to get clock\n");
606 if (pdata
->type
== SOC_ARCH_EXYNOS3250
||
607 pdata
->type
== SOC_ARCH_EXYNOS4210
||
608 pdata
->type
== SOC_ARCH_EXYNOS4412
||
609 pdata
->type
== SOC_ARCH_EXYNOS5250
||
610 pdata
->type
== SOC_ARCH_EXYNOS5260
||
611 pdata
->type
== SOC_ARCH_EXYNOS5420_TRIMINFO
||
612 pdata
->type
== SOC_ARCH_EXYNOS5440
)
613 data
->soc
= pdata
->type
;
616 dev_err(&pdev
->dev
, "Platform not supported\n");
620 ret
= exynos_tmu_initialize(pdev
);
622 dev_err(&pdev
->dev
, "Failed to initialize TMU\n");
626 exynos_tmu_control(pdev
, true);
628 /* Allocate a structure to register with the exynos core thermal */
629 sensor_conf
= devm_kzalloc(&pdev
->dev
,
630 sizeof(struct thermal_sensor_conf
), GFP_KERNEL
);
635 sprintf(sensor_conf
->name
, "therm_zone%d", data
->id
);
636 sensor_conf
->read_temperature
= (int (*)(void *))exynos_tmu_read
;
637 sensor_conf
->write_emul_temp
=
638 (int (*)(void *, unsigned long))exynos_tmu_set_emulation
;
639 sensor_conf
->driver_data
= data
;
640 sensor_conf
->trip_data
.trip_count
= pdata
->trigger_enable
[0] +
641 pdata
->trigger_enable
[1] + pdata
->trigger_enable
[2]+
642 pdata
->trigger_enable
[3];
644 for (i
= 0; i
< sensor_conf
->trip_data
.trip_count
; i
++) {
645 sensor_conf
->trip_data
.trip_val
[i
] =
646 pdata
->threshold
+ pdata
->trigger_levels
[i
];
647 sensor_conf
->trip_data
.trip_type
[i
] =
648 pdata
->trigger_type
[i
];
651 sensor_conf
->trip_data
.trigger_falling
= pdata
->threshold_falling
;
653 sensor_conf
->cooling_data
.freq_clip_count
= pdata
->freq_tab_count
;
654 for (i
= 0; i
< pdata
->freq_tab_count
; i
++) {
655 sensor_conf
->cooling_data
.freq_data
[i
].freq_clip_max
=
656 pdata
->freq_tab
[i
].freq_clip_max
;
657 sensor_conf
->cooling_data
.freq_data
[i
].temp_level
=
658 pdata
->freq_tab
[i
].temp_level
;
660 sensor_conf
->dev
= &pdev
->dev
;
661 /* Register the sensor with thermal management interface */
662 ret
= exynos_register_thermal(sensor_conf
);
664 dev_err(&pdev
->dev
, "Failed to register thermal interface\n");
667 data
->reg_conf
= sensor_conf
;
669 ret
= devm_request_irq(&pdev
->dev
, data
->irq
, exynos_tmu_irq
,
670 IRQF_TRIGGER_RISING
| IRQF_SHARED
, dev_name(&pdev
->dev
), data
);
672 dev_err(&pdev
->dev
, "Failed to request irq: %d\n", data
->irq
);
678 clk_unprepare(data
->clk
);
680 if (!IS_ERR(data
->clk_sec
))
681 clk_unprepare(data
->clk_sec
);
685 static int exynos_tmu_remove(struct platform_device
*pdev
)
687 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
689 exynos_unregister_thermal(data
->reg_conf
);
691 exynos_tmu_control(pdev
, false);
693 clk_unprepare(data
->clk
);
694 if (!IS_ERR(data
->clk_sec
))
695 clk_unprepare(data
->clk_sec
);
697 if (!IS_ERR(data
->regulator
))
698 regulator_disable(data
->regulator
);
#ifdef CONFIG_PM_SLEEP
/* Suspend: just power the TMU core down; state is reprogrammed on resume. */
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

/* Resume: re-run full hardware init (trim/thresholds) and re-enable. */
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
728 static struct platform_driver exynos_tmu_driver
= {
730 .name
= "exynos-tmu",
731 .owner
= THIS_MODULE
,
733 .of_match_table
= exynos_tmu_match
,
735 .probe
= exynos_tmu_probe
,
736 .remove
= exynos_tmu_remove
,
739 module_platform_driver(exynos_tmu_driver
);
741 MODULE_DESCRIPTION("EXYNOS TMU Driver");
742 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
743 MODULE_LICENSE("GPL");
744 MODULE_ALIAS("platform:exynos-tmu");