]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/thermal/samsung/exynos_tmu.c
thermal: exynos: remove TMU_SUPPORT_TRIM_RELOAD flag
[mirror_ubuntu-artful-kernel.git] / drivers / thermal / samsung / exynos_tmu.c
1 /*
2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com>
6 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 #include <linux/clk.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/of_address.h>
30 #include <linux/of_irq.h>
31 #include <linux/platform_device.h>
32 #include <linux/regulator/consumer.h>
33
34 #include "exynos_thermal_common.h"
35 #include "exynos_tmu.h"
36 #include "exynos_tmu_data.h"
37
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};
70
71 /*
72 * TMU treats temperature as a mapped temperature code.
73 * The temperature is converted differently depending on the calibration type.
74 */
75 static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
76 {
77 struct exynos_tmu_platform_data *pdata = data->pdata;
78 int temp_code;
79
80 switch (pdata->cal_type) {
81 case TYPE_TWO_POINT_TRIMMING:
82 temp_code = (temp - pdata->first_point_trim) *
83 (data->temp_error2 - data->temp_error1) /
84 (pdata->second_point_trim - pdata->first_point_trim) +
85 data->temp_error1;
86 break;
87 case TYPE_ONE_POINT_TRIMMING:
88 temp_code = temp + data->temp_error1 - pdata->first_point_trim;
89 break;
90 default:
91 temp_code = temp + pdata->default_temp_offset;
92 break;
93 }
94
95 return temp_code;
96 }
97
98 /*
99 * Calculate a temperature value from a temperature code.
100 * The unit of the temperature is degree Celsius.
101 */
102 static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
103 {
104 struct exynos_tmu_platform_data *pdata = data->pdata;
105 int temp;
106
107 switch (pdata->cal_type) {
108 case TYPE_TWO_POINT_TRIMMING:
109 temp = (temp_code - data->temp_error1) *
110 (pdata->second_point_trim - pdata->first_point_trim) /
111 (data->temp_error2 - data->temp_error1) +
112 pdata->first_point_trim;
113 break;
114 case TYPE_ONE_POINT_TRIMMING:
115 temp = temp_code - data->temp_error1 + pdata->first_point_trim;
116 break;
117 default:
118 temp = temp_code - pdata->default_temp_offset;
119 break;
120 }
121
122 return temp;
123 }
124
125 static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
126 {
127 const struct exynos_tmu_registers *reg = data->pdata->registers;
128 unsigned int val_irq;
129
130 val_irq = readl(data->base + reg->tmu_intstat);
131 /*
132 * Clear the interrupts. Please note that the documentation for
133 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
134 * states that INTCLEAR register has a different placing of bits
135 * responsible for FALL IRQs than INTSTAT register. Exynos5420
136 * and Exynos5440 documentation is correct (Exynos4210 doesn't
137 * support FALL IRQs at all).
138 */
139 writel(val_irq, data->base + reg->tmu_intclear);
140 }
141
/*
 * Program the TMU with its calibration and threshold configuration:
 * read the fused trim values (substituting efuse defaults when they are
 * missing or out of range) and write the rising/falling temperature
 * thresholds.  Returns 0 on success or -EBUSY if the controller is not
 * ready.  Called from probe and on resume (registers are lost across
 * suspend).
 */
static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con, ctrl;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);

	/* Exynos5440 has no status register at this offset; skip the check. */
	if (data->soc != SOC_ARCH_EXYNOS5440) {
		status = readb(data->base + EXYNOS_TMU_REG_STATUS);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	/* Trigger a reload of the trim values from efuse on SoCs that
	 * need it (Exynos3250 has two reload-control registers). */
	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * For exynos5440 soc triminfo value is swapped between TMU0 and
		 * TMU2, so the below logic is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
			EXYNOS5440_EFUSE_SWAP_OFFSET + EXYNOS5440_TMU_S0_7_TRIM);
			break;
		case 1:
			trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
			break;
		case 2:
			trim_info = readl(data->base -
			EXYNOS5440_EFUSE_SWAP_OFFSET + EXYNOS5440_TMU_S0_7_TRIM);
		}
	} else {
		/* On exynos5420 the triminfo register is in the shared space */
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
			trim_info = readl(data->base_second +
						EXYNOS_TMU_REG_TRIMINFO);
		else
			trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	}
	/* Low byte: 25C trim point; next byte: 85C trim point. */
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	/* Fall back to the platform efuse defaults when the fused values
	 * are absent or outside the sane range. */
	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;

	rising_threshold = readl(data->base + reg->threshold_th0);

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		writeb(threshold_code,
			data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
		/* Exynos4210 trigger levels are byte-wide, one per register. */
		for (i = 0; i < pdata->non_hw_trigger_levels; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		exynos_tmu_clear_irqs(data);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
			/* Each level occupies one byte of the threshold regs. */
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			rising_threshold &= ~(0xff << 8 * i);
			rising_threshold |= threshold_code << 8 * i;
			if (data->soc != SOC_ARCH_EXYNOS5440) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				falling_threshold |= threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		exynos_tmu_clear_irqs(data);

		/* if last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (data->soc != SOC_ARCH_EXYNOS5440) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold &= ~(0xff << 8 * i);
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + EXYNOS_THD_TEMP_RISE);
			} else {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
				writel(rising_threshold,
					data->base + EXYNOS5440_TMU_S0_7_TH2);
			}
			/* Arm the hardware thermal-trip (forced shutdown). */
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/*Clear the PMIN in the common TMU register*/
	if (data->soc == SOC_ARCH_EXYNOS5440 && !data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}
286
/*
 * Write the TMU control register (reference voltage, gain, noise-cancel
 * mode, optional test mux) and switch the sensing core and its
 * interrupt enables on or off according to @on.
 */
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	/* Optional sensing-path mux override (Exynos4412-style field). */
	if (pdata->test_mux)
		con |= (pdata->test_mux << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		/* One enable bit per configured rising trigger level. */
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		/* Mirror the rising enables into the falling-IRQ bits. */
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}
333
334 static int exynos_tmu_read(struct exynos_tmu_data *data)
335 {
336 struct exynos_tmu_platform_data *pdata = data->pdata;
337 const struct exynos_tmu_registers *reg = pdata->registers;
338 u8 temp_code;
339 int temp;
340
341 mutex_lock(&data->lock);
342 clk_enable(data->clk);
343
344 temp_code = readb(data->base + reg->tmu_cur_temp);
345
346 if (data->soc == SOC_ARCH_EXYNOS4210)
347 /* temp_code should range between 75 and 175 */
348 if (temp_code < 75 || temp_code > 175) {
349 temp = -ENODATA;
350 goto out;
351 }
352
353 temp = code_to_temp(data, temp_code);
354 out:
355 clk_disable(data->clk);
356 mutex_unlock(&data->lock);
357
358 return temp;
359 }
360
#ifdef CONFIG_THERMAL_EMULATION
/*
 * Force the TMU to report a fixed temperature for testing.
 * @temp is in millicelsius; 0 disables emulation.  Returns 0 on
 * success, -EINVAL if emulation is unsupported or @temp is non-zero
 * but below 1 degree Celsius.
 */
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	/* A non-zero request below 1C (MCELSIUS) is rejected. */
	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;

		/* Program the emulated sample delay where supported. */
		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		/* Write the emulated temperature code and enable emulation. */
		val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
		val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
/* Emulation support compiled out: always refuse. */
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif/*CONFIG_THERMAL_EMULATION*/
407
408 static void exynos_tmu_work(struct work_struct *work)
409 {
410 struct exynos_tmu_data *data = container_of(work,
411 struct exynos_tmu_data, irq_work);
412 unsigned int val_type;
413
414 if (!IS_ERR(data->clk_sec))
415 clk_enable(data->clk_sec);
416 /* Find which sensor generated this interrupt */
417 if (data->soc == SOC_ARCH_EXYNOS5440) {
418 val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
419 if (!((val_type >> data->id) & 0x1))
420 goto out;
421 }
422 if (!IS_ERR(data->clk_sec))
423 clk_disable(data->clk_sec);
424
425 exynos_report_trigger(data->reg_conf);
426 mutex_lock(&data->lock);
427 clk_enable(data->clk);
428
429 /* TODO: take action based on particular interrupt */
430 exynos_tmu_clear_irqs(data);
431
432 clk_disable(data->clk);
433 mutex_unlock(&data->lock);
434 out:
435 enable_irq(data->irq);
436 }
437
438 static irqreturn_t exynos_tmu_irq(int irq, void *id)
439 {
440 struct exynos_tmu_data *data = id;
441
442 disable_irq_nosync(irq);
443 schedule_work(&data->irq_work);
444
445 return IRQ_HANDLED;
446 }
447
/* Map each supported compatible string to its per-SoC TMU init data. */
static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5260-tmu",
		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	/* Same init data; the extra DT resource holds the triminfo space. */
	{
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
484
485 static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
486 struct platform_device *pdev, int id)
487 {
488 struct exynos_tmu_init_data *data_table;
489 struct exynos_tmu_platform_data *tmu_data;
490 const struct of_device_id *match;
491
492 match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
493 if (!match)
494 return NULL;
495 data_table = (struct exynos_tmu_init_data *) match->data;
496 if (!data_table || id >= data_table->tmu_count)
497 return NULL;
498 tmu_data = data_table->tmu_data;
499 return (struct exynos_tmu_platform_data *) (tmu_data + id);
500 }
501
/*
 * Gather everything this instance needs from the device tree: the
 * optional vtmu regulator, the tmuctrl alias id, the interrupt, the
 * register window(s) and the matching platform data.  Returns 0 on
 * success or a negative errno.
 */
static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	/* No alias means a single-instance setup; default to id 0. */
	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}
576
577 static int exynos_tmu_probe(struct platform_device *pdev)
578 {
579 struct exynos_tmu_data *data;
580 struct exynos_tmu_platform_data *pdata;
581 struct thermal_sensor_conf *sensor_conf;
582 int ret, i;
583
584 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
585 GFP_KERNEL);
586 if (!data)
587 return -ENOMEM;
588
589 platform_set_drvdata(pdev, data);
590 mutex_init(&data->lock);
591
592 ret = exynos_map_dt_data(pdev);
593 if (ret)
594 return ret;
595
596 pdata = data->pdata;
597
598 INIT_WORK(&data->irq_work, exynos_tmu_work);
599
600 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
601 if (IS_ERR(data->clk)) {
602 dev_err(&pdev->dev, "Failed to get clock\n");
603 return PTR_ERR(data->clk);
604 }
605
606 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
607 if (IS_ERR(data->clk_sec)) {
608 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
609 dev_err(&pdev->dev, "Failed to get triminfo clock\n");
610 return PTR_ERR(data->clk_sec);
611 }
612 } else {
613 ret = clk_prepare(data->clk_sec);
614 if (ret) {
615 dev_err(&pdev->dev, "Failed to get clock\n");
616 return ret;
617 }
618 }
619
620 ret = clk_prepare(data->clk);
621 if (ret) {
622 dev_err(&pdev->dev, "Failed to get clock\n");
623 goto err_clk_sec;
624 }
625
626 if (pdata->type == SOC_ARCH_EXYNOS3250 ||
627 pdata->type == SOC_ARCH_EXYNOS4210 ||
628 pdata->type == SOC_ARCH_EXYNOS4412 ||
629 pdata->type == SOC_ARCH_EXYNOS5250 ||
630 pdata->type == SOC_ARCH_EXYNOS5260 ||
631 pdata->type == SOC_ARCH_EXYNOS5420 ||
632 pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
633 pdata->type == SOC_ARCH_EXYNOS5440)
634 data->soc = pdata->type;
635 else {
636 ret = -EINVAL;
637 dev_err(&pdev->dev, "Platform not supported\n");
638 goto err_clk;
639 }
640
641 ret = exynos_tmu_initialize(pdev);
642 if (ret) {
643 dev_err(&pdev->dev, "Failed to initialize TMU\n");
644 goto err_clk;
645 }
646
647 exynos_tmu_control(pdev, true);
648
649 /* Allocate a structure to register with the exynos core thermal */
650 sensor_conf = devm_kzalloc(&pdev->dev,
651 sizeof(struct thermal_sensor_conf), GFP_KERNEL);
652 if (!sensor_conf) {
653 ret = -ENOMEM;
654 goto err_clk;
655 }
656 sprintf(sensor_conf->name, "therm_zone%d", data->id);
657 sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
658 sensor_conf->write_emul_temp =
659 (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
660 sensor_conf->driver_data = data;
661 sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
662 pdata->trigger_enable[1] + pdata->trigger_enable[2]+
663 pdata->trigger_enable[3];
664
665 for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
666 sensor_conf->trip_data.trip_val[i] =
667 pdata->threshold + pdata->trigger_levels[i];
668 sensor_conf->trip_data.trip_type[i] =
669 pdata->trigger_type[i];
670 }
671
672 sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
673
674 sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
675 for (i = 0; i < pdata->freq_tab_count; i++) {
676 sensor_conf->cooling_data.freq_data[i].freq_clip_max =
677 pdata->freq_tab[i].freq_clip_max;
678 sensor_conf->cooling_data.freq_data[i].temp_level =
679 pdata->freq_tab[i].temp_level;
680 }
681 sensor_conf->dev = &pdev->dev;
682 /* Register the sensor with thermal management interface */
683 ret = exynos_register_thermal(sensor_conf);
684 if (ret) {
685 dev_err(&pdev->dev, "Failed to register thermal interface\n");
686 goto err_clk;
687 }
688 data->reg_conf = sensor_conf;
689
690 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
691 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
692 if (ret) {
693 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
694 goto err_clk;
695 }
696
697 return 0;
698 err_clk:
699 clk_unprepare(data->clk);
700 err_clk_sec:
701 if (!IS_ERR(data->clk_sec))
702 clk_unprepare(data->clk_sec);
703 return ret;
704 }
705
/*
 * Tear down one TMU instance: unregister from the thermal core first
 * (so no more reads arrive), then stop the hardware, release the
 * clocks and finally drop the regulator.  devm handles the rest.
 */
static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_unregister_thermal(data->reg_conf);

	exynos_tmu_control(pdev, false);

	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	/* Only disable the regulator if probe actually obtained one. */
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}
723
#ifdef CONFIG_PM_SLEEP
/* Stop the TMU core and mask its interrupts before system sleep. */
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

/*
 * Re-program the trim/threshold registers (their contents are lost
 * across suspend) and switch the TMU core back on.
 */
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
748
/* Platform driver glue: binds via the OF table in exynos_tmu_match. */
static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");