/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 * Copyright (C) 2014 Samsung Electronics
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 * Lukasz Majewski <l.majewski@samsung.com>
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_tmu.h"
#include "../thermal_core.h"

/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO		0x0
#define EXYNOS_TMU_REG_CONTROL		0x20
#define EXYNOS_TMU_REG_STATUS		0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP	0x40
#define EXYNOS_TMU_REG_INTEN		0x70
#define EXYNOS_TMU_REG_INTSTAT		0x74
#define EXYNOS_TMU_REG_INTCLEAR		0x78

#define EXYNOS_TMU_TEMP_MASK		0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT	24
#define EXYNOS_TMU_REF_VOLTAGE_MASK	0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK	0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT	8
#define EXYNOS_TMU_CORE_EN_SHIFT	0

/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1	0x10

/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP	0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0	0x50

/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2	0x14
#define EXYNOS_THD_TEMP_RISE		0x50
#define EXYNOS_THD_TEMP_FALL		0x54
#define EXYNOS_EMUL_CON			0x80

#define EXYNOS_TRIMINFO_RELOAD_ENABLE	1
#define EXYNOS_TRIMINFO_25_SHIFT	0
#define EXYNOS_TRIMINFO_85_SHIFT	8
#define EXYNOS_TMU_TRIP_MODE_SHIFT	13
#define EXYNOS_TMU_TRIP_MODE_MASK	0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT	12

#define EXYNOS_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS_TMU_INTEN_RISE1_SHIFT	4
#define EXYNOS_TMU_INTEN_RISE2_SHIFT	8
#define EXYNOS_TMU_INTEN_RISE3_SHIFT	12
#define EXYNOS_TMU_INTEN_FALL0_SHIFT	16

#define EXYNOS_EMUL_TIME		0x57F0
#define EXYNOS_EMUL_TIME_MASK		0xffff
#define EXYNOS_EMUL_TIME_SHIFT		16
#define EXYNOS_EMUL_DATA_SHIFT		8
#define EXYNOS_EMUL_DATA_MASK		0xFF
#define EXYNOS_EMUL_ENABLE		0x1

/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN	0xC0
#define EXYNOS5260_TMU_REG_INTSTAT	0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR	0xC8
#define EXYNOS5260_EMUL_CON		0x100

/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE	6
#define EXYNOS4412_MUX_ADDR_SHIFT	20

/* Exynos5433 specific registers */
#define EXYNOS5433_TMU_REG_CONTROL1		0x024
#define EXYNOS5433_TMU_SAMPLING_INTERVAL	0x02c
#define EXYNOS5433_TMU_COUNTER_VALUE0		0x030
#define EXYNOS5433_TMU_COUNTER_VALUE1		0x034
#define EXYNOS5433_TMU_REG_CURRENT_TEMP1	0x044
#define EXYNOS5433_THD_TEMP_RISE3_0		0x050
#define EXYNOS5433_THD_TEMP_RISE7_4		0x054
#define EXYNOS5433_THD_TEMP_FALL3_0		0x060
#define EXYNOS5433_THD_TEMP_FALL7_4		0x064
#define EXYNOS5433_TMU_REG_INTEN		0x0c0
#define EXYNOS5433_TMU_REG_INTPEND		0x0c8
#define EXYNOS5433_TMU_EMUL_CON			0x110
#define EXYNOS5433_TMU_PD_DET_EN		0x130

#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT	16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT	23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK	\
			(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK	BIT(23)

#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING	0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING	1

#define EXYNOS5433_PD_DET_EN			1

/* Exynos5440 specific registers */
#define EXYNOS5440_TMU_S0_7_TRIM		0x000
#define EXYNOS5440_TMU_S0_7_CTRL		0x020
#define EXYNOS5440_TMU_S0_7_DEBUG		0x040
#define EXYNOS5440_TMU_S0_7_TEMP		0x0f0
#define EXYNOS5440_TMU_S0_7_TH0			0x110
#define EXYNOS5440_TMU_S0_7_TH1			0x130
#define EXYNOS5440_TMU_S0_7_TH2			0x150
#define EXYNOS5440_TMU_S0_7_IRQEN		0x210
#define EXYNOS5440_TMU_S0_7_IRQ			0x230
/* Exynos5440 common registers */
#define EXYNOS5440_TMU_IRQ_STATUS		0x000
#define EXYNOS5440_TMU_PMIN			0x004

#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT	1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT	2
#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT	3
#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT	4
#define EXYNOS5440_TMU_TH_RISE4_SHIFT		24
#define EXYNOS5440_EFUSE_SWAP_OFFSET		8

/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6		0x50
#define EXYNOS7_THD_TEMP_FALL7_6		0x60
#define EXYNOS7_TMU_REG_INTEN			0x110
#define EXYNOS7_TMU_REG_INTPEND			0x118
#define EXYNOS7_TMU_REG_EMUL_CON		0x160

#define EXYNOS7_TMU_TEMP_MASK			0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT			23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT		0
#define EXYNOS7_TMU_INTEN_RISE1_SHIFT		1
#define EXYNOS7_TMU_INTEN_RISE2_SHIFT		2
#define EXYNOS7_TMU_INTEN_RISE3_SHIFT		3
#define EXYNOS7_TMU_INTEN_RISE4_SHIFT		4
#define EXYNOS7_TMU_INTEN_RISE5_SHIFT		5
#define EXYNOS7_TMU_INTEN_RISE6_SHIFT		6
#define EXYNOS7_TMU_INTEN_RISE7_SHIFT		7
#define EXYNOS7_EMUL_DATA_SHIFT			7
#define EXYNOS7_EMUL_DATA_MASK			0x1ff

#define MCELSIUS	1000
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @sclk: pointer to the clock structure for accessing the tmu special clk.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to the thermal zone device registered with the core thermal
 *	 framework.
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec, *sclk;
	u16 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_zone_device *tzd;

	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

static void exynos_report_trigger(struct exynos_tmu_data *p)
{
	char data[10], *envp[] = { data, NULL };
	struct thermal_zone_device *tz = p->tzd;
	int temp;
	unsigned int i;

	if (!tz) {
		pr_err("No thermal zone device defined\n");
		return;
	}

	thermal_zone_device_update(tz);

	mutex_lock(&tz->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		tz->ops->get_trip_temp(tz, i, &temp);
		if (tz->last_temperature < temp)
			break;
	}

	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&tz->lock);
}

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}

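/*
 * Extract the 25 and 85 degree Celsius trim codes from the TRIMINFO fuse
 * value. The first point trim falls back to the platform default efuse
 * value when it is zero or outside the [min, max] efuse range; the second
 * point trim falls back only when it is zero.
 */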
static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

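/*
 * Pack one 8-bit temperature code per (non-critical) trip point into the
 * rising or falling threshold register. For falling thresholds the trip
 * hysteresis is subtracted from the trip temperature first.
 */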
static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	unsigned long temp;
	int i;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		return 0;
	}

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL)
			continue;

		temp = trips[i].temperature / MCELSIUS;
		if (falling)
			temp -= (trips[i].hysteresis / MCELSIUS);
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

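/*
 * Build the value for the TMU CONTROL register: select the sensing mux on
 * Exynos3250/4412, and program the reference voltage, buffer slope (gain)
 * and, when requested, the noise cancellation (trip) mode from platform data.
 */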
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS3250)
		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	int ret = 0, threshold_code, i;
	unsigned long reference, temp;
	unsigned int status;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	reference = trips[0].temperature / MCELSIUS;
	threshold_code = temp_to_code(data, reference);
	if (threshold_code < 0) {
		ret = threshold_code;
		goto out;
	}
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		temp = trips[i].temperature / MCELSIUS;
		writeb(temp - reference, data->base +
			EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(data->tzd);
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;
	unsigned long crit_temp = 0;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
			crit_temp = trips[i].temperature;
			break;
		}
	}

	if (i == of_thermal_get_ntrips(data->tzd)) {
		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
		       __func__);
		ret = -EINVAL;
		goto out;
	}

	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
	/* 1-4 level to be assigned in th0 reg */
	rising_threshold &= ~(0xff << 8 * i);
	rising_threshold |= threshold_code << 8 * i;
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

out:
	return ret;
}

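/*
 * Read the fused calibration data, sensor ID and calibration type from
 * TRIMINFO, then program the eight rising/falling trip thresholds, which are
 * split across two pairs of registers (levels 0-3 and 4-7).
 */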
static int exynos5433_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int temp, temp_hist;
	int ret = 0, threshold_code, i, sensor_id, cal_type;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	sanitize_temp_error(data, trim_info);

	/* Read the temperature sensor id */
	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);

	/* Read the calibration mode */
	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;

	switch (cal_type) {
	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
		break;
	default:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	}

	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
			cal_type ? 2 : 1);

	/* Write temperature code for rising and falling threshold */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		int rising_reg_offset, falling_reg_offset;
		int j = 0;

		switch (i) {
		case 0:
		case 1:
		case 2:
		case 3:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
			j = i;
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
			j = i - 4;
			break;
		default:
			continue;
		}

		/* Write temperature code for rising threshold */
		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;
		threshold_code = temp_to_code(data, temp);

		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold |= (threshold_code << j * 8);
		writel(rising_threshold, data->base + rising_reg_offset);

		/* Write temperature code for falling threshold */
		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);
		threshold_code = temp_to_code(data, temp_hist);

		falling_threshold = readl(data->base + falling_reg_offset);
		falling_threshold &= ~(0xff << j * 8);
		falling_threshold |= (threshold_code << j * 8);
		writel(falling_threshold, data->base + falling_reg_offset);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	unsigned int trim_info = 0, con, rising_threshold;
	int ret = 0, threshold_code;
	int crit_temp = 0;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
		/* 5th level to be assigned in th2 reg */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
	return ret;
}

static int exynos7_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;
	int temp, temp_hist;
	unsigned int reg_off, bit_off;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	/* Write temperature code for rising and falling threshold */
	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
		/*
		 * On exynos7 there are 4 rising and 4 falling threshold
		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
		 * eight possible trigger levels, calculate the register and
		 * bit offsets where the threshold levels are to be written.
		 *
		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
		 * [24:16] - Threshold level 7
		 * [8:0] - Threshold level 6
		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
		 * [24:16] - Threshold level 5
		 * [8:0] - Threshold level 4
		 *
		 * and similarly for falling thresholds.
		 *
		 * Based on the above, calculate the register and bit offsets
		 * for rising/falling threshold levels and populate them.
		 */
		reg_off = ((7 - i) / 2) * 4;
		bit_off = ((8 - i) % 2);

		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;

		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);

		/* Set 9-bit temperature code for rising threshold levels */
		threshold_code = temp_to_code(data, temp);
		rising_threshold = readl(data->base +
			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		rising_threshold |= threshold_code << (16 * bit_off);
		writel(rising_threshold,
		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);

		/* Set 9-bit temperature code for falling threshold levels */
		threshold_code = temp_to_code(data, temp_hist);
		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		falling_threshold |= threshold_code << (16 * bit_off);
		writel(falling_threshold,
		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

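/*
 * Enable or disable the TMU core and unmask the rising (and, on SoCs other
 * than Exynos4210, the corresponding falling) trip interrupts for every
 * trip point that of-thermal reports as valid.
 */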
static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);

		if (data->soc != SOC_ARCH_EXYNOS4210)
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en, pd_det_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;

	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
		interrupt_en |=
			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

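/*
 * of-thermal .get_temp callback: read the current temperature code and
 * report it in millicelsius.
 */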
static int exynos_get_temp(void *p, int *temp)
{
	struct exynos_tmu_data *data = p;

	if (!data || !data->tmu_read)
		return -EINVAL;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return 0;
}

#ifdef CONFIG_THERMAL_EMULATION
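/*
 * Compose the emulation control register value: program the emulated
 * temperature code (and, except on Exynos5440, the emulation time field)
 * and set the enable bit; a temperature of 0 disables emulation.
 */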
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    int temp)
{
	if (temp) {
		temp /= MCELSIUS;

		if (data->soc != SOC_ARCH_EXYNOS5440) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		if (data->soc == SOC_ARCH_EXYNOS7) {
			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
				EXYNOS7_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS7_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		} else {
			val &= ~(EXYNOS_EMUL_DATA_MASK <<
				EXYNOS_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		}
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS5433)
		emul_con = EXYNOS5433_TMU_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS7)
		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
	struct exynos_tmu_data *data = drv_data;
	int ret = -EINVAL;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, int temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
		EXYNOS7_TMU_TEMP_MASK;
}

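/*
 * Deferred (workqueue) handler for the TMU interrupt: notify the thermal
 * core, clear the pending interrupt status and re-enable the IRQ line that
 * exynos_tmu_irq() disabled.
 */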
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else if (data->soc == SOC_ARCH_EXYNOS7) {
		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts. Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register. Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

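/*
 * Hard IRQ handler: mask the line and defer the actual handling to
 * exynos_tmu_work() in process context.
 */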
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{ .compatible = "samsung,exynos3250-tmu", },
	{ .compatible = "samsung,exynos4210-tmu", },
	{ .compatible = "samsung,exynos4412-tmu", },
	{ .compatible = "samsung,exynos5250-tmu", },
	{ .compatible = "samsung,exynos5260-tmu", },
	{ .compatible = "samsung,exynos5420-tmu", },
	{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
	{ .compatible = "samsung,exynos5433-tmu", },
	{ .compatible = "samsung,exynos5440-tmu", },
	{ .compatible = "samsung,exynos7-tmu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static int exynos_of_get_soc_type(struct device_node *np)
{
	if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
		return SOC_ARCH_EXYNOS3250;
	else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
		return SOC_ARCH_EXYNOS4210;
	else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
		return SOC_ARCH_EXYNOS4412;
	else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
		return SOC_ARCH_EXYNOS5250;
	else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
		return SOC_ARCH_EXYNOS5260;
	else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
		return SOC_ARCH_EXYNOS5420;
	else if (of_device_is_compatible(np,
					 "samsung,exynos5420-tmu-ext-triminfo"))
		return SOC_ARCH_EXYNOS5420_TRIMINFO;
	else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
		return SOC_ARCH_EXYNOS5433;
	else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
		return SOC_ARCH_EXYNOS5440;
	else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
		return SOC_ARCH_EXYNOS7;

	return -EINVAL;
}

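/*
 * Read the samsung,tmu_* calibration and configuration properties from the
 * device tree node into the platform data structure.
 */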
static int exynos_of_sensor_conf(struct device_node *np,
				 struct exynos_tmu_platform_data *pdata)
{
	u32 value;
	int ret;

	of_node_get(np);

	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
	pdata->gain = (u8)value;
	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
	pdata->reference_voltage = (u8)value;
	of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
	pdata->noise_cancel_mode = (u8)value;

	of_property_read_u32(np, "samsung,tmu_efuse_value",
			     &pdata->efuse_value);
	of_property_read_u32(np, "samsung,tmu_min_efuse_value",
			     &pdata->min_efuse_value);
	of_property_read_u32(np, "samsung,tmu_max_efuse_value",
			     &pdata->max_efuse_value);

	of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
	pdata->first_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
	pdata->second_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
	pdata->default_temp_offset = (u8)value;

	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
	of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);

	of_node_put(np);
	return 0;
}

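/*
 * Parse the remaining device tree data: optional vtmu regulator, instance
 * id, interrupt and register windows, and select the per-SoC initialization,
 * control, read, emulation and IRQ-clearing callbacks.
 */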
static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct exynos_tmu_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
	data->pdata = pdata;
	data->soc = exynos_of_get_soc_type(pdev->dev.of_node);

	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS5433:
		data->tmu_initialize = exynos5433_tmu_initialize;
		data->tmu_control = exynos5433_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS7:
		data->tmu_initialize = exynos7_tmu_initialize;
		data->tmu_control = exynos7_tmu_control;
		data->tmu_read = exynos7_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	default:
		dev_err(&pdev->dev, "Platform not supported\n");
		return -EINVAL;
	}

	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
	    data->soc != SOC_ARCH_EXYNOS5440)
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					 resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static struct thermal_zone_of_device_ops exynos_sensor_ops = {
	.get_temp = exynos_get_temp,
	.set_emul_temp = exynos_tmu_set_emulation,
};

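/*
 * Probe: register the sensor with the of-thermal framework, parse the device
 * tree data, acquire and prepare the clocks, initialize the hardware and
 * finally request the interrupt and enable the TMU.
 */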
static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
						    &exynos_sensor_ops);
	if (IS_ERR(data->tzd)) {
		pr_err("thermal: tz: %p ERROR\n", data->tzd);
		return PTR_ERR(data->tzd);
	}
	ret = exynos_map_dt_data(pdev);
	if (ret)
		goto err_sensor;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		ret = PTR_ERR(data->clk);
		goto err_sensor;
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			ret = PTR_ERR(data->clk_sec);
			goto err_sensor;
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto err_sensor;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		goto err_clk_sec;
	}

	switch (data->soc) {
	case SOC_ARCH_EXYNOS5433:
	case SOC_ARCH_EXYNOS7:
		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			goto err_clk;
		} else {
			ret = clk_prepare_enable(data->sclk);
			if (ret) {
				dev_err(&pdev->dev, "Failed to enable sclk\n");
				goto err_clk;
			}
		}
		break;
	default:
		break;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_sclk;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_sclk;
	}

	exynos_tmu_control(pdev, true);
	return 0;
err_sclk:
	clk_disable_unprepare(data->sclk);
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
err_sensor:
	if (!IS_ERR_OR_NULL(data->regulator))
		regulator_disable(data->regulator);
	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);

	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tzd = data->tzd;

	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	exynos_tmu_control(pdev, false);

	clk_disable_unprepare(data->sclk);
	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name = "exynos-tmu",
		.pm = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove = exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");