1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
4 *
5 * Author:
6 * Mikko Perttunen <mperttunen@nvidia.com>
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19 #include <linux/debugfs.h>
20 #include <linux/bitops.h>
21 #include <linux/clk.h>
22 #include <linux/delay.h>
23 #include <linux/err.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/irq.h>
27 #include <linux/irqdomain.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/reset.h>
32 #include <linux/thermal.h>
33
34 #include <dt-bindings/thermal/tegra124-soctherm.h>
35
36 #include "../thermal_core.h"
37 #include "soctherm.h"
38
39 #define SENSOR_CONFIG0 0
40 #define SENSOR_CONFIG0_STOP BIT(0)
41 #define SENSOR_CONFIG0_CPTR_OVER BIT(2)
42 #define SENSOR_CONFIG0_OVER BIT(3)
43 #define SENSOR_CONFIG0_TCALC_OVER BIT(4)
44 #define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8)
45 #define SENSOR_CONFIG0_TALL_SHIFT 8
46
47 #define SENSOR_CONFIG1 4
48 #define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff
49 #define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
50 #define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15)
51 #define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
52 #define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24)
53 #define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
54 #define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
55
56 /*
57 * SENSOR_CONFIG2 is defined in soctherm.h
58 * because it will be used by tegra_soctherm_fuse.c
59 */
60
61 #define SENSOR_STATUS0 0xc
62 #define SENSOR_STATUS0_VALID_MASK BIT(31)
63 #define SENSOR_STATUS0_CAPTURE_MASK 0xffff
64
65 #define SENSOR_STATUS1 0x10
66 #define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31)
67 #define SENSOR_STATUS1_TEMP_MASK 0xffff
68
69 #define READBACK_VALUE_MASK 0xff00
70 #define READBACK_VALUE_SHIFT 8
71 #define READBACK_ADD_HALF BIT(7)
72 #define READBACK_NEGATE BIT(0)
73
74 /*
75 * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
76 * because it will be used by tegraxxx_soctherm.c
77 */
78 #define THERMCTL_LVL0_CPU0_EN_MASK BIT(8)
79 #define THERMCTL_LVL0_CPU0_CPU_THROT_MASK (0x3 << 5)
80 #define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT 0x1
81 #define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY 0x2
82 #define THERMCTL_LVL0_CPU0_GPU_THROT_MASK (0x3 << 3)
83 #define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT 0x1
84 #define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY 0x2
85 #define THERMCTL_LVL0_CPU0_MEM_THROT_MASK BIT(2)
86 #define THERMCTL_LVL0_CPU0_STATUS_MASK 0x3
87
88 #define THERMCTL_LVL0_UP_STATS 0x10
89 #define THERMCTL_LVL0_DN_STATS 0x14
90
91 #define THERMCTL_INTR_STATUS 0x84
92
93 #define TH_INTR_MD0_MASK BIT(25)
94 #define TH_INTR_MU0_MASK BIT(24)
95 #define TH_INTR_GD0_MASK BIT(17)
96 #define TH_INTR_GU0_MASK BIT(16)
97 #define TH_INTR_CD0_MASK BIT(9)
98 #define TH_INTR_CU0_MASK BIT(8)
99 #define TH_INTR_PD0_MASK BIT(1)
100 #define TH_INTR_PU0_MASK BIT(0)
101 #define TH_INTR_IGNORE_MASK 0xFCFCFCFC
102
103 #define THERMCTL_STATS_CTL 0x94
104 #define STATS_CTL_CLR_DN 0x8
105 #define STATS_CTL_EN_DN 0x4
106 #define STATS_CTL_CLR_UP 0x2
107 #define STATS_CTL_EN_UP 0x1
108
109 #define OC1_CFG 0x310
110 #define OC1_CFG_LONG_LATENCY_MASK BIT(6)
111 #define OC1_CFG_HW_RESTORE_MASK BIT(5)
112 #define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4)
113 #define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2)
114 #define OC1_CFG_ALARM_POLARITY_MASK BIT(1)
115 #define OC1_CFG_EN_THROTTLE_MASK BIT(0)
116
117 #define OC1_CNT_THRESHOLD 0x314
118 #define OC1_THROTTLE_PERIOD 0x318
119 #define OC1_ALARM_COUNT 0x31c
120 #define OC1_FILTER 0x320
121 #define OC1_STATS 0x3a8
122
123 #define OC_INTR_STATUS 0x39c
124 #define OC_INTR_ENABLE 0x3a0
125 #define OC_INTR_DISABLE 0x3a4
126 #define OC_STATS_CTL 0x3c4
127 #define OC_STATS_CTL_CLR_ALL 0x2
128 #define OC_STATS_CTL_EN_ALL 0x1
129
130 #define OC_INTR_OC1_MASK BIT(0)
131 #define OC_INTR_OC2_MASK BIT(1)
132 #define OC_INTR_OC3_MASK BIT(2)
133 #define OC_INTR_OC4_MASK BIT(3)
134 #define OC_INTR_OC5_MASK BIT(4)
135
136 #define THROT_GLOBAL_CFG 0x400
137 #define THROT_GLOBAL_ENB_MASK BIT(0)
138
139 #define CPU_PSKIP_STATUS 0x418
140 #define XPU_PSKIP_STATUS_M_MASK (0xff << 12)
141 #define XPU_PSKIP_STATUS_N_MASK (0xff << 4)
142 #define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK BIT(1)
143 #define XPU_PSKIP_STATUS_ENABLED_MASK BIT(0)
144
145 #define THROT_PRIORITY_LOCK 0x424
146 #define THROT_PRIORITY_LOCK_PRIORITY_MASK 0xff
147
148 #define THROT_STATUS 0x428
149 #define THROT_STATUS_BREACH_MASK BIT(12)
150 #define THROT_STATUS_STATE_MASK (0xff << 4)
151 #define THROT_STATUS_ENABLED_MASK BIT(0)
152
153 #define THROT_PSKIP_CTRL_LITE_CPU 0x430
154 #define THROT_PSKIP_CTRL_ENABLE_MASK BIT(31)
155 #define THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
156 #define THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
157 #define THROT_PSKIP_CTRL_VECT_GPU_MASK (0x7 << 16)
158 #define THROT_PSKIP_CTRL_VECT_CPU_MASK (0x7 << 8)
159 #define THROT_PSKIP_CTRL_VECT2_CPU_MASK 0x7
160
161 #define THROT_VECT_NONE 0x0 /* 3'b000 */
162 #define THROT_VECT_LOW 0x1 /* 3'b001 */
163 #define THROT_VECT_MED 0x3 /* 3'b011 */
164 #define THROT_VECT_HIGH 0x7 /* 3'b111 */
165
166 #define THROT_PSKIP_RAMP_LITE_CPU 0x434
167 #define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
168 #define THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
169 #define THROT_PSKIP_RAMP_STEP_MASK 0xff
170
171 #define THROT_PRIORITY_LITE 0x444
172 #define THROT_PRIORITY_LITE_PRIO_MASK 0xff
173
174 #define THROT_DELAY_LITE 0x448
175 #define THROT_DELAY_LITE_DELAY_MASK 0xff
176
177 /* car register offsets needed for enabling HW throttling */
178 #define CAR_SUPER_CCLKG_DIVIDER 0x36c
179 #define CDIVG_USE_THERM_CONTROLS_MASK BIT(30)
180
181 /* ccroc register offsets needed for enabling HW throttling for Tegra132 */
182 #define CCROC_SUPER_CCLKG_DIVIDER 0x024
183
184 #define CCROC_GLOBAL_CFG 0x148
185
186 #define CCROC_THROT_PSKIP_RAMP_CPU 0x150
187 #define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
188 #define CCROC_THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
189 #define CCROC_THROT_PSKIP_RAMP_STEP_MASK 0xff
190
191 #define CCROC_THROT_PSKIP_CTRL_CPU 0x154
192 #define CCROC_THROT_PSKIP_CTRL_ENB_MASK BIT(31)
193 #define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
194 #define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
195
196 /* get val from register(r) mask bits(m) */
197 #define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1))
198 /* set val(v) to mask bits(m) of register(r) */
199 #define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \
200 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
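/*
 * Worked example (illustrative, not part of the original driver): with
 * m = 0x3 << 5, ffs(m) - 1 == 5, so REG_GET_MASK(0x60, 0x3 << 5) == 0x3
 * and REG_SET_MASK(0, 0x3 << 5, 0x2) == 0x40 -- i.e. the value is shifted
 * into (or out of) the field selected by the mask.
 */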
201
202 /* get dividend from the depth */
203 #define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
204
205 /* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
206 * level vector
207 * NONE 3'b000
208 * LOW 3'b001
209 * MED 3'b011
210 * HIGH 3'b111
211 */
212 #define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1)
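/*
 * Worked examples (illustrative only): THROT_DEPTH_DIVIDEND(50) is
 * (256 * 50 / 100) - 1 = 127 and THROT_DEPTH_DIVIDEND(80) is 50, i.e. a
 * deeper throttle yields a smaller dividend. Assuming the usual
 * NONE=0/LOW=1/MED=2/HIGH=3 encoding from tegra124-soctherm.h,
 * THROT_LEVEL_TO_DEPTH() yields 0x0, 0x1, 0x3 and 0x7 respectively,
 * matching the vector table above.
 */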
213
214 /* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
215 #define THROT_OFFSET 0x30
216 #define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
217 (THROT_OFFSET * throt) + (8 * dev))
218 #define THROT_PSKIP_RAMP(throt, dev) (THROT_PSKIP_RAMP_LITE_CPU + \
219 (THROT_OFFSET * throt) + (8 * dev))
220
221 /* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
222 #define THROT_PRIORITY_CTRL(throt) (THROT_PRIORITY_LITE + \
223 (THROT_OFFSET * throt))
224 #define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
225 (THROT_OFFSET * throt))
226
227 #define ALARM_OFFSET 0x14
228 #define ALARM_CFG(throt) (OC1_CFG + \
229 (ALARM_OFFSET * (throt - THROTTLE_OC1)))
230
231 #define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \
232 (ALARM_OFFSET * (throt - THROTTLE_OC1)))
233
234 #define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \
235 (ALARM_OFFSET * (throt - THROTTLE_OC1)))
236
237 #define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \
238 (ALARM_OFFSET * (throt - THROTTLE_OC1)))
239
240 #define ALARM_FILTER(throt) (OC1_FILTER + \
241 (ALARM_OFFSET * (throt - THROTTLE_OC1)))
242
243 #define ALARM_STATS(throt) (OC1_STATS + \
244 (4 * (throt - THROTTLE_OC1)))
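/*
 * Offset arithmetic example (illustrative only): the LIGHT/HEAVY/OCx
 * register banks are laid out at fixed strides, so e.g.
 * THROT_PSKIP_CTRL(THROTTLE_HEAVY, THROTTLE_DEV_GPU) is
 * 0x430 + 0x30 * 1 + 8 * 1 = 0x468, and ALARM_CFG(THROTTLE_OC3) is
 * 0x310 + 0x14 * 2 = 0x338.
 */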
245
246 /* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
247 #define CCROC_THROT_OFFSET 0x0c
248 #define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
249 (CCROC_THROT_OFFSET * vect))
250 #define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect) (CCROC_THROT_PSKIP_RAMP_CPU + \
251 (CCROC_THROT_OFFSET * vect))
252
253 /* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
254 #define THERMCTL_LVL_REGS_SIZE 0x20
255 #define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
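/*
 * Example (illustrative only): the per-level register instances are 0x20
 * apart, so THERMCTL_LVL_REG(off, 1) is the LEVEL1 instance at off + 0x20
 * and THERMCTL_LVL_REG(off, 2) the LEVEL2 instance at off + 0x40; this is
 * how throttrip_program() below maps LIGHT/HEAVY onto LEVEL1/LEVEL2.
 */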
256
257 #define OC_THROTTLE_MODE_DISABLED 0
258 #define OC_THROTTLE_MODE_BRIEF 2
259
260 static const int min_low_temp = -127000;
261 static const int max_high_temp = 127000;
262
263 enum soctherm_throttle_id {
264 THROTTLE_LIGHT = 0,
265 THROTTLE_HEAVY,
266 THROTTLE_OC1,
267 THROTTLE_OC2,
268 THROTTLE_OC3,
269 THROTTLE_OC4,
270 THROTTLE_OC5, /* OC5 is reserved */
271 THROTTLE_SIZE,
272 };
273
274 enum soctherm_oc_irq_id {
275 TEGRA_SOC_OC_IRQ_1,
276 TEGRA_SOC_OC_IRQ_2,
277 TEGRA_SOC_OC_IRQ_3,
278 TEGRA_SOC_OC_IRQ_4,
279 TEGRA_SOC_OC_IRQ_5,
280 TEGRA_SOC_OC_IRQ_MAX,
281 };
282
283 enum soctherm_throttle_dev_id {
284 THROTTLE_DEV_CPU = 0,
285 THROTTLE_DEV_GPU,
286 THROTTLE_DEV_SIZE,
287 };
288
289 static const char *const throt_names[] = {
290 [THROTTLE_LIGHT] = "light",
291 [THROTTLE_HEAVY] = "heavy",
292 [THROTTLE_OC1] = "oc1",
293 [THROTTLE_OC2] = "oc2",
294 [THROTTLE_OC3] = "oc3",
295 [THROTTLE_OC4] = "oc4",
296 [THROTTLE_OC5] = "oc5",
297 };
298
299 struct tegra_soctherm;
300 struct tegra_thermctl_zone {
301 void __iomem *reg;
302 struct device *dev;
303 struct tegra_soctherm *ts;
304 struct thermal_zone_device *tz;
305 const struct tegra_tsensor_group *sg;
306 };
307
308 struct soctherm_oc_cfg {
309 u32 active_low;
310 u32 throt_period;
311 u32 alarm_cnt_thresh;
312 u32 alarm_filter;
313 u32 mode;
314 bool intr_en;
315 };
316
317 struct soctherm_throt_cfg {
318 const char *name;
319 unsigned int id;
320 u8 priority;
321 u8 cpu_throt_level;
322 u32 cpu_throt_depth;
323 u32 gpu_throt_level;
324 struct soctherm_oc_cfg oc_cfg;
325 struct thermal_cooling_device *cdev;
326 bool init;
327 };
328
329 struct tegra_soctherm {
330 struct reset_control *reset;
331 struct clk *clock_tsensor;
332 struct clk *clock_soctherm;
333 void __iomem *regs;
334 void __iomem *clk_regs;
335 void __iomem *ccroc_regs;
336
337 int thermal_irq;
338 int edp_irq;
339
340 u32 *calib;
341 struct thermal_zone_device **thermctl_tzs;
342 struct tegra_soctherm_soc *soc;
343
344 struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
345
346 struct dentry *debugfs_dir;
347
348 struct mutex thermctl_lock;
349 };
350
351 struct soctherm_oc_irq_chip_data {
352 struct mutex irq_lock; /* serialize OC IRQs */
353 struct irq_chip irq_chip;
354 struct irq_domain *domain;
355 int irq_enable;
356 };
357
358 static struct soctherm_oc_irq_chip_data soc_irq_cdata;
359
360 /**
361 * ccroc_writel() - writes a value to a CCROC register
362 * @ts: pointer to a struct tegra_soctherm
363 * @value: the value to write
364 * @reg: the register offset
365 *
366 * Writes @value to @reg. No return value.
367 */
368 static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
369 {
370 writel(value, (ts->ccroc_regs + reg));
371 }
372
373 /**
374 * ccroc_readl() - reads specified register from CCROC IP block
375 * @ts: pointer to a struct tegra_soctherm
376 * @reg: register address to be read
377 *
378 * Return: the value of the register
379 */
380 static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
381 {
382 return readl(ts->ccroc_regs + reg);
383 }
384
385 static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
386 {
387 const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
388 void __iomem *base = tegra->regs + sensor->base;
389 unsigned int val;
390
391 val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
392 writel(val, base + SENSOR_CONFIG0);
393
394 val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
395 val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
396 val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
397 val |= SENSOR_CONFIG1_TEMP_ENABLE;
398 writel(val, base + SENSOR_CONFIG1);
399
400 writel(tegra->calib[i], base + SENSOR_CONFIG2);
401 }
402
403 /*
404 * Translate from soctherm readback format to millicelsius.
405 * The soctherm readback format in bits is as follows:
406 * TTTTTTTT H______N
407 * where T's contain the temperature in Celsius,
408 * H denotes an addition of 0.5 Celsius and N denotes negation
409 * of the final value.
410 */
411 static int translate_temp(u16 val)
412 {
413 int t;
414
415 t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
416 if (val & READBACK_ADD_HALF)
417 t += 500;
418 if (val & READBACK_NEGATE)
419 t *= -1;
420
421 return t;
422 }
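/*
 * Worked example (illustrative only): a readback value of 0x2a81 decodes
 * as T = 0x2a = 42 -> 42000 mC, the H bit (0x80) adds 500 mC and the N
 * bit (0x01) negates the result, giving -42500 mC; 0x2a80 would decode to
 * +42500 mC.
 */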
423
424 static int tegra_thermctl_get_temp(void *data, int *out_temp)
425 {
426 struct tegra_thermctl_zone *zone = data;
427 u32 val;
428
429 val = readl(zone->reg);
430 val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
431 *out_temp = translate_temp(val);
432
433 return 0;
434 }
435
436 /**
437 * enforce_temp_range() - check and enforce temperature range [min, max]
438 * @trip_temp: the trip temperature to check
439 *
440 * Checks and enforces the permitted temperature range that SOC_THERM
441 * HW can support This is
442 * done while taking care of precision.
443 *
444 * Return: The precision adjusted capped temperature in millicelsius.
445 */
446 static int enforce_temp_range(struct device *dev, int trip_temp)
447 {
448 int temp;
449
450 temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
451 if (temp != trip_temp)
452 dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
453 trip_temp, temp);
454 return temp;
455 }
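/*
 * Example (illustrative only): enforce_temp_range(dev, 150000) clamps the
 * value to the 127000 mC ceiling and logs the adjustment, while values
 * already within [-127000, 127000] are returned unchanged.
 */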
456
457 /**
458 * thermtrip_program() - Configures the hardware to shut down the
459 * system if a given sensor group reaches a given temperature
460 * @dev: ptr to the struct device for the SOC_THERM IP block
461 * @sg: pointer to the sensor group to set the thermtrip temperature for
462 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
463 *
464 * Sets the thermal trip threshold of the given sensor group to be the
465 * @trip_temp. If this threshold is crossed, the hardware will shut
466 * down.
467 *
468 * Note that, although @trip_temp is specified in millicelsius, the
469 * hardware is programmed in degrees Celsius.
470 *
471 * Return: 0 upon success, or %-EINVAL upon failure.
472 */
473 static int thermtrip_program(struct device *dev,
474 const struct tegra_tsensor_group *sg,
475 int trip_temp)
476 {
477 struct tegra_soctherm *ts = dev_get_drvdata(dev);
478 int temp;
479 u32 r;
480
481 if (!sg || !sg->thermtrip_threshold_mask)
482 return -EINVAL;
483
484 temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
485
486 r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
487 r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
488 r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
489 r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
490 writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
491
492 return 0;
493 }
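/*
 * Worked example (illustrative; the grain is SoC data, assumed here to be
 * 1000 mC as on Tegra124-class parts): a 102500 mC thermtrip request is
 * programmed as 102500 / 1000 = 102 in the sensor group's threshold
 * field, so the HW threshold resolution is one grain.
 */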
494
495 /**
496 * throttrip_program() - Configures the hardware to throttle the
497 * pulse if a given sensor group reaches a given temperature
498 * @dev: ptr to the struct device for the SOC_THERM IP block
499 * @sg: pointer to the sensor group to set the throttrip temperature for
500 * @stc: pointer to the throttle configuration that needs to be triggered
501 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
502 *
503 * Sets the thermal trip threshold and throttle event of the given sensor
504 * group. If this threshold is crossed, the hardware will trigger the
505 * throttle.
506 *
507 * Note that, although @trip_temp is specified in millicelsius, the
508 * hardware is programmed in degrees Celsius.
509 *
510 * Return: 0 upon success, or %-EINVAL upon failure.
511 */
512 static int throttrip_program(struct device *dev,
513 const struct tegra_tsensor_group *sg,
514 struct soctherm_throt_cfg *stc,
515 int trip_temp)
516 {
517 struct tegra_soctherm *ts = dev_get_drvdata(dev);
518 int temp, cpu_throt, gpu_throt;
519 unsigned int throt;
520 u32 r, reg_off;
521
522 if (!sg || !stc || !stc->init)
523 return -EINVAL;
524
525 temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
526
527 /* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
528 throt = stc->id;
529 reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
530
531 if (throt == THROTTLE_LIGHT) {
532 cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
533 gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
534 } else {
535 cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
536 gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
537 if (throt != THROTTLE_HEAVY)
538 dev_warn(dev,
539 "invalid throt id %d - assuming HEAVY",
540 throt);
541 }
542
543 r = readl(ts->regs + reg_off);
544 r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
545 r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
546 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
547 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
548 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
549 writel(r, ts->regs + reg_off);
550
551 return 0;
552 }
553
554 static struct soctherm_throt_cfg *
555 find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
556 {
557 unsigned int i;
558
559 for (i = 0; ts->throt_cfgs[i].name; i++)
560 if (!strcmp(ts->throt_cfgs[i].name, name))
561 return &ts->throt_cfgs[i];
562
563 return NULL;
564 }
565
566 static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
567 {
568 int i, temp = min_low_temp;
569 struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
570
571 if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
572 return temp;
573
574 if (tt) {
575 for (i = 0; i < ts->soc->num_ttgs; i++) {
576 if (tt[i].id == id)
577 return tt[i].temp;
578 }
579 }
580
581 return temp;
582 }
583
584 static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
585 {
586 struct tegra_thermctl_zone *zone = data;
587 struct thermal_zone_device *tz = zone->tz;
588 struct tegra_soctherm *ts = zone->ts;
589 const struct tegra_tsensor_group *sg = zone->sg;
590 struct device *dev = zone->dev;
591 enum thermal_trip_type type;
592 int ret;
593
594 if (!tz)
595 return -EINVAL;
596
597 ret = tz->ops->get_trip_type(tz, trip, &type);
598 if (ret)
599 return ret;
600
601 if (type == THERMAL_TRIP_CRITICAL) {
602 /*
603 * If the thermtrips property is set in DT, there is no need to
604 * program the critical type trip to HW; if not, program the
605 * critical trip to HW.
606 */
607 if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
608 return thermtrip_program(dev, sg, temp);
609 else
610 return 0;
611
612 } else if (type == THERMAL_TRIP_HOT) {
613 int i;
614
615 for (i = 0; i < THROTTLE_SIZE; i++) {
616 struct thermal_cooling_device *cdev;
617 struct soctherm_throt_cfg *stc;
618
619 if (!ts->throt_cfgs[i].init)
620 continue;
621
622 cdev = ts->throt_cfgs[i].cdev;
623 if (get_thermal_instance(tz, cdev, trip))
624 stc = find_throttle_cfg_by_name(ts, cdev->type);
625 else
626 continue;
627
628 return throttrip_program(dev, sg, stc, temp);
629 }
630 }
631
632 return 0;
633 }
634
635 static int tegra_thermctl_get_trend(void *data, int trip,
636 enum thermal_trend *trend)
637 {
638 struct tegra_thermctl_zone *zone = data;
639 struct thermal_zone_device *tz = zone->tz;
640 int trip_temp, temp, last_temp, ret;
641
642 if (!tz)
643 return -EINVAL;
644
645 ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp);
646 if (ret)
647 return ret;
648
649 temp = READ_ONCE(tz->temperature);
650 last_temp = READ_ONCE(tz->last_temperature);
651
652 if (temp > trip_temp) {
653 if (temp >= last_temp)
654 *trend = THERMAL_TREND_RAISING;
655 else
656 *trend = THERMAL_TREND_STABLE;
657 } else if (temp < trip_temp) {
658 *trend = THERMAL_TREND_DROPPING;
659 } else {
660 *trend = THERMAL_TREND_STABLE;
661 }
662
663 return 0;
664 }
665
666 static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
667 {
668 u32 r;
669
670 /* multiple zones could be handling and setting trips at once */
671 mutex_lock(&zn->ts->thermctl_lock);
672 r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
673 r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
674 writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
675 mutex_unlock(&zn->ts->thermctl_lock);
676 }
677
678 static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
679 {
680 u32 r;
681
682 /* multiple zones could be handling and setting trips at once */
683 mutex_lock(&zn->ts->thermctl_lock);
684 r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
685 r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
686 writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
687 mutex_unlock(&zn->ts->thermctl_lock);
688 }
689
690 static int tegra_thermctl_set_trips(void *data, int lo, int hi)
691 {
692 struct tegra_thermctl_zone *zone = data;
693 u32 r;
694
695 thermal_irq_disable(zone);
696
697 r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
698 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
699 writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
700
701 lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
702 hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
703 dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
704
705 r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
706 r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
707 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
708 writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
709
710 thermal_irq_enable(zone);
711
712 return 0;
713 }
714
715 static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
716 .get_temp = tegra_thermctl_get_temp,
717 .set_trip_temp = tegra_thermctl_set_trip_temp,
718 .get_trend = tegra_thermctl_get_trend,
719 .set_trips = tegra_thermctl_set_trips,
720 };
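/*
 * These callbacks are consumed by the of-thermal core. Outside this
 * excerpt, the probe path typically registers one zone per sensor group
 * along the lines of this sketch (field names assumed, error handling
 * elided):
 *
 *	zone->reg = ts->regs + sg->sensor_temp_offset;
 *	zone->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, sg->id,
 *							zone,
 *							&tegra_of_thermal_ops);
 */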
721
722 static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
723 {
724 int ntrips, i, ret;
725 enum thermal_trip_type type;
726
727 ntrips = of_thermal_get_ntrips(tz);
728 if (ntrips <= 0)
729 return -EINVAL;
730
731 for (i = 0; i < ntrips; i++) {
732 ret = tz->ops->get_trip_type(tz, i, &type);
733 if (ret)
734 return -EINVAL;
735 if (type == THERMAL_TRIP_HOT) {
736 ret = tz->ops->get_trip_temp(tz, i, temp);
737 if (!ret)
738 *trip = i;
739
740 return ret;
741 }
742 }
743
744 return -EINVAL;
745 }
746
747 /**
748 * tegra_soctherm_set_hwtrips() - set HW trip points from DT data
749 * @dev: struct device * of the SOC_THERM instance
750 * @sg: pointer to the sensor group to program the trip points for
751 * @tz: thermal zone device associated with the sensor group
752 *
753 * Configure the SOC_THERM HW trip points, setting the "THERMTRIP" and
754 * "THROTTLE" trip points using the "thermtrips", "critical" or "hot"
755 * type trip temperatures from the thermal zone. After they have been
756 * configured, THERMTRIP or THROTTLE will take action when the configured
757 * SoC thermal sensor group reaches a certain temperature.
758 *
759 * Return: 0 upon success, or a negative error code on failure.
760 * "Success" does not mean that trips was enabled; it could also
761 * mean that no node was found in DT.
762 * THERMTRIP has been enabled successfully when a message similar to
763 * this one appears on the serial console:
764 * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
765 * THROTTLE has been enabled successfully when a message similar to
766 * this one appears on the serial console:
767 * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
768 */
769 static int tegra_soctherm_set_hwtrips(struct device *dev,
770 const struct tegra_tsensor_group *sg,
771 struct thermal_zone_device *tz)
772 {
773 struct tegra_soctherm *ts = dev_get_drvdata(dev);
774 struct soctherm_throt_cfg *stc;
775 int i, trip, temperature, ret;
776
777 /* Get thermtrips. If missing, try to get critical trips. */
778 temperature = tsensor_group_thermtrip_get(ts, sg->id);
779 if (min_low_temp == temperature)
780 if (tz->ops->get_crit_temp(tz, &temperature))
781 temperature = max_high_temp;
782
783 ret = thermtrip_program(dev, sg, temperature);
784 if (ret) {
785 dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
786 return ret;
787 }
788
789 dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
790 sg->name, temperature);
791
792 ret = get_hot_temp(tz, &trip, &temperature);
793 if (ret) {
794 dev_info(dev, "throttrip: %s: missing hot temperature\n",
795 sg->name);
796 return 0;
797 }
798
799 for (i = 0; i < THROTTLE_OC1; i++) {
800 struct thermal_cooling_device *cdev;
801
802 if (!ts->throt_cfgs[i].init)
803 continue;
804
805 cdev = ts->throt_cfgs[i].cdev;
806 if (get_thermal_instance(tz, cdev, trip))
807 stc = find_throttle_cfg_by_name(ts, cdev->type);
808 else
809 continue;
810
811 ret = throttrip_program(dev, sg, stc, temperature);
812 if (ret) {
813 dev_err(dev, "throttrip: %s: error during enable\n",
814 sg->name);
815 return ret;
816 }
817
818 dev_info(dev,
819 "throttrip: will throttle when %s reaches %d mC\n",
820 sg->name, temperature);
821 break;
822 }
823
824 if (i == THROTTLE_SIZE)
825 dev_info(dev, "throttrip: %s: missing throttle cdev\n",
826 sg->name);
827
828 return 0;
829 }
830
831 static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
832 {
833 struct tegra_soctherm *ts = dev_id;
834 u32 r;
835
836 /* Case for no lock:
837 * Although interrupts are enabled in set_trips, there is still no need
838 * to lock here because the interrupts are disabled before programming
839 * new trip points. Hence there can't be an interrupt on the same sensor.
840 * An interrupt can however occur on a sensor while trips are being
841 * programmed on a different one. This being a LEVEL interrupt won't
842 * cause a new interrupt but this is taken care of by the re-reading of
843 * the STATUS register in the thread function.
844 */
845 r = readl(ts->regs + THERMCTL_INTR_STATUS);
846 writel(r, ts->regs + THERMCTL_INTR_DISABLE);
847
848 return IRQ_WAKE_THREAD;
849 }
850
851 /**
852 * soctherm_thermal_isr_thread() - Handles a thermal interrupt request
853 * @irq: The interrupt number being requested; not used
854 * @dev_id: Opaque pointer to tegra_soctherm;
855 *
856 * Clears the interrupt status register if there are expected
857 * interrupt bits set.
858 * The interrupt(s) are then handled by updating the corresponding
859 * thermal zones.
860 *
861 * An error is logged if any unexpected interrupt bits are set.
862 *
863 * Disabled interrupts are re-enabled.
864 *
865 * Return: %IRQ_HANDLED. Interrupt was handled and no further processing
866 * is needed.
867 */
868 static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
869 {
870 struct tegra_soctherm *ts = dev_id;
871 struct thermal_zone_device *tz;
872 u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
873
874 st = readl(ts->regs + THERMCTL_INTR_STATUS);
875
876 /* deliberately clear expected interrupts handled in SW */
877 cp |= st & TH_INTR_CD0_MASK;
878 cp |= st & TH_INTR_CU0_MASK;
879
880 gp |= st & TH_INTR_GD0_MASK;
881 gp |= st & TH_INTR_GU0_MASK;
882
883 pl |= st & TH_INTR_PD0_MASK;
884 pl |= st & TH_INTR_PU0_MASK;
885
886 me |= st & TH_INTR_MD0_MASK;
887 me |= st & TH_INTR_MU0_MASK;
888
889 ex |= cp | gp | pl | me;
890 if (ex) {
891 writel(ex, ts->regs + THERMCTL_INTR_STATUS);
892 st &= ~ex;
893
894 if (cp) {
895 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
896 thermal_zone_device_update(tz,
897 THERMAL_EVENT_UNSPECIFIED);
898 }
899
900 if (gp) {
901 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
902 thermal_zone_device_update(tz,
903 THERMAL_EVENT_UNSPECIFIED);
904 }
905
906 if (pl) {
907 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
908 thermal_zone_device_update(tz,
909 THERMAL_EVENT_UNSPECIFIED);
910 }
911
912 if (me) {
913 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
914 thermal_zone_device_update(tz,
915 THERMAL_EVENT_UNSPECIFIED);
916 }
917 }
918
919 /* deliberately ignore expected interrupts NOT handled in SW */
920 ex |= TH_INTR_IGNORE_MASK;
921 st &= ~ex;
922
923 if (st) {
924 /* Whine about any other unexpected INTR bits still set */
925 pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
926 writel(st, ts->regs + THERMCTL_INTR_STATUS);
927 }
928
929 return IRQ_HANDLED;
930 }
931
932 /**
933 * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
934 * @alarm: The soctherm throttle id
935 * @enable: flag indicating whether the soctherm over-current interrupt
936 * should be enabled; nothing is done when it is false
937 *
938 * Enables the interrupt for a specific over-current pin @alarm if the flag
939 * is set and the alarm corresponds to OC1, OC2, OC3, or OC4.
940 */
941 static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
942 enum soctherm_throttle_id alarm,
943 bool enable)
944 {
945 u32 r;
946
947 if (!enable)
948 return;
949
950 r = readl(ts->regs + OC_INTR_ENABLE);
951 switch (alarm) {
952 case THROTTLE_OC1:
953 r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
954 break;
955 case THROTTLE_OC2:
956 r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
957 break;
958 case THROTTLE_OC3:
959 r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
960 break;
961 case THROTTLE_OC4:
962 r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
963 break;
964 default:
965 r = 0;
966 break;
967 }
968 writel(r, ts->regs + OC_INTR_ENABLE);
969 }
970
971 /**
972 * soctherm_handle_alarm() - Handles soctherm alarms
973 * @alarm: The soctherm throttle id
974 *
975 * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
976 * a warning or informative message.
977 *
978 * Return: 0 if @alarm is one of THROTTLE_OC1 to THROTTLE_OC4, otherwise -EINVAL.
979 */
980 static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
981 {
982 int rv = -EINVAL;
983
984 switch (alarm) {
985 case THROTTLE_OC1:
986 pr_debug("soctherm: Successfully handled OC1 alarm\n");
987 rv = 0;
988 break;
989
990 case THROTTLE_OC2:
991 pr_debug("soctherm: Successfully handled OC2 alarm\n");
992 rv = 0;
993 break;
994
995 case THROTTLE_OC3:
996 pr_debug("soctherm: Successfully handled OC3 alarm\n");
997 rv = 0;
998 break;
999
1000 case THROTTLE_OC4:
1001 pr_debug("soctherm: Successfully handled OC4 alarm\n");
1002 rv = 0;
1003 break;
1004
1005 default:
1006 break;
1007 }
1008
1009 if (rv)
1010 pr_err("soctherm: ERROR in handling %s alarm\n",
1011 throt_names[alarm]);
1012
1013 return rv;
1014 }
1015
1016 /**
1017 * soctherm_edp_isr_thread() - log an over-current interrupt request
1018 * @irq: OC irq number. Currently not being used. See description
1019 * @arg: a void pointer for callback, currently not being used
1020 *
1021 * Over-current events are handled in hardware. This function is called to log
1022 * and handle any OC events that happened. Additionally, it checks the
1023 * over-current interrupt status register for any bits that are set but
1024 * were not expected (i.e. any discrepancy in interrupt status); such a
1025 * discrepancy will be logged.
1026 *
1027 * Return: %IRQ_HANDLED
1028 */
1029 static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
1030 {
1031 struct tegra_soctherm *ts = arg;
1032 u32 st, ex, oc1, oc2, oc3, oc4;
1033
1034 st = readl(ts->regs + OC_INTR_STATUS);
1035
1036 /* deliberately clear expected interrupts handled in SW */
1037 oc1 = st & OC_INTR_OC1_MASK;
1038 oc2 = st & OC_INTR_OC2_MASK;
1039 oc3 = st & OC_INTR_OC3_MASK;
1040 oc4 = st & OC_INTR_OC4_MASK;
1041 ex = oc1 | oc2 | oc3 | oc4;
1042
1043 pr_err("soctherm: OC ALARM 0x%08x\n", ex);
1044 if (ex) {
1045 writel(st, ts->regs + OC_INTR_STATUS);
1046 st &= ~ex;
1047
1048 if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
1049 soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
1050
1051 if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
1052 soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
1053
1054 if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
1055 soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
1056
1057 if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
1058 soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
1059
1060 if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
1061 handle_nested_irq(
1062 irq_find_mapping(soc_irq_cdata.domain, 0));
1063
1064 if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
1065 handle_nested_irq(
1066 irq_find_mapping(soc_irq_cdata.domain, 1));
1067
1068 if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
1069 handle_nested_irq(
1070 irq_find_mapping(soc_irq_cdata.domain, 2));
1071
1072 if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
1073 handle_nested_irq(
1074 irq_find_mapping(soc_irq_cdata.domain, 3));
1075 }
1076
1077 if (st) {
1078 pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
1079 writel(st, ts->regs + OC_INTR_STATUS);
1080 }
1081
1082 return IRQ_HANDLED;
1083 }
1084
1085 /**
1086 * soctherm_edp_isr() - Disables any active interrupts
1087 * @irq: The interrupt request number
1088 * @arg: Opaque pointer to an argument
1089 *
1090 * Writes to the OC_INTR_DISABLE register the over current interrupt status,
1091 * masking any asserted interrupts. Doing this prevents the same interrupts
1092 * from triggering this isr repeatedly. The thread woken by this isr will
1093 * handle asserted interrupts and subsequently unmask/re-enable them.
1094 *
1095 * The OC_INTR_DISABLE register indicates which OC interrupts
1096 * have been disabled.
1097 *
1098 * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
1099 */
1100 static irqreturn_t soctherm_edp_isr(int irq, void *arg)
1101 {
1102 struct tegra_soctherm *ts = arg;
1103 u32 r;
1104
1105 if (!ts)
1106 return IRQ_NONE;
1107
1108 r = readl(ts->regs + OC_INTR_STATUS);
1109 writel(r, ts->regs + OC_INTR_DISABLE);
1110
1111 return IRQ_WAKE_THREAD;
1112 }
1113
1114 /**
1115 * soctherm_oc_irq_lock() - locks the over-current interrupt request
1116 * @data: Interrupt request data
1117 *
1118 * Looks up the chip data from @data and locks the mutex associated with
1119 * a particular over-current interrupt request.
1120 */
1121 static void soctherm_oc_irq_lock(struct irq_data *data)
1122 {
1123 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1124
1125 mutex_lock(&d->irq_lock);
1126 }
1127
1128 /**
1129 * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
1130 * @data: Interrupt request data
1131 *
1132 * Looks up the interrupt request data @data and unlocks the mutex associated
1133 * with a particular over-current interrupt request.
1134 */
1135 static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
1136 {
1137 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1138
1139 mutex_unlock(&d->irq_lock);
1140 }
1141
1142 /**
1143 * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
1144 * @data: irq_data structure of the chip
1145 *
1146 * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
1147 * to respond to over-current interrupts.
1148 *
1149 */
1150 static void soctherm_oc_irq_enable(struct irq_data *data)
1151 {
1152 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1153
1154 d->irq_enable |= BIT(data->hwirq);
1155 }
1156
1157 /**
1158 * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
1159 * @data: The interrupt request information
1160 *
1161 * Clears the interrupt request enable bit of the overcurrent
1162 * interrupt request chip data.
1163 *
1164 * Return: Nothing is returned (void)
1165 */
1166 static void soctherm_oc_irq_disable(struct irq_data *data)
1167 {
1168 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1169
1170 d->irq_enable &= ~BIT(data->hwirq);
1171 }
1172
1173 static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
1174 {
1175 return 0;
1176 }
1177
1178 /**
1179 * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
1180 * @h: Interrupt request domain
1181 * @virq: Virtual interrupt request number
1182 * @hw: Hardware interrupt request number
1183 *
1184 * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
1185 * interrupt request is called, the irq_domain takes the request's virtual
1186 * request number (much like a virtual memory address) and maps it to a
1187 * physical hardware request number.
1188 *
1189 * When a mapping doesn't already exist for a virtual request number, the
1190 * irq_domain calls this function to associate the virtual request number with
1191 * a hardware request number.
1192 *
1193 * Return: 0
1194 */
1195 static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
1196 irq_hw_number_t hw)
1197 {
1198 struct soctherm_oc_irq_chip_data *data = h->host_data;
1199
1200 irq_set_chip_data(virq, data);
1201 irq_set_chip(virq, &data->irq_chip);
1202 irq_set_nested_thread(virq, 1);
1203 return 0;
1204 }
1205
1206 /**
1207 * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
1208 * @d: Interrupt request domain
1209 * @intspec: Array of u32s from DTs "interrupt" property
1210 * @intsize: Number of values inside the intspec array
1211 * @out_hwirq: HW IRQ value associated with this interrupt
1212 * @out_type: The IRQ SENSE type for this interrupt.
1213 *
1214 * This Device Tree IRQ specifier translation function will translate a
1215 * specific "interrupt" as defined by 2 DT values where the cell values map
1216 * the hwirq number + 1 and linux irq flags. Since the output is the hwirq
1217 * number, this function will subtract 1 from the value listed in DT.
1218 *
1219 * Return: 0
1220 */
1221 static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
1222 struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
1223 irq_hw_number_t *out_hwirq, unsigned int *out_type)
1224 {
1225 if (WARN_ON(intsize < 2))
1226 return -EINVAL;
1227
1228 /*
1229 * The HW value is 1 index less than the DT IRQ values.
1230 * i.e. OC4 goes to HW index 3.
1231 */
1232 *out_hwirq = intspec[0] - 1;
1233 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
1234 return 0;
1235 }
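/*
 * DT example (hypothetical, for illustration only): with this xlate, a
 * consumer specifier such as <4 IRQ_TYPE_LEVEL_HIGH> referring to the
 * soctherm OC domain decodes to hwirq 3 (the OC4 line) plus the
 * level-high sense flag.
 */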
1236
1237 static const struct irq_domain_ops soctherm_oc_domain_ops = {
1238 .map = soctherm_oc_irq_map,
1239 .xlate = soctherm_irq_domain_xlate_twocell,
1240 };
1241
1242 /**
1243 * soctherm_oc_int_init() - Initial enabling of the over
1244 * current interrupts
1245 * @np: The devicetree node for soctherm
1246 * @num_irqs: The number of new interrupt requests
1247 *
1248 * Sets the over current interrupt request chip data
1249 *
1250 * Return: 0 on success or if over-current interrupts are not enabled,
1251 * or -ENOMEM if the IRQ domain could not be created.
1253 */
1254 static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
1255 {
1256 if (!num_irqs) {
1257 pr_info("%s(): OC interrupts are not enabled\n", __func__);
1258 return 0;
1259 }
1260
1261 mutex_init(&soc_irq_cdata.irq_lock);
1262 soc_irq_cdata.irq_enable = 0;
1263
1264 soc_irq_cdata.irq_chip.name = "soc_therm_oc";
1265 soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
1266 soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
1267 soctherm_oc_irq_sync_unlock;
1268 soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
1269 soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
1270 soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
1271 soc_irq_cdata.irq_chip.irq_set_wake = NULL;
1272
1273 soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
1274 &soctherm_oc_domain_ops,
1275 &soc_irq_cdata);
1276
1277 if (!soc_irq_cdata.domain) {
1278 pr_err("%s: Failed to create IRQ domain\n", __func__);
1279 return -ENOMEM;
1280 }
1281
1282 pr_debug("%s(): OC interrupts enabled successfully\n", __func__);
1283 return 0;
1284 }
1285
1286 #ifdef CONFIG_DEBUG_FS
1287 static int regs_show(struct seq_file *s, void *data)
1288 {
1289 struct platform_device *pdev = s->private;
1290 struct tegra_soctherm *ts = platform_get_drvdata(pdev);
1291 const struct tegra_tsensor *tsensors = ts->soc->tsensors;
1292 const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
1293 u32 r, state;
1294 int i, level;
1295
1296 seq_puts(s, "-----TSENSE (convert HW)-----\n");
1297
1298 for (i = 0; i < ts->soc->num_tsensors; i++) {
1299 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
1300 state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
1301
1302 seq_printf(s, "%s: ", tsensors[i].name);
1303 seq_printf(s, "En(%d) ", state);
1304
1305 if (!state) {
1306 seq_puts(s, "\n");
1307 continue;
1308 }
1309
1310 state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
1311 seq_printf(s, "tiddq(%d) ", state);
1312 state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
1313 seq_printf(s, "ten_count(%d) ", state);
1314 state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
1315 seq_printf(s, "tsample(%d) ", state + 1);
1316
1317 r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
1318 state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
1319 seq_printf(s, "Temp(%d/", state);
1320 state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
1321 seq_printf(s, "%d) ", translate_temp(state));
1322
1323 r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
1324 state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
1325 seq_printf(s, "Capture(%d/", state);
1326 state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
1327 seq_printf(s, "%d) ", state);
1328
1329 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
1330 state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
1331 seq_printf(s, "Stop(%d) ", state);
1332 state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
1333 seq_printf(s, "Tall(%d) ", state);
1334 state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
1335 seq_printf(s, "Over(%d/", state);
1336 state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
1337 seq_printf(s, "%d/", state);
1338 state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
1339 seq_printf(s, "%d) ", state);
1340
1341 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
1342 state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
1343 seq_printf(s, "Therm_A/B(%d/", state);
1344 state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
1345 seq_printf(s, "%d)\n", (s16)state);
1346 }
1347
1348 r = readl(ts->regs + SENSOR_PDIV);
1349 seq_printf(s, "PDIV: 0x%x\n", r);
1350
1351 r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
1352 seq_printf(s, "HOTSPOT: 0x%x\n", r);
1353
1354 seq_puts(s, "\n");
1355 seq_puts(s, "-----SOC_THERM-----\n");
1356
1357 r = readl(ts->regs + SENSOR_TEMP1);
1358 state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
1359 seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
1360 state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
1361 seq_printf(s, " GPU(%d) ", translate_temp(state));
1362 r = readl(ts->regs + SENSOR_TEMP2);
1363 state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
1364 seq_printf(s, " PLLX(%d) ", translate_temp(state));
1365 state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
1366 seq_printf(s, " MEM(%d)\n", translate_temp(state));
1367
1368 for (i = 0; i < ts->soc->num_ttgs; i++) {
1369 seq_printf(s, "%s:\n", ttgs[i]->name);
1370 for (level = 0; level < 4; level++) {
1371 s32 v;
1372 u32 mask;
1373 u16 off = ttgs[i]->thermctl_lvl0_offset;
1374
1375 r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1376
1377 mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
1378 state = REG_GET_MASK(r, mask);
1379 v = sign_extend32(state, ts->soc->bptt - 1);
1380 v *= ts->soc->thresh_grain;
1381 seq_printf(s, " %d: Up/Dn(%d /", level, v);
1382
1383 mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
1384 state = REG_GET_MASK(r, mask);
1385 v = sign_extend32(state, ts->soc->bptt - 1);
1386 v *= ts->soc->thresh_grain;
1387 seq_printf(s, "%d ) ", v);
1388
1389 mask = THERMCTL_LVL0_CPU0_EN_MASK;
1390 state = REG_GET_MASK(r, mask);
1391 seq_printf(s, "En(%d) ", state);
1392
1393 mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
1394 state = REG_GET_MASK(r, mask);
1395 seq_puts(s, "CPU Throt");
1396 if (!state)
1397 seq_printf(s, "(%s) ", "none");
1398 else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
1399 seq_printf(s, "(%s) ", "L");
1400 else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
1401 seq_printf(s, "(%s) ", "H");
1402 else
1403 seq_printf(s, "(%s) ", "H+L");
1404
1405 mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
1406 state = REG_GET_MASK(r, mask);
1407 seq_puts(s, "GPU Throt");
1408 if (!state)
1409 seq_printf(s, "(%s) ", "none");
1410 else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
1411 seq_printf(s, "(%s) ", "L");
1412 else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
1413 seq_printf(s, "(%s) ", "H");
1414 else
1415 seq_printf(s, "(%s) ", "H+L");
1416
1417 mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
1418 state = REG_GET_MASK(r, mask);
1419 seq_printf(s, "Status(%s)\n",
1420 state == 0 ? "LO" :
1421 state == 1 ? "In" :
1422 state == 2 ? "Res" : "HI");
1423 }
1424 }
1425
1426 r = readl(ts->regs + THERMCTL_STATS_CTL);
1427 seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
1428 r & STATS_CTL_EN_UP ? "En" : "--",
1429 r & STATS_CTL_EN_DN ? "En" : "--");
1430
1431 for (level = 0; level < 4; level++) {
1432 u16 off;
1433
1434 off = THERMCTL_LVL0_UP_STATS;
1435 r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1436 seq_printf(s, " Level_%d Up(%d) ", level, r);
1437
1438 off = THERMCTL_LVL0_DN_STATS;
1439 r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1440 seq_printf(s, "Dn(%d)\n", r);
1441 }
1442
1443 r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
1444 state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
1445 seq_printf(s, "Thermtrip Any En(%d)\n", state);
1446 for (i = 0; i < ts->soc->num_ttgs; i++) {
1447 state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
1448 seq_printf(s, " %s En(%d) ", ttgs[i]->name, state);
1449 state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
1450 state *= ts->soc->thresh_grain;
1451 seq_printf(s, "Thresh(%d)\n", state);
1452 }
1453
1454 r = readl(ts->regs + THROT_GLOBAL_CFG);
1455 seq_puts(s, "\n");
1456 seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);
1457
1458 seq_puts(s, "---------------------------------------------------\n");
1459 r = readl(ts->regs + THROT_STATUS);
1460 state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
1461 seq_printf(s, "THROT STATUS: breach(%d) ", state);
1462 state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
1463 seq_printf(s, "state(%d) ", state);
1464 state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
1465 seq_printf(s, "enabled(%d)\n", state);
1466
1467 r = readl(ts->regs + CPU_PSKIP_STATUS);
1468 if (ts->soc->use_ccroc) {
1469 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
1470 seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
1471 } else {
1472 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
1473 seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
1474 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
1475 seq_printf(s, "N(%d) ", state);
1476 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
1477 seq_printf(s, "enabled(%d)\n", state);
1478 }
1479
1480 return 0;
1481 }
1482
1483 DEFINE_SHOW_ATTRIBUTE(regs);
1484
1485 static void soctherm_debug_init(struct platform_device *pdev)
1486 {
1487 struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
1488 struct dentry *root;
1489
1490 root = debugfs_create_dir("soctherm", NULL);
1491
1492 tegra->debugfs_dir = root;
1493
1494 debugfs_create_file("reg_contents", 0644, root, pdev, &regs_fops);
1495 }
1496 #else
1497 static inline void soctherm_debug_init(struct platform_device *pdev) {}
1498 #endif
1499
1500 static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
1501 {
1502 struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
1503 int err;
1504
1505 if (!tegra->clock_soctherm || !tegra->clock_tsensor)
1506 return -EINVAL;
1507
1508 reset_control_assert(tegra->reset);
1509
1510 if (enable) {
1511 err = clk_prepare_enable(tegra->clock_soctherm);
1512 if (err) {
1513 reset_control_deassert(tegra->reset);
1514 return err;
1515 }
1516
1517 err = clk_prepare_enable(tegra->clock_tsensor);
1518 if (err) {
1519 clk_disable_unprepare(tegra->clock_soctherm);
1520 reset_control_deassert(tegra->reset);
1521 return err;
1522 }
1523 } else {
1524 clk_disable_unprepare(tegra->clock_tsensor);
1525 clk_disable_unprepare(tegra->clock_soctherm);
1526 }
1527
1528 reset_control_deassert(tegra->reset);
1529
1530 return 0;
1531 }
1532
1533 static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev,
1534 unsigned long *max_state)
1535 {
1536 *max_state = 1;
1537 return 0;
1538 }
1539
1540 static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev,
1541 unsigned long *cur_state)
1542 {
1543 struct tegra_soctherm *ts = cdev->devdata;
1544 u32 r;
1545
1546 r = readl(ts->regs + THROT_STATUS);
1547 if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK))
1548 *cur_state = 1;
1549 else
1550 *cur_state = 0;
1551
1552 return 0;
1553 }
1554
1555 static int throt_set_cdev_state(struct thermal_cooling_device *cdev,
1556 unsigned long cur_state)
1557 {
1558 return 0;
1559 }
1560
1561 static const struct thermal_cooling_device_ops throt_cooling_ops = {
1562 .get_max_state = throt_get_cdev_max_state,
1563 .get_cur_state = throt_get_cdev_cur_state,
1564 .set_cur_state = throt_set_cdev_state,
1565 };
1566
1567 static int soctherm_thermtrips_parse(struct platform_device *pdev)
1568 {
1569 struct device *dev = &pdev->dev;
1570 struct tegra_soctherm *ts = dev_get_drvdata(dev);
1571 struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
1572 const int max_num_prop = ts->soc->num_ttgs * 2;
1573 u32 *tlb;
1574 int i, j, n, ret;
1575
1576 if (!tt)
1577 return -ENOMEM;
1578
1579 n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
1580 if (n <= 0) {
1581 dev_info(dev,
1582 "missing thermtrips, will use critical trips as shut down temp\n");
1583 return n;
1584 }
1585
1586 n = min(max_num_prop, n);
1587
1588 tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
1589 if (!tlb)
1590 return -ENOMEM;
1591 ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
1592 tlb, n);
1593 if (ret) {
1594 dev_err(dev, "invalid num ele: thermtrips:%d\n", ret);
1595 return ret;
1596 }
1597
1598 i = 0;
1599 for (j = 0; j < n; j = j + 2) {
1600 if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
1601 continue;
1602
1603 tt[i].id = tlb[j];
1604 tt[i].temp = tlb[j + 1];
1605 i++;
1606 }
1607
1608 return 0;
1609 }
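/*
 * DT sketch (illustrative, not copied from a real board file): the
 * property is parsed as <sensor-id temperature-in-mC> pairs, e.g.
 *
 *	nvidia,thermtrips = <TEGRA124_SOCTHERM_SENSOR_CPU 102500
 *			     TEGRA124_SOCTHERM_SENSOR_GPU 103000>;
 *
 * Unknown sensor ids are skipped and any surplus cells beyond
 * num_ttgs * 2 are ignored.
 */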
1610
1611 static void soctherm_oc_cfg_parse(struct device *dev,
1612 struct device_node *np_oc,
1613 struct soctherm_throt_cfg *stc)
1614 {
1615 u32 val;
1616
1617 if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
1618 stc->oc_cfg.active_low = 1;
1619 else
1620 stc->oc_cfg.active_low = 0;
1621
1622 if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
1623 stc->oc_cfg.intr_en = 1;
1624 stc->oc_cfg.alarm_cnt_thresh = val;
1625 }
1626
1627 if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
1628 stc->oc_cfg.throt_period = val;
1629
1630 if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
1631 stc->oc_cfg.alarm_filter = val;
1632
1633 /* BRIEF throttling by default, do not support STICKY */
1634 stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
1635 }
1636
1637 static int soctherm_throt_cfg_parse(struct device *dev,
1638 struct device_node *np,
1639 struct soctherm_throt_cfg *stc)
1640 {
1641 struct tegra_soctherm *ts = dev_get_drvdata(dev);
1642 int ret;
1643 u32 val;
1644
1645 ret = of_property_read_u32(np, "nvidia,priority", &val);
1646 if (ret) {
1647 dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
1648 return -EINVAL;
1649 }
1650 stc->priority = val;
1651
1652 ret = of_property_read_u32(np, ts->soc->use_ccroc ?
1653 "nvidia,cpu-throt-level" :
1654 "nvidia,cpu-throt-percent", &val);
1655 if (!ret) {
1656 if (ts->soc->use_ccroc &&
1657 val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
1658 stc->cpu_throt_level = val;
1659 else if (!ts->soc->use_ccroc && val <= 100)
1660 stc->cpu_throt_depth = val;
1661 else
1662 goto err;
1663 } else {
1664 goto err;
1665 }
1666
1667 ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
1668 if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
1669 stc->gpu_throt_level = val;
1670 else
1671 goto err;
1672
1673 return 0;
1674
1675 err:
1676 dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n",
1677 stc->name);
1678 return -EINVAL;
1679 }
1680
1681 /**
1682 * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
1683 * and register them as cooling devices.
1684 */
1685 static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
1686 {
1687 struct device *dev = &pdev->dev;
1688 struct tegra_soctherm *ts = dev_get_drvdata(dev);
1689 struct device_node *np_stc, *np_stcc;
1690 const char *name;
1691 int i;
1692
1693 for (i = 0; i < THROTTLE_SIZE; i++) {
1694 ts->throt_cfgs[i].name = throt_names[i];
1695 ts->throt_cfgs[i].id = i;
1696 ts->throt_cfgs[i].init = false;
1697 }
1698
1699 np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
1700 if (!np_stc) {
1701 dev_info(dev,
1702 "throttle-cfg: no throttle-cfgs - not enabling\n");
1703 return;
1704 }
1705
1706 for_each_child_of_node(np_stc, np_stcc) {
1707 struct soctherm_throt_cfg *stc;
1708 struct thermal_cooling_device *tcd;
1709 int err;
1710
1711 name = np_stcc->name;
1712 stc = find_throttle_cfg_by_name(ts, name);
1713 if (!stc) {
1714 dev_err(dev,
1715 "throttle-cfg: could not find %s\n", name);
1716 continue;
1717 }
1718
1719 if (stc->init) {
1720 dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
1721 of_node_put(np_stcc);
1722 break;
1723 }
1724
1725 err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
1726 if (err)
1727 continue;
1728
1729 if (stc->id >= THROTTLE_OC1) {
1730 soctherm_oc_cfg_parse(dev, np_stcc, stc);
1731 stc->init = true;
1732 } else {
1733
1734 tcd = thermal_of_cooling_device_register(np_stcc,
1735 (char *)name, ts,
1736 &throt_cooling_ops);
1737 if (IS_ERR_OR_NULL(tcd)) {
1738 dev_err(dev,
1739 "throttle-cfg: %s: failed to register cooling device\n",
1740 name);
1741 continue;
1742 }
1743 stc->cdev = tcd;
1744 stc->init = true;
1745 }
1746
1747 }
1748
1749 of_node_put(np_stc);
1750 }
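/*
 * DT sketch (illustrative, not copied from a real board file): each child
 * of "throttle-cfgs" whose node name matches an entry in throt_names[]
 * becomes one soctherm_throt_cfg, e.g.
 *
 *	throttle-cfgs {
 *		throttle_heavy: heavy {
 *			nvidia,priority = <100>;
 *			nvidia,cpu-throt-percent = <85>;
 *			nvidia,gpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_MED>;
 *		};
 *	};
 *
 * Non-OC entries are additionally registered as cooling devices so that
 * "hot" trips can bind to them.
 */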
1751
1752 /**
1753 * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
1754 * @level: the level of throttling (LOW/MED/HIGH)
1755 *
1756 * It's necessary to set up the CPU-local CCROC NV_THERM instance with
1757 * the M/N values desired for each level. This function does this.
1758 *
1759 * This function pre-programs the CCROC NV_THERM levels in terms of
1760 * pre-configured "Low", "Medium" or "Heavy" throttle levels which are
1761 * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
1762 */
1763 static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
1764 {
1765 u8 depth, dividend;
1766 u32 r;
1767
1768 switch (level) {
1769 case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
1770 depth = 50;
1771 break;
1772 case TEGRA_SOCTHERM_THROT_LEVEL_MED:
1773 depth = 75;
1774 break;
1775 case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
1776 depth = 80;
1777 break;
1778 case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
1779 return;
1780 default:
1781 return;
1782 }
1783
1784 dividend = THROT_DEPTH_DIVIDEND(depth);
1785
1786 /* setup PSKIP in ccroc nv_therm registers */
1787 r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
1788 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
1789 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
1790 ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
1791
1792 r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
1793 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
1794 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
1795 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
1796 ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
1797 }
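/*
 * Worked example (illustrative only): for the MED level the depth is 75%,
 * so dividend = THROT_DEPTH_DIVIDEND(75) = 63; with the divisor programmed
 * to 0xff this requests roughly (63 + 1) / (255 + 1) = 25% of the nominal
 * clock, i.e. about a 75% throttle depth (assuming the usual M/N
 * pulse-skipper interpretation).
 */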
1798
1799 /**
1800 * throttlectl_cpu_level_select() - program CPU pulse skipper config
1801 * @throt: the throttle event id (LIGHT or HEAVY)
1802 *
1803 * Pulse skippers are used to throttle clock frequencies. This
1804 * function programs the pulse skippers based on @throt and platform
1805 * data. This function is used on SoCs which have CPU-local pulse
1806 * skipper control, such as T13x. It programs soctherm's interface to
1807 * Denver:CCROC NV_THERM in terms of Low, Medium and High throttling
1808 * vectors. PSKIP_BYPASS mode is set as required per HW spec.
1809 */
1810 static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
1811 enum soctherm_throttle_id throt)
1812 {
1813 u32 r, throt_vect;
1814
1815 /* Denver:CCROC NV_THERM interface N:3 Mapping */
1816 switch (ts->throt_cfgs[throt].cpu_throt_level) {
1817 case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
1818 throt_vect = THROT_VECT_LOW;
1819 break;
1820 case TEGRA_SOCTHERM_THROT_LEVEL_MED:
1821 throt_vect = THROT_VECT_MED;
1822 break;
1823 case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
1824 throt_vect = THROT_VECT_HIGH;
1825 break;
1826 default:
1827 throt_vect = THROT_VECT_NONE;
1828 break;
1829 }
1830
1831 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1832 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1833 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
1834 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
1835 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1836
1837 /* bypass sequencer in soc_therm as it is programmed in ccroc */
1838 r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
1839 writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1840 }
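/*
 * Note: on use_ccroc SoCs (e.g. Tegra132) SOC_THERM does not skip CPU clock
 * pulses itself; it only selects one of the level vectors that
 * throttlectl_cpu_level_cfg() above programmed into the CPU-local CCROC
 * NV_THERM, and the soc_therm-side ramp sequencer is bypassed accordingly.
 */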
1841
1842 /**
1843 * throttlectl_cpu_mn() - program CPU pulse skipper configuration
 * @ts: pointer to a struct tegra_soctherm
1844 * @throt: the LIGHT/HEAVY throttle event id
1845 *
1846 * Pulse skippers are used to throttle clock frequencies. This
1847 * function programs the pulse skippers based on @throt and platform
1848 * data. This function is used for CPUs that have "remote" pulse
1849 * skipper control, e.g., the CPU pulse skipper is controlled by the
1850 * SOC_THERM IP block. (SOC_THERM is located outside the CPU
1851 * complex.)
1852 */
1853 static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
1854 enum soctherm_throttle_id throt)
1855 {
1856 u32 r;
1857 int depth;
1858 u8 dividend;
1859
1860 depth = ts->throt_cfgs[throt].cpu_throt_depth;
1861 dividend = THROT_DEPTH_DIVIDEND(depth);
1862
1863 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1864 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1865 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
1866 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
1867 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1868
1869 r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1870 r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
1871 r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
1872 writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1873 }
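/*
 * Note: in the non-CCROC ("remote") case above, SOC_THERM programs the CPU
 * pulse skipper directly: the dividend is derived from the configuration's
 * cpu_throt_depth percentage (parsed earlier from the throttle-cfgs DT node),
 * the divisor stays at its full-scale value of 0xff, and the ramp registers
 * bound how quickly the skip ratio is applied.
 */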
1874
1875 /**
1876 * throttlectl_gpu_level_select() - selects throttling level for GPU
 * @ts: pointer to a struct tegra_soctherm
1877 * @throt: the LIGHT/HEAVY throttle event id
1878 *
1879 * This function programs soctherm's interface to GK20a NV_THERM to select
1880 * pre-configured "Low", "Medium" or "Heavy" throttle levels.
1883 */
1884 static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
1885 enum soctherm_throttle_id throt)
1886 {
1887 u32 r, level, throt_vect;
1888
1889 level = ts->throt_cfgs[throt].gpu_throt_level;
1890 throt_vect = THROT_LEVEL_TO_DEPTH(level);
1891 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
1892 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1893 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
1894 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
1895 }
1896
1897 static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
1898 enum soctherm_throttle_id throt)
1899 {
1900 u32 r;
1901 struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
1902
1903 if (oc->mode == OC_THROTTLE_MODE_DISABLED)
1904 return -EINVAL;
1905
1906 r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
1907 r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
1908 r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
1909 r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
1910 writel(r, ts->regs + ALARM_CFG(throt));
1911 writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
1912 writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
1913 writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
1914 soctherm_oc_intr_enable(ts, throt, oc->intr_en);
1915
1916 return 0;
1917 }
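/*
 * Summary of the over-current programming above: the alarm is put into
 * hardware-restore mode with the requested throttle mode and polarity, the
 * throttle period, count threshold and filter are written to their per-alarm
 * registers, and the alarm interrupt is enabled when oc->intr_en is set.  A
 * configuration left in OC_THROTTLE_MODE_DISABLED is rejected so that
 * soctherm_throttle_program() skips that alarm entirely.
 */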
1918
1919 /**
1920 * soctherm_throttle_program() - programs pulse skippers' configuration
 * @ts: pointer to a struct tegra_soctherm
1921 * @throt: the LIGHT/HEAVY throttle event id
1922 *
1923 * Pulse skippers are used to throttle clock frequencies.
1924 * This function programs the pulse skippers.
1925 */
1926 static void soctherm_throttle_program(struct tegra_soctherm *ts,
1927 enum soctherm_throttle_id throt)
1928 {
1929 u32 r;
1930 struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];
1931
1932 if (!stc.init)
1933 return;
1934
1935 if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
1936 return;
1937
1938 /* Setup PSKIP parameters */
1939 if (ts->soc->use_ccroc)
1940 throttlectl_cpu_level_select(ts, throt);
1941 else
1942 throttlectl_cpu_mn(ts, throt);
1943
1944 throttlectl_gpu_level_select(ts, throt);
1945
1946 r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
1947 writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
1948
1949 r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
1950 writel(r, ts->regs + THROT_DELAY_CTRL(throt));
1951
1952 r = readl(ts->regs + THROT_PRIORITY_LOCK);
1953 r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
1954 if (r >= stc.priority)
1955 return;
1956 r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
1957 stc.priority);
1958 writel(r, ts->regs + THROT_PRIORITY_LOCK);
1959 }
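/*
 * Note: the THROT_PRIORITY_LOCK update above is monotonic -- the register is
 * only ever raised to the highest priority programmed so far, so once every
 * configuration has been applied it reflects the most important throttle
 * source in use.
 */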
1960
1961 static void tegra_soctherm_throttle(struct device *dev)
1962 {
1963 struct tegra_soctherm *ts = dev_get_drvdata(dev);
1964 u32 v;
1965 int i;
1966
1967 /* configure LOW, MED and HIGH levels for CCROC NV_THERM */
1968 if (ts->soc->use_ccroc) {
1969 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
1970 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
1971 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
1972 }
1973
1974 /* Thermal HW throttle programming */
1975 for (i = 0; i < THROTTLE_SIZE; i++)
1976 soctherm_throttle_program(ts, i);
1977
1978 v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
1979 if (ts->soc->use_ccroc) {
1980 ccroc_writel(ts, v, CCROC_GLOBAL_CFG);
1981
1982 v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
1983 v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
1984 ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
1985 } else {
1986 writel(v, ts->regs + THROT_GLOBAL_CFG);
1987
1988 v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
1989 v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
1990 writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
1991 }
1992
1993 /* initialize stats collection */
1994 v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
1995 STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
1996 writel(v, ts->regs + THERMCTL_STATS_CTL);
1997 }
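/*
 * Note: the CDIVG_USE_THERM_CONTROLS bit set above (through CCROC on
 * use_ccroc SoCs, through the CAR super CCLKG divider otherwise) is what
 * hands control of the G-cluster CPU clock divider to the thermal throttle
 * logic; the final THERMCTL_STATS_CTL write clears and enables the up/down
 * throttle-event statistics counters.
 */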
1998
1999 static int soctherm_interrupts_init(struct platform_device *pdev,
2000 struct tegra_soctherm *tegra)
2001 {
2002 struct device_node *np = pdev->dev.of_node;
2003 int ret;
2004
2005 ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
2006 if (ret < 0) {
2007 dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
2008 return ret;
2009 }
2010
2011 tegra->thermal_irq = platform_get_irq(pdev, 0);
2012 if (tegra->thermal_irq < 0) {
2013 dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
2014 return 0;
2015 }
2016
2017 tegra->edp_irq = platform_get_irq(pdev, 1);
2018 if (tegra->edp_irq < 0) {
2019 dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
2020 return 0;
2021 }
2022
2023 ret = devm_request_threaded_irq(&pdev->dev,
2024 tegra->thermal_irq,
2025 soctherm_thermal_isr,
2026 soctherm_thermal_isr_thread,
2027 IRQF_ONESHOT,
2028 dev_name(&pdev->dev),
2029 tegra);
2030 if (ret < 0) {
2031 dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
2032 return ret;
2033 }
2034
2035 ret = devm_request_threaded_irq(&pdev->dev,
2036 tegra->edp_irq,
2037 soctherm_edp_isr,
2038 soctherm_edp_isr_thread,
2039 IRQF_ONESHOT,
2040 "soctherm_edp",
2041 tegra);
2042 if (ret < 0) {
2043 dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
2044 return ret;
2045 }
2046
2047 return 0;
2048 }
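/*
 * Note: the thermal and EDP interrupts are treated as optional here -- if
 * either platform_get_irq() lookup fails, the function logs it at debug level
 * and still returns 0, so the driver keeps probing with interrupt-driven trip
 * and over-current notification disabled.
 */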
2049
2050 static void soctherm_init(struct platform_device *pdev)
2051 {
2052 struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2053 const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
2054 int i;
2055 u32 pdiv, hotspot;
2056
2057 /* Initialize raw sensors */
2058 for (i = 0; i < tegra->soc->num_tsensors; ++i)
2059 enable_tsensor(tegra, i);
2060
2061 /* program pdiv and hotspot offsets per THERM */
2062 pdiv = readl(tegra->regs + SENSOR_PDIV);
2063 hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
2064 for (i = 0; i < tegra->soc->num_ttgs; ++i) {
2065 pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
2066 ttgs[i]->pdiv);
2067 /* hotspot offsets are measured against PLLX, so PLLX itself needs none */
2068 if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
2069 continue;
2070 hotspot = REG_SET_MASK(hotspot,
2071 ttgs[i]->pllx_hotspot_mask,
2072 ttgs[i]->pllx_hotspot_diff);
2073 }
2074 writel(pdiv, tegra->regs + SENSOR_PDIV);
2075 writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
2076
2077 /* Configure hw throttle */
2078 tegra_soctherm_throttle(&pdev->dev);
2079 }
2080
2081 static const struct of_device_id tegra_soctherm_of_match[] = {
2082 #ifdef CONFIG_ARCH_TEGRA_124_SOC
2083 {
2084 .compatible = "nvidia,tegra124-soctherm",
2085 .data = &tegra124_soctherm,
2086 },
2087 #endif
2088 #ifdef CONFIG_ARCH_TEGRA_132_SOC
2089 {
2090 .compatible = "nvidia,tegra132-soctherm",
2091 .data = &tegra132_soctherm,
2092 },
2093 #endif
2094 #ifdef CONFIG_ARCH_TEGRA_210_SOC
2095 {
2096 .compatible = "nvidia,tegra210-soctherm",
2097 .data = &tegra210_soctherm,
2098 },
2099 #endif
2100 { },
2101 };
2102 MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
2103
2104 static int tegra_soctherm_probe(struct platform_device *pdev)
2105 {
2106 const struct of_device_id *match;
2107 struct tegra_soctherm *tegra;
2108 struct thermal_zone_device *z;
2109 struct tsensor_shared_calib shared_calib;
2110 struct resource *res;
2111 struct tegra_soctherm_soc *soc;
2112 unsigned int i;
2113 int err;
2114
2115 match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
2116 if (!match)
2117 return -ENODEV;
2118
2119 soc = (struct tegra_soctherm_soc *)match->data;
2120 if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
2121 return -EINVAL;
2122
2123 tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
2124 if (!tegra)
2125 return -ENOMEM;
2126
2127 mutex_init(&tegra->thermctl_lock);
2128 dev_set_drvdata(&pdev->dev, tegra);
2129
2130 tegra->soc = soc;
2131
2132 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2133 "soctherm-reg");
2134 tegra->regs = devm_ioremap_resource(&pdev->dev, res);
2135 if (IS_ERR(tegra->regs)) {
2136 dev_err(&pdev->dev, "can't get soctherm registers\n");
2137 return PTR_ERR(tegra->regs);
2138 }
2139
2140 if (!tegra->soc->use_ccroc) {
2141 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2142 "car-reg");
2143 tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
2144 if (IS_ERR(tegra->clk_regs)) {
2145 dev_err(&pdev->dev, "can't get car clk registers\n");
2146 return PTR_ERR(tegra->clk_regs);
2147 }
2148 } else {
2149 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2150 "ccroc-reg");
2151 tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
2152 if (IS_ERR(tegra->ccroc_regs)) {
2153 dev_err(&pdev->dev, "can't get ccroc registers\n");
2154 return PTR_ERR(tegra->ccroc_regs);
2155 }
2156 }
2157
2158 tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
2159 if (IS_ERR(tegra->reset)) {
2160 dev_err(&pdev->dev, "can't get soctherm reset\n");
2161 return PTR_ERR(tegra->reset);
2162 }
2163
2164 tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
2165 if (IS_ERR(tegra->clock_tsensor)) {
2166 dev_err(&pdev->dev, "can't get tsensor clock\n");
2167 return PTR_ERR(tegra->clock_tsensor);
2168 }
2169
2170 tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
2171 if (IS_ERR(tegra->clock_soctherm)) {
2172 dev_err(&pdev->dev, "can't get soctherm clock\n");
2173 return PTR_ERR(tegra->clock_soctherm);
2174 }
2175
2176 tegra->calib = devm_kcalloc(&pdev->dev,
2177 soc->num_tsensors, sizeof(u32),
2178 GFP_KERNEL);
2179 if (!tegra->calib)
2180 return -ENOMEM;
2181
2182 /* calculate shared calibration data */
2183 err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
2184 if (err)
2185 return err;
2186
2187 /* calculate tsensor calibration data */
2188 for (i = 0; i < soc->num_tsensors; ++i) {
2189 err = tegra_calc_tsensor_calib(&soc->tsensors[i],
2190 &shared_calib,
2191 &tegra->calib[i]);
2192 if (err)
2193 return err;
2194 }
2195
2196 tegra->thermctl_tzs = devm_kcalloc(&pdev->dev,
2197 soc->num_ttgs, sizeof(z),
2198 GFP_KERNEL);
2199 if (!tegra->thermctl_tzs)
2200 return -ENOMEM;
2201
2202 err = soctherm_clk_enable(pdev, true);
2203 if (err)
2204 return err;
2205
2206 soctherm_thermtrips_parse(pdev);
2207
2208 soctherm_init_hw_throt_cdev(pdev);
2209
2210 soctherm_init(pdev);
2211
2212 for (i = 0; i < soc->num_ttgs; ++i) {
2213 struct tegra_thermctl_zone *zone =
2214 devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
2215 if (!zone) {
2216 err = -ENOMEM;
2217 goto disable_clocks;
2218 }
2219
2220 zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
2221 zone->dev = &pdev->dev;
2222 zone->sg = soc->ttgs[i];
2223 zone->ts = tegra;
2224
2225 z = devm_thermal_zone_of_sensor_register(&pdev->dev,
2226 soc->ttgs[i]->id, zone,
2227 &tegra_of_thermal_ops);
2228 if (IS_ERR(z)) {
2229 err = PTR_ERR(z);
2230 dev_err(&pdev->dev, "failed to register sensor: %d\n",
2231 err);
2232 goto disable_clocks;
2233 }
2234
2235 zone->tz = z;
2236 tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
2237
2238 /* Configure hw trip points */
2239 err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
2240 if (err)
2241 goto disable_clocks;
2242 }
2243
2244 err = soctherm_interrupts_init(pdev, tegra);
2245
2246 soctherm_debug_init(pdev);
2247
2248 return 0;
2249
2250 disable_clocks:
2251 soctherm_clk_enable(pdev, false);
2252
2253 return err;
2254 }
2255
2256 static int tegra_soctherm_remove(struct platform_device *pdev)
2257 {
2258 struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2259
2260 debugfs_remove_recursive(tegra->debugfs_dir);
2261
2262 soctherm_clk_enable(pdev, false);
2263
2264 return 0;
2265 }
2266
2267 static int __maybe_unused soctherm_suspend(struct device *dev)
2268 {
2269 struct platform_device *pdev = to_platform_device(dev);
2270
2271 soctherm_clk_enable(pdev, false);
2272
2273 return 0;
2274 }
2275
2276 static int __maybe_unused soctherm_resume(struct device *dev)
2277 {
2278 struct platform_device *pdev = to_platform_device(dev);
2279 struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2280 struct tegra_soctherm_soc *soc = tegra->soc;
2281 int err, i;
2282
2283 err = soctherm_clk_enable(pdev, true);
2284 if (err) {
2285 dev_err(&pdev->dev,
2286 "Resume failed: enable clocks failed\n");
2287 return err;
2288 }
2289
2290 soctherm_init(pdev);
2291
2292 for (i = 0; i < soc->num_ttgs; ++i) {
2293 struct thermal_zone_device *tz;
2294
2295 tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
2296 err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
2297 if (err) {
2298 dev_err(&pdev->dev,
2299 "Resume failed: set hwtrips failed\n");
2300 return err;
2301 }
2302 }
2303
2304 return 0;
2305 }
2306
2307 static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
2308
2309 static struct platform_driver tegra_soctherm_driver = {
2310 .probe = tegra_soctherm_probe,
2311 .remove = tegra_soctherm_remove,
2312 .driver = {
2313 .name = "tegra_soctherm",
2314 .pm = &tegra_soctherm_pm,
2315 .of_match_table = tegra_soctherm_of_match,
2316 },
2317 };
2318 module_platform_driver(tegra_soctherm_driver);
2319
2320 MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
2321 MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
2322 MODULE_LICENSE("GPL v2");