/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE 0x04
#define LPSS_LTR_SIZE 0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
#define LPSS_RESETS 0x04
#define LPSS_RESETS_RESET_FUNC BIT(0)
#define LPSS_RESETS_RESET_APB BIT(1)
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_LTR_SNOOP_REQ BIT(15)
#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US 0x800
#define LPSS_LTR_SNOOP_LAT_32US 0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT 5
#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)

#define LPSS_PRV_REG_COUNT 9

/* LPSS Flags */
#define LPSS_CLK BIT(0)
#define LPSS_CLK_GATE BIT(1)
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
#define LPSS_NO_D3_DELAY BIT(5)

/* Crystal Cove PMIC shares same ACPI ID between different platforms */
#define BYT_CRC_HRV 2
#define CHT_CRC_HRV 3

struct lpss_private_data;

struct lpss_device_desc {
        unsigned int flags;
        const char *clk_con_id;
        unsigned int prv_offset;
        size_t prv_size_override;
        struct property_entry *properties;
        void (*setup)(struct lpss_private_data *pdata);
        bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
        .flags = LPSS_CLK,
};

struct lpss_private_data {
        struct acpi_device *adev;
        void __iomem *mmio_base;
        resource_size_t mmio_size;
        unsigned int fixed_clk_rate;
        struct clk *clk;
        const struct lpss_device_desc *dev_desc;
        u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * If the power is off, any access to the DMA controller will hang the system.
 * The behaviour has been reproduced on some HP laptops based on Intel BayTrail
 * as well as on the ASUS T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR 0xF4
#define LPSS_UART_CPR_AFCE BIT(4)

static void lpss_uart_setup(struct lpss_private_data *pdata)
{
        unsigned int offset;
        u32 val;

        offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
        val = readl(pdata->mmio_base + offset);
        writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

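        /*
         * The read below checks the DesignWare UART Component Parameter
         * Register: if this instance was not synthesized with automatic flow
         * control (AFCE), the RTS override bit in the LPSS private GENERAL
         * register is set instead (presumably so that RTS can still be driven
         * without hardware flow control).
         */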
        val = readl(pdata->mmio_base + LPSS_UART_CPR);
        if (!(val & LPSS_UART_CPR_AFCE)) {
                offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
                val = readl(pdata->mmio_base + offset);
                val |= LPSS_GENERAL_UART_RTS_OVRD;
                writel(val, pdata->mmio_base + offset);
        }
}

static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
        unsigned int offset;
        u32 val;

        offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
        val = readl(pdata->mmio_base + offset);
        val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
        writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
        PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
                               "pwm_backlight", 0, PWM_POLARITY_NORMAL,
                               "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
        struct acpi_device *adev = pdata->adev;

        /* Only call pwm_add_table for the first PWM controller */
        if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
                return;

        if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
                pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE 0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
        const char *uid_str = acpi_device_uid(pdata->adev);
        acpi_handle handle = pdata->adev->handle;
        unsigned long long shared_host = 0;
        acpi_status status;
        long uid = 0;

        /* Expected to always be true, but better safe than sorry */
        if (uid_str)
                uid = simple_strtol(uid_str, NULL, 10);

        /* Detect I2C bus shared with PUNIT and ignore its d3 status */
        status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
        if (ACPI_SUCCESS(status) && shared_host && uid)
                pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

        lpss_deassert_reset(pdata);

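        /*
         * The private register at prv_offset is the LPSS clock register for
         * this device: if it reads back non-zero (i.e. it has already been set
         * up, typically by the firmware), the controller is treated as running
         * from a fixed 133 MHz clock and register_device_clock() will register
         * a fixed-rate clock for it.
         */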
        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
                pdata->fixed_clk_rate = 133000000;

        writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
        PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
                               "pwm_backlight", 0, PWM_POLARITY_NORMAL,
                               "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
        struct acpi_device *adev = pdata->adev;

        /* Only call pwm_add_table for the first PWM controller */
        if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
                return;

        pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
        .prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
        .prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
        PROPERTY_ENTRY_U32("reg-io-width", 4),
        PROPERTY_ENTRY_U32("reg-shift", 2),
        PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
        { },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
        .flags = LPSS_LTR,
        .prv_offset = 0x1000,
        .prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
        .flags = LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
        .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
        .prv_offset = 0x800,
        .setup = bsw_pwm_setup,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
                        | LPSS_NO_D3_DELAY,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
        .flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
        .resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
        .resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
                        | LPSS_NO_D3_DELAY,
        .prv_offset = 0x400,
        .setup = lpss_deassert_reset,
};

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

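/*
 * CPUs on which the LPSS DMA controllers must be kept powered on; matching
 * one of these in acpi_lpss_init() sets LPSS_QUIRK_ALWAYS_POWER_ON.
 */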
static const struct x86_cpu_id lpss_cpu_ids[] = {
        ICPU(INTEL_FAM6_ATOM_SILVERMONT),       /* Valleyview, Bay Trail */
        ICPU(INTEL_FAM6_ATOM_AIRMONT),          /* Braswell, Cherry Trail */
        {}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
        /* Generic LPSS devices */
        { "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

        /* Lynxpoint LPSS devices */
        { "INT33C0", LPSS_ADDR(lpt_dev_desc) },
        { "INT33C1", LPSS_ADDR(lpt_dev_desc) },
        { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
        { "INT33C7", },

        /* BayTrail LPSS devices */
        { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
        { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
        { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
        { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
        { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
        { "INT33B2", },
        { "INT33FC", },

        /* Braswell LPSS devices */
        { "80862286", LPSS_ADDR(lpss_dma_desc) },
        { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
        { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
        { "808622C0", LPSS_ADDR(lpss_dma_desc) },
        { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

        /* Broadwell LPSS devices */
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
        { "INT3431", LPSS_ADDR(lpt_dev_desc) },
        { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
        { "INT3437", },

        /* Wildcat Point LPSS devices */
        { "INT3438", LPSS_ADDR(lpt_dev_desc) },

        { }
};

#ifdef CONFIG_X86_INTEL_LPSS

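/*
 * Resource filter used with acpi_dev_get_resources() below: returning a
 * positive value for anything that is not a memory resource makes the core
 * skip it, so only MEM resources end up on the resource list.
 */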
static int is_memory(struct acpi_resource *res, void *not_used)
{
        struct resource r;
        return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
        lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

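/*
 * Register the per-device clock exposed to the driver. Depending on the
 * descriptor flags this builds a small clock chain on top of the shared
 * "clk-lpt" root clock: an optional gate (bit 0 of the private clock
 * register), an optional fractional divider (15-bit M and N fields at bits
 * 1-15 and 16-30), and an "-update" gate on bit 31 (which the hardware
 * appears to use to latch new divider values). Devices with a fixed_clk_rate
 * get a plain fixed-rate clock instead.
 */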
static int register_device_clock(struct acpi_device *adev,
                                 struct lpss_private_data *pdata)
{
        const struct lpss_device_desc *dev_desc = pdata->dev_desc;
        const char *devname = dev_name(&adev->dev);
        struct clk *clk;
        struct lpss_clk_data *clk_data;
        const char *parent, *clk_name;
        void __iomem *prv_base;

        if (!lpss_clk_dev)
                lpt_register_clock_device();

        clk_data = platform_get_drvdata(lpss_clk_dev);
        if (!clk_data)
                return -ENODEV;
        clk = clk_data->clk;

        if (!pdata->mmio_base
            || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
                return -ENODATA;

        parent = clk_data->name;
        prv_base = pdata->mmio_base + dev_desc->prv_offset;

        if (pdata->fixed_clk_rate) {
                clk = clk_register_fixed_rate(NULL, devname, parent, 0,
                                              pdata->fixed_clk_rate);
                goto out;
        }

        if (dev_desc->flags & LPSS_CLK_GATE) {
                clk = clk_register_gate(NULL, devname, parent, 0,
                                        prv_base, 0, 0, NULL);
                parent = devname;
        }

        if (dev_desc->flags & LPSS_CLK_DIVIDER) {
                /* Prevent division by zero */
                if (!readl(prv_base))
                        writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

                clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
                if (!clk_name)
                        return -ENOMEM;
                clk = clk_register_fractional_divider(NULL, clk_name, parent,
                                                      0, prv_base,
                                                      1, 15, 16, 15, 0, NULL);
                parent = clk_name;

                clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
                if (!clk_name) {
                        kfree(parent);
                        return -ENOMEM;
                }
                clk = clk_register_gate(NULL, clk_name, parent,
                                        CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
                                        prv_base, 31, 0, NULL);
                kfree(parent);
                kfree(clk_name);
        }
out:
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        pdata->clk = clk;
        clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
        return 0;
}

static int acpi_lpss_create_device(struct acpi_device *adev,
                                   const struct acpi_device_id *id)
{
        const struct lpss_device_desc *dev_desc;
        struct lpss_private_data *pdata;
        struct resource_entry *rentry;
        struct list_head resource_list;
        struct platform_device *pdev;
        int ret;

        dev_desc = (const struct lpss_device_desc *)id->driver_data;
        if (!dev_desc) {
                pdev = acpi_create_platform_device(adev, NULL);
                return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
        }
        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
        if (ret < 0)
                goto err_out;

        list_for_each_entry(rentry, &resource_list, node)
                if (resource_type(rentry->res) == IORESOURCE_MEM) {
                        if (dev_desc->prv_size_override)
                                pdata->mmio_size = dev_desc->prv_size_override;
                        else
                                pdata->mmio_size = resource_size(rentry->res);
                        pdata->mmio_base = ioremap(rentry->res->start,
                                                   pdata->mmio_size);
                        break;
                }

        acpi_dev_free_resource_list(&resource_list);

        if (!pdata->mmio_base) {
                /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
                adev->pnp.type.platform_id = 0;
                /* Skip the device, but continue the namespace scan. */
                ret = 0;
                goto err_out;
        }

        pdata->adev = adev;
        pdata->dev_desc = dev_desc;

        if (dev_desc->setup)
                dev_desc->setup(pdata);

        if (dev_desc->flags & LPSS_CLK) {
                ret = register_device_clock(adev, pdata);
                if (ret) {
                        /* Skip the device, but continue the namespace scan. */
                        ret = 0;
                        goto err_out;
                }
        }

        /*
         * This works around a known issue in ACPI tables where LPSS devices
         * have _PS0 and _PS3 without _PSC (and no power resources), so
         * acpi_bus_init_power() will assume that the BIOS has put them into D0.
         */
        acpi_device_fix_up_power(adev);

        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev, dev_desc->properties);
        if (!IS_ERR_OR_NULL(pdev))
                return 1;

        ret = PTR_ERR(pdev);
        adev->driver_data = NULL;

err_out:
        kfree(pdata);
        return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
        return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
                             unsigned int reg)
{
        writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
        struct acpi_device *adev;
        struct lpss_private_data *pdata;
        unsigned long flags;
        int ret;

        ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
        if (WARN_ON(ret))
                return ret;

        spin_lock_irqsave(&dev->power.lock, flags);
        if (pm_runtime_suspended(dev)) {
                ret = -EAGAIN;
                goto out;
        }
        pdata = acpi_driver_data(adev);
        if (WARN_ON(!pdata || !pdata->mmio_base)) {
                ret = -ENODEV;
                goto out;
        }
        *val = __lpss_reg_read(pdata, reg);

out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
        return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        u32 ltr_value = 0;
        unsigned int reg;
        int ret;

        reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
        ret = lpss_reg_read(dev, reg, &ltr_value);
        if (ret)
                return ret;

        return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        u32 ltr_mode = 0;
        char *outstr;
        int ret;

        ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
        if (ret)
                return ret;

        outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
        return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
        &dev_attr_auto_ltr.attr,
        &dev_attr_sw_ltr.attr,
        &dev_attr_ltr_mode.attr,
        NULL,
};

static const struct attribute_group lpss_attr_group = {
        .attrs = lpss_attrs,
        .name = "lpss_ltr",
};

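/*
 * Program the software LTR (latency tolerance) value for @dev. A negative
 * @val switches the device back to automatic LTR mode. Otherwise @val is
 * encoded into LPSS_SW_LTR as a 10-bit latency value plus a scale: the 1us
 * granularity is used for values up to LPSS_LTR_MAX_VAL, the 32us granularity
 * (with @val scaled down accordingly) above that, and values at or beyond
 * LPSS_LTR_SNOOP_LAT_CUTOFF are clamped to the maximum. Finally, SW LTR mode
 * is enabled in LPSS_GENERAL if it is not enabled already.
 */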
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        u32 ltr_mode, ltr_val;

        ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
        if (val < 0) {
                if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
                        ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
                        __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
                }
                return;
        }
        ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
        if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
                ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
                val = LPSS_LTR_MAX_VAL;
        } else if (val > LPSS_LTR_MAX_VAL) {
                ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
                val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
        } else {
                ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
        }
        ltr_val |= val;
        __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
        if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
                ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
                __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
        }
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of an LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
                               struct lpss_private_data *pdata)
{
        unsigned int i;

        for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
                unsigned long offset = i * sizeof(u32);

                pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
                dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
                        pdata->prv_reg_ctx[i], offset);
        }
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of an LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously saved with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
                                  struct lpss_private_data *pdata)
{
        unsigned int i;

        for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
                unsigned long offset = i * sizeof(u32);

                __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
                dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
                        pdata->prv_reg_ctx[i], offset);
        }
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
        /*
         * The following delay is needed or the subsequent write operations may
         * fail. The LPSS devices are actually PCI devices and the PCI spec
         * expects a 10ms delay before the device can be accessed after a D3 to
         * D0 transition. However, some platforms such as BSW do not need this
         * delay.
         */
        unsigned int delay = 10;        /* default 10ms delay */

        if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
                delay = 0;

        msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = acpi_dev_resume(dev);
        if (ret)
                return ret;

        acpi_lpss_d3_to_d0_delay(pdata);

        /*
         * This is called only at the ->probe() stage, where a device is either
         * in a known state defined by the BIOS or, most likely, powered off.
         * Due to this we have to deassert the reset line to be sure that
         * ->probe() will recognize the device.
         */
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                lpss_deassert_reset(pdata);

        return 0;
}

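/*
 * The PM domain ->dismiss() callback: the counterpart of acpi_lpss_activate()
 * above, invoked when probing fails or the driver is unbound, so the device
 * is simply put back into a low-power state (without wakeup enabled).
 */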
static void acpi_lpss_dismiss(struct device *dev)
{
        acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB
#define LPSS_IOSF_UNIT_LPIO2 0xAC

#define LPSS_IOSF_PMCSR 0x84
#define LPSS_PMCSR_D0 0
#define LPSS_PMCSR_D3hot 3
#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0 0x154
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
        u32 value1 = 0;
        u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
        u32 value2 = LPSS_PMCSR_D3hot;
        u32 mask2 = LPSS_PMCSR_Dx_MASK;
        /*
         * The PMC provides information about the actual status of the LPSS
         * devices. Here we read the values related to the LPSS power island,
         * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
         * with the SCC domain.
         */
        u32 func_dis, d3_sts_0, pmc_status;
        int ret;

        ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
        if (ret)
                return;

        mutex_lock(&lpss_iosf_mutex);

        ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
        if (ret)
                goto exit;

        /*
         * Get the status of the entire LPSS power island on a per-device
         * basis. Shut down both LPSS DMA controllers if and only if all the
         * other devices are already in D3hot.
         */
        pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
        if (pmc_status)
                goto exit;

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);

        lpss_iosf_d3_entered = true;

exit:
        mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
        u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
                     LPSS_GPIODEF0_DMA_LLP;
        u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
        u32 value2 = LPSS_PMCSR_D0;
        u32 mask2 = LPSS_PMCSR_Dx_MASK;

        mutex_lock(&lpss_iosf_mutex);

        if (!lpss_iosf_d3_entered)
                goto exit;

        lpss_iosf_d3_entered = false;

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);

exit:
        mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);

        ret = acpi_dev_suspend(dev, wakeup);

        /*
         * This call must be last in the sequence, otherwise the PMC will
         * return a wrong status for devices that are about to be powered off.
         * See lpss_iosf_enter_d3_state() for further information.
         */
        if (acpi_target_system_state() == ACPI_STATE_S0 &&
            lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();

        return ret;
}

static int acpi_lpss_resume(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        /*
         * This call is kept first to mirror the ordering in
         * acpi_lpss_runtime_suspend().
         */
        if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();

        ret = acpi_dev_resume(dev);
        if (ret)
                return ret;

        acpi_lpss_d3_to_d0_delay(pdata);

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_restore_ctx(dev, pdata);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
        int ret;

        if (dev_pm_smart_suspend_and_suspended(dev))
                return 0;

        ret = pm_generic_suspend_late(dev);
        return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (pdata->dev_desc->resume_from_noirq)
                return 0;

        return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        if (pdata->dev_desc->resume_from_noirq) {
                /*
                 * The driver's ->suspend_late callback will be invoked by
                 * acpi_lpss_do_suspend_late(), with the assumption that the
                 * driver really wanted to run that code in ->suspend_noirq, but
                 * it could not run after acpi_dev_suspend() and the driver
                 * expected the latter to be called in the "late" phase.
                 */
                ret = acpi_lpss_do_suspend_late(dev);
                if (ret)
                        return ret;
        }

        return acpi_subsys_suspend_noirq(dev);
}

static int acpi_lpss_do_resume_early(struct device *dev)
{
        int ret = acpi_lpss_resume(dev);

        return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (pdata->dev_desc->resume_from_noirq)
                return 0;

        return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_resume_noirq(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        /* Follow acpi_subsys_resume_noirq(). */
        if (dev_pm_may_skip_resume(dev))
                return 0;

        if (dev_pm_smart_suspend_and_suspended(dev))
                pm_runtime_set_active(dev);

        ret = pm_generic_resume_noirq(dev);
        if (ret)
                return ret;

        if (!pdata->dev_desc->resume_from_noirq)
                return 0;

        /*
         * The driver's ->resume_early callback will be invoked by
         * acpi_lpss_do_resume_early(), with the assumption that the driver
         * really wanted to run that code in ->resume_noirq, but it could not
         * run before acpi_dev_resume() and the driver expected the latter to be
         * called in the "early" phase.
         */
        return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_do_restore_early(struct device *dev)
{
        int ret = acpi_lpss_resume(dev);

        return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (pdata->dev_desc->resume_from_noirq)
                return 0;

        return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = pm_generic_restore_noirq(dev);
        if (ret)
                return ret;

        if (!pdata->dev_desc->resume_from_noirq)
                return 0;

        /* This is analogous to what happens in acpi_lpss_resume_noirq(). */
        return acpi_lpss_do_restore_early(dev);
}

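/*
 * Hibernation "poweroff" phase: these callbacks mirror the suspend_late() /
 * suspend_noirq() pair above. Devices with resume_from_noirq set defer the
 * LPSS-specific suspend work to the noirq phase, and devices already runtime
 * suspended (smart suspend) are left alone.
 */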
static int acpi_lpss_do_poweroff_late(struct device *dev)
{
        int ret = pm_generic_poweroff_late(dev);

        return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (dev_pm_smart_suspend_and_suspended(dev))
                return 0;

        if (pdata->dev_desc->resume_from_noirq)
                return 0;

        return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (dev_pm_smart_suspend_and_suspended(dev))
                return 0;

        if (pdata->dev_desc->resume_from_noirq) {
                /* This is analogous to the acpi_lpss_suspend_noirq() case. */
                int ret = acpi_lpss_do_poweroff_late(dev);

                if (ret)
                        return ret;
        }

        return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
        int ret = pm_generic_runtime_suspend(dev);

        return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
        int ret = acpi_lpss_resume(dev);

        return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
        .activate = acpi_lpss_activate,
        .dismiss = acpi_lpss_dismiss,
#endif
        .ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
                .prepare = acpi_subsys_prepare,
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
                .suspend_late = acpi_lpss_suspend_late,
                .suspend_noirq = acpi_lpss_suspend_noirq,
                .resume_noirq = acpi_lpss_resume_noirq,
                .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .poweroff = acpi_subsys_poweroff,
                .poweroff_late = acpi_lpss_poweroff_late,
                .poweroff_noirq = acpi_lpss_poweroff_noirq,
                .restore_noirq = acpi_lpss_restore_noirq,
                .restore_early = acpi_lpss_restore_early,
#endif
                .runtime_suspend = acpi_lpss_runtime_suspend,
                .runtime_resume = acpi_lpss_runtime_resume,
#endif
        },
};

static int acpi_lpss_platform_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct platform_device *pdev = to_platform_device(data);
        struct lpss_private_data *pdata;
        struct acpi_device *adev;
        const struct acpi_device_id *id;

        id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
        if (!id || !id->driver_data)
                return 0;

        if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                return 0;

        pdata = acpi_driver_data(adev);
        if (!pdata)
                return 0;

        if (pdata->mmio_base &&
            pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
                dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
                return 0;
        }

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
                break;
        case BUS_NOTIFY_DRIVER_NOT_BOUND:
        case BUS_NOTIFY_UNBOUND_DRIVER:
                dev_pm_domain_set(&pdev->dev, NULL);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
                if (pdata->dev_desc->flags & LPSS_LTR)
                        return sysfs_create_group(&pdev->dev.kobj,
                                                  &lpss_attr_group);
                break;
        case BUS_NOTIFY_DEL_DEVICE:
                if (pdata->dev_desc->flags & LPSS_LTR)
                        sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
                dev_pm_domain_set(&pdev->dev, NULL);
                break;
        default:
                break;
        }

        return 0;
}

static struct notifier_block acpi_lpss_nb = {
        .notifier_call = acpi_lpss_platform_notify,
};

static void acpi_lpss_bind(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
                return;

        if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
                dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
        else
                dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
        dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
        .attach = acpi_lpss_create_device,
        .bind = acpi_lpss_bind,
        .unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        ret = lpt_clk_init();
        if (ret)
                return;

        id = x86_match_cpu(lpss_cpu_ids);
        if (id)
                lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

        bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
        acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
        acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */