drivers/acpi/acpi_lpss.c
1 /*
2 * ACPI support for Intel Lynxpoint LPSS.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #include <linux/acpi.h>
14 #include <linux/clkdev.h>
15 #include <linux/clk-provider.h>
16 #include <linux/err.h>
17 #include <linux/io.h>
18 #include <linux/mutex.h>
19 #include <linux/platform_device.h>
20 #include <linux/platform_data/clk-lpss.h>
21 #include <linux/platform_data/x86/pmc_atom.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pwm.h>
25 #include <linux/delay.h>
26
27 #include "internal.h"
28
29 ACPI_MODULE_NAME("acpi_lpss");
30
31 #ifdef CONFIG_X86_INTEL_LPSS
32
33 #include <asm/cpu_device_id.h>
34 #include <asm/intel-family.h>
35 #include <asm/iosf_mbi.h>
36
37 #define LPSS_ADDR(desc) ((unsigned long)&desc)
38
39 #define LPSS_CLK_SIZE 0x04
40 #define LPSS_LTR_SIZE 0x18
41
42 /* Offsets relative to LPSS_PRIVATE_OFFSET */
43 #define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
44 #define LPSS_RESETS 0x04
45 #define LPSS_RESETS_RESET_FUNC BIT(0)
46 #define LPSS_RESETS_RESET_APB BIT(1)
47 #define LPSS_GENERAL 0x08
48 #define LPSS_GENERAL_LTR_MODE_SW BIT(2)
49 #define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
50 #define LPSS_SW_LTR 0x10
51 #define LPSS_AUTO_LTR 0x14
52 #define LPSS_LTR_SNOOP_REQ BIT(15)
53 #define LPSS_LTR_SNOOP_MASK 0x0000FFFF
54 #define LPSS_LTR_SNOOP_LAT_1US 0x800
55 #define LPSS_LTR_SNOOP_LAT_32US 0xC00
56 #define LPSS_LTR_SNOOP_LAT_SHIFT 5
57 #define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
58 #define LPSS_LTR_MAX_VAL 0x3FF
59 #define LPSS_TX_INT 0x20
60 #define LPSS_TX_INT_MASK BIT(1)
61
62 #define LPSS_PRV_REG_COUNT 9
63
64 /* LPSS Flags */
65 #define LPSS_CLK BIT(0)
66 #define LPSS_CLK_GATE BIT(1)
67 #define LPSS_CLK_DIVIDER BIT(2)
68 #define LPSS_LTR BIT(3)
69 #define LPSS_SAVE_CTX BIT(4)
70 #define LPSS_NO_D3_DELAY BIT(5)
71
72 struct lpss_private_data;
73
74 struct lpss_device_desc {
75 unsigned int flags;
76 const char *clk_con_id;
77 unsigned int prv_offset;
78 size_t prv_size_override;
79 struct property_entry *properties;
80 void (*setup)(struct lpss_private_data *pdata);
81 };
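/*
 * A quick map of the descriptor fields: @flags is a mask of the LPSS_* bits
 * defined above, @prv_offset locates the device's private register block
 * inside its MMIO window, @prv_size_override (when non-zero) replaces the
 * MMIO size reported by _CRS, and @setup, if provided, runs from
 * acpi_lpss_create_device() before the device clock is registered, which is
 * why byt_i2c_setup() below can pre-set pdata->fixed_clk_rate.
 */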
82
83 static const struct lpss_device_desc lpss_dma_desc = {
84 .flags = LPSS_CLK,
85 };
86
87 struct lpss_private_data {
88 struct acpi_device *adev;
89 void __iomem *mmio_base;
90 resource_size_t mmio_size;
91 unsigned int fixed_clk_rate;
92 struct clk *clk;
93 const struct lpss_device_desc *dev_desc;
94 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
95 };
96
97 /* LPSS run time quirks */
98 static unsigned int lpss_quirks;
99
100 /*
101 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
102 *
103  * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
104  * it can be powered off automatically whenever the last LPSS device goes down.
105  * In case of no power, any access to the DMA controller will hang the system.
106  * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
107  * well as on the ASUS T100TA transformer.
108  *
109  * This quirk overrides the power state of the entire LPSS island to keep the
110  * DMA controller powered on whenever at least one other device is in use.
111 */
112 #define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)
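/*
 * Note: the quirk is set in acpi_lpss_init() for the Atom CPUs listed in
 * lpss_cpu_ids and takes effect in acpi_lpss_suspend()/acpi_lpss_resume(),
 * which call lpss_iosf_enter_d3_state()/lpss_iosf_exit_d3_state() so the
 * island (and thus the DMA controllers) is only powered down once every
 * other LPSS device is already in D3hot.
 */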
113
114 /* UART Component Parameter Register */
115 #define LPSS_UART_CPR 0xF4
116 #define LPSS_UART_CPR_AFCE BIT(4)
117
118 static void lpss_uart_setup(struct lpss_private_data *pdata)
119 {
120 unsigned int offset;
121 u32 val;
122
123 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
124 val = readl(pdata->mmio_base + offset);
125 writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
126
127 val = readl(pdata->mmio_base + LPSS_UART_CPR);
128 if (!(val & LPSS_UART_CPR_AFCE)) {
129 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
130 val = readl(pdata->mmio_base + offset);
131 val |= LPSS_GENERAL_UART_RTS_OVRD;
132 writel(val, pdata->mmio_base + offset);
133 }
134 }
135
136 static void lpss_deassert_reset(struct lpss_private_data *pdata)
137 {
138 unsigned int offset;
139 u32 val;
140
141 offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
142 val = readl(pdata->mmio_base + offset);
143 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
144 writel(val, pdata->mmio_base + offset);
145 }
146
147 /*
148 * BYT PWM used for backlight control by the i915 driver on systems without
149 * the Crystal Cove PMIC.
150 */
151 static struct pwm_lookup byt_pwm_lookup[] = {
152 PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
153 "pwm_backlight", 0, PWM_POLARITY_NORMAL,
154 "pwm-lpss-platform"),
155 };
156
157 static void byt_pwm_setup(struct lpss_private_data *pdata)
158 {
159 struct acpi_device *adev = pdata->adev;
160
161 /* Only call pwm_add_table for the first PWM controller */
162 if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
163 return;
164
165 if (!acpi_dev_present("INT33FD", NULL, -1))
166 pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
167 }
168
169 #define LPSS_I2C_ENABLE 0x6c
170
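/*
 * Runs before the device clock is registered: deassert the function and APB
 * resets, treat the I2C input clock as a fixed 133 MHz rate when the first
 * private register reads back non-zero, and clear the controller's enable
 * register (offset 0x6c) so the I2C host driver starts from a disabled
 * controller.
 */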
171 static void byt_i2c_setup(struct lpss_private_data *pdata)
172 {
173 lpss_deassert_reset(pdata);
174
175 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
176 pdata->fixed_clk_rate = 133000000;
177
178 writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
179 }
180
181 /* BSW PWM used for backlight control by the i915 driver */
182 static struct pwm_lookup bsw_pwm_lookup[] = {
183 PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
184 "pwm_backlight", 0, PWM_POLARITY_NORMAL,
185 "pwm-lpss-platform"),
186 };
187
188 static void bsw_pwm_setup(struct lpss_private_data *pdata)
189 {
190 struct acpi_device *adev = pdata->adev;
191
192 /* Only call pwm_add_table for the first PWM controller */
193 if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
194 return;
195
196 pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
197 }
198
199 static const struct lpss_device_desc lpt_dev_desc = {
200 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
201 .prv_offset = 0x800,
202 };
203
204 static const struct lpss_device_desc lpt_i2c_dev_desc = {
205 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
206 .prv_offset = 0x800,
207 };
208
209 static struct property_entry uart_properties[] = {
210 PROPERTY_ENTRY_U32("reg-io-width", 4),
211 PROPERTY_ENTRY_U32("reg-shift", 2),
212 PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
213 { },
214 };
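/*
 * Note: these built-in properties describe the DesignWare 8250 UART to its
 * driver: 32-bit MMIO registers ("reg-io-width" = 4) spaced 4 bytes apart
 * ("reg-shift" = 2), and a port that is fully 16550-compatible so
 * DesignWare-specific busy-detect workarounds can be skipped.
 */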
215
216 static const struct lpss_device_desc lpt_uart_dev_desc = {
217 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
218 .clk_con_id = "baudclk",
219 .prv_offset = 0x800,
220 .setup = lpss_uart_setup,
221 .properties = uart_properties,
222 };
223
224 static const struct lpss_device_desc lpt_sdio_dev_desc = {
225 .flags = LPSS_LTR,
226 .prv_offset = 0x1000,
227 .prv_size_override = 0x1018,
228 };
229
230 static const struct lpss_device_desc byt_pwm_dev_desc = {
231 .flags = LPSS_SAVE_CTX,
232 .setup = byt_pwm_setup,
233 };
234
235 static const struct lpss_device_desc bsw_pwm_dev_desc = {
236 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
237 .setup = bsw_pwm_setup,
238 };
239
240 static const struct lpss_device_desc byt_uart_dev_desc = {
241 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
242 .clk_con_id = "baudclk",
243 .prv_offset = 0x800,
244 .setup = lpss_uart_setup,
245 .properties = uart_properties,
246 };
247
248 static const struct lpss_device_desc bsw_uart_dev_desc = {
249 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
250 | LPSS_NO_D3_DELAY,
251 .clk_con_id = "baudclk",
252 .prv_offset = 0x800,
253 .setup = lpss_uart_setup,
254 .properties = uart_properties,
255 };
256
257 static const struct lpss_device_desc byt_spi_dev_desc = {
258 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
259 .prv_offset = 0x400,
260 };
261
262 static const struct lpss_device_desc byt_sdio_dev_desc = {
263 .flags = LPSS_CLK,
264 };
265
266 static const struct lpss_device_desc byt_i2c_dev_desc = {
267 .flags = LPSS_CLK | LPSS_SAVE_CTX,
268 .prv_offset = 0x800,
269 .setup = byt_i2c_setup,
270 };
271
272 static const struct lpss_device_desc bsw_i2c_dev_desc = {
273 .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
274 .prv_offset = 0x800,
275 .setup = byt_i2c_setup,
276 };
277
278 static const struct lpss_device_desc bsw_spi_dev_desc = {
279 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
280 | LPSS_NO_D3_DELAY,
281 .prv_offset = 0x400,
282 .setup = lpss_deassert_reset,
283 };
284
285 #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
286
287 static const struct x86_cpu_id lpss_cpu_ids[] = {
288 ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
289 ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
290 {}
291 };
292
293 #else
294
295 #define LPSS_ADDR(desc) (0UL)
296
297 #endif /* CONFIG_X86_INTEL_LPSS */
298
299 static const struct acpi_device_id acpi_lpss_device_ids[] = {
300 /* Generic LPSS devices */
301 { "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
302
303 /* Lynxpoint LPSS devices */
304 { "INT33C0", LPSS_ADDR(lpt_dev_desc) },
305 { "INT33C1", LPSS_ADDR(lpt_dev_desc) },
306 { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
307 { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
308 { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
309 { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
310 { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
311 { "INT33C7", },
312
313 /* BayTrail LPSS devices */
314 { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
315 { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
316 { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
317 { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
318 { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
319 { "INT33B2", },
320 { "INT33FC", },
321
322 /* Braswell LPSS devices */
323 { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
324 { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
325 { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
326 { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
327
328 /* Broadwell LPSS devices */
329 { "INT3430", LPSS_ADDR(lpt_dev_desc) },
330 { "INT3431", LPSS_ADDR(lpt_dev_desc) },
331 { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
332 { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
333 { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
334 { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
335 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
336 { "INT3437", },
337
338 /* Wildcat Point LPSS devices */
339 { "INT3438", LPSS_ADDR(lpt_dev_desc) },
340
341 { }
342 };
343
344 #ifdef CONFIG_X86_INTEL_LPSS
345
346 static int is_memory(struct acpi_resource *res, void *not_used)
347 {
348 struct resource r;
349 return !acpi_dev_resource_memory(res, &r);
350 }
351
352 /* LPSS main clock device. */
353 static struct platform_device *lpss_clk_dev;
354
355 static inline void lpt_register_clock_device(void)
356 {
357 lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
358 }
359
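/*
 * Build the per-device clock chain on top of the shared "clk-lpt" root
 * clock: an optional gate named after the device, then a fractional divider
 * ("<dev>-div") plus an update gate ("<dev>-update") when LPSS_CLK_DIVIDER
 * is set, or a single fixed-rate clock when setup() has filled in
 * fixed_clk_rate. Finally a clkdev lookup is registered so the platform
 * driver can claim the clock via dev_desc->clk_con_id (e.g. "baudclk" for
 * the UARTs).
 */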
360 static int register_device_clock(struct acpi_device *adev,
361 struct lpss_private_data *pdata)
362 {
363 const struct lpss_device_desc *dev_desc = pdata->dev_desc;
364 const char *devname = dev_name(&adev->dev);
365 struct clk *clk;
366 struct lpss_clk_data *clk_data;
367 const char *parent, *clk_name;
368 void __iomem *prv_base;
369
370 if (!lpss_clk_dev)
371 lpt_register_clock_device();
372
373 clk_data = platform_get_drvdata(lpss_clk_dev);
374 if (!clk_data)
375 return -ENODEV;
376 clk = clk_data->clk;
377
378 if (!pdata->mmio_base
379 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
380 return -ENODATA;
381
382 parent = clk_data->name;
383 prv_base = pdata->mmio_base + dev_desc->prv_offset;
384
385 if (pdata->fixed_clk_rate) {
386 clk = clk_register_fixed_rate(NULL, devname, parent, 0,
387 pdata->fixed_clk_rate);
388 goto out;
389 }
390
391 if (dev_desc->flags & LPSS_CLK_GATE) {
392 clk = clk_register_gate(NULL, devname, parent, 0,
393 prv_base, 0, 0, NULL);
394 parent = devname;
395 }
396
397 if (dev_desc->flags & LPSS_CLK_DIVIDER) {
398 /* Prevent division by zero */
399 if (!readl(prv_base))
400 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
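/*
 * The default value just written sets both the 15-bit numerator
 * (bits 15:1) and denominator (bits 30:16) fields to 1, i.e. an
 * m/n ratio of 1/1, so the divider initially passes the parent
 * rate through unchanged.
 */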
401
402 clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
403 if (!clk_name)
404 return -ENOMEM;
405 clk = clk_register_fractional_divider(NULL, clk_name, parent,
406 0, prv_base,
407 1, 15, 16, 15, 0, NULL);
408 parent = clk_name;
409
410 clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
411 if (!clk_name) {
412 kfree(parent);
413 return -ENOMEM;
414 }
415 clk = clk_register_gate(NULL, clk_name, parent,
416 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
417 prv_base, 31, 0, NULL);
418 kfree(parent);
419 kfree(clk_name);
420 }
421 out:
422 if (IS_ERR(clk))
423 return PTR_ERR(clk);
424
425 pdata->clk = clk;
426 clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
427 return 0;
428 }
429
430 static int acpi_lpss_create_device(struct acpi_device *adev,
431 const struct acpi_device_id *id)
432 {
433 const struct lpss_device_desc *dev_desc;
434 struct lpss_private_data *pdata;
435 struct resource_entry *rentry;
436 struct list_head resource_list;
437 struct platform_device *pdev;
438 int ret;
439
440 dev_desc = (const struct lpss_device_desc *)id->driver_data;
441 if (!dev_desc) {
442 pdev = acpi_create_platform_device(adev, NULL);
443 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
444 }
445 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
446 if (!pdata)
447 return -ENOMEM;
448
449 INIT_LIST_HEAD(&resource_list);
450 ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
451 if (ret < 0)
452 goto err_out;
453
454 list_for_each_entry(rentry, &resource_list, node)
455 if (resource_type(rentry->res) == IORESOURCE_MEM) {
456 if (dev_desc->prv_size_override)
457 pdata->mmio_size = dev_desc->prv_size_override;
458 else
459 pdata->mmio_size = resource_size(rentry->res);
460 pdata->mmio_base = ioremap(rentry->res->start,
461 pdata->mmio_size);
462 break;
463 }
464
465 acpi_dev_free_resource_list(&resource_list);
466
467 if (!pdata->mmio_base) {
468 /* Skip the device, but continue the namespace scan. */
469 ret = 0;
470 goto err_out;
471 }
472
473 pdata->adev = adev;
474 pdata->dev_desc = dev_desc;
475
476 if (dev_desc->setup)
477 dev_desc->setup(pdata);
478
479 if (dev_desc->flags & LPSS_CLK) {
480 ret = register_device_clock(adev, pdata);
481 if (ret) {
482 /* Skip the device, but continue the namespace scan. */
483 ret = 0;
484 goto err_out;
485 }
486 }
487
488 /*
489 * This works around a known issue in ACPI tables where LPSS devices
490 * have _PS0 and _PS3 without _PSC (and no power resources), so
491 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
492 */
493 ret = acpi_device_fix_up_power(adev);
494 if (ret) {
495 /* Skip the device, but continue the namespace scan. */
496 ret = 0;
497 goto err_out;
498 }
499
500 adev->driver_data = pdata;
501 pdev = acpi_create_platform_device(adev, dev_desc->properties);
502 if (!IS_ERR_OR_NULL(pdev)) {
503 return 1;
504 }
505
506 ret = PTR_ERR(pdev);
507 adev->driver_data = NULL;
508
509 err_out:
510 kfree(pdata);
511 return ret;
512 }
513
514 static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
515 {
516 return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
517 }
518
519 static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
520 unsigned int reg)
521 {
522 writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
523 }
524
525 static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
526 {
527 struct acpi_device *adev;
528 struct lpss_private_data *pdata;
529 unsigned long flags;
530 int ret;
531
532 ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
533 if (WARN_ON(ret))
534 return ret;
535
536 spin_lock_irqsave(&dev->power.lock, flags);
537 if (pm_runtime_suspended(dev)) {
538 ret = -EAGAIN;
539 goto out;
540 }
541 pdata = acpi_driver_data(adev);
542 if (WARN_ON(!pdata || !pdata->mmio_base)) {
543 ret = -ENODEV;
544 goto out;
545 }
546 *val = __lpss_reg_read(pdata, reg);
547
548 out:
549 spin_unlock_irqrestore(&dev->power.lock, flags);
550 return ret;
551 }
552
553 static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
554 char *buf)
555 {
556 u32 ltr_value = 0;
557 unsigned int reg;
558 int ret;
559
560 reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
561 ret = lpss_reg_read(dev, reg, &ltr_value);
562 if (ret)
563 return ret;
564
565 return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
566 }
567
568 static ssize_t lpss_ltr_mode_show(struct device *dev,
569 struct device_attribute *attr, char *buf)
570 {
571 u32 ltr_mode = 0;
572 char *outstr;
573 int ret;
574
575 ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
576 if (ret)
577 return ret;
578
579 outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
580 return sprintf(buf, "%s\n", outstr);
581 }
582
583 static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
584 static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
585 static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
586
587 static struct attribute *lpss_attrs[] = {
588 &dev_attr_auto_ltr.attr,
589 &dev_attr_sw_ltr.attr,
590 &dev_attr_ltr_mode.attr,
591 NULL,
592 };
593
594 static const struct attribute_group lpss_attr_group = {
595 .attrs = lpss_attrs,
596 .name = "lpss_ltr",
597 };
598
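/*
 * acpi_lpss_set_ltr() below translates a latency tolerance request (in us)
 * into the SW_LTR register encoding: values at or above the 3000 us cutoff
 * are clamped to the maximum 32 us-scaled value, values above
 * LPSS_LTR_MAX_VAL are scaled down by 32 (e.g. 2000 us becomes
 * 2000 >> 5 = 62, i.e. roughly 1984 us), and smaller values are written
 * directly with the 1 us scale. A negative value switches the device back
 * to automatic LTR mode.
 */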
599 static void acpi_lpss_set_ltr(struct device *dev, s32 val)
600 {
601 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
602 u32 ltr_mode, ltr_val;
603
604 ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
605 if (val < 0) {
606 if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
607 ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
608 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
609 }
610 return;
611 }
612 ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
613 if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
614 ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
615 val = LPSS_LTR_MAX_VAL;
616 } else if (val > LPSS_LTR_MAX_VAL) {
617 ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
618 val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
619 } else {
620 ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
621 }
622 ltr_val |= val;
623 __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
624 if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
625 ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
626 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
627 }
628 }
629
630 #ifdef CONFIG_PM
631 /**
632 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
633 * @dev: LPSS device
634 * @pdata: pointer to the private data of the LPSS device
635 *
636  * Most LPSS devices have private registers which may lose their context when
637 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
638 * prv_reg_ctx array.
639 */
640 static void acpi_lpss_save_ctx(struct device *dev,
641 struct lpss_private_data *pdata)
642 {
643 unsigned int i;
644
645 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
646 unsigned long offset = i * sizeof(u32);
647
648 pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
649 dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
650 pdata->prv_reg_ctx[i], offset);
651 }
652 }
653
654 /**
655 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
656 * @dev: LPSS device
657 * @pdata: pointer to the private data of the LPSS device
658 *
659 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
660 */
661 static void acpi_lpss_restore_ctx(struct device *dev,
662 struct lpss_private_data *pdata)
663 {
664 unsigned int i;
665
666 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
667 unsigned long offset = i * sizeof(u32);
668
669 __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
670 dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
671 pdata->prv_reg_ctx[i], offset);
672 }
673 }
674
675 static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
676 {
677 /*
678 * The following delay is needed or the subsequent write operations may
679  * fail. The LPSS devices are actually PCI devices, and the PCI spec
680  * expects a 10ms delay before the device can be accessed after a D3 to D0
681  * transition. However, some platforms such as BSW do not need this delay.
682 */
683 unsigned int delay = 10; /* default 10ms delay */
684
685 if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
686 delay = 0;
687
688 msleep(delay);
689 }
690
691 static int acpi_lpss_activate(struct device *dev)
692 {
693 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
694 int ret;
695
696 ret = acpi_dev_resume(dev);
697 if (ret)
698 return ret;
699
700 acpi_lpss_d3_to_d0_delay(pdata);
701
702 /*
703  * This is called only at the ->probe() stage, where a device is either in a
704  * known state defined by the BIOS or, most likely, powered off. Due to this
705  * we have to deassert the reset line to be sure that ->probe() will
706 * recognize the device.
707 */
708 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
709 lpss_deassert_reset(pdata);
710
711 return 0;
712 }
713
714 static void acpi_lpss_dismiss(struct device *dev)
715 {
716 acpi_dev_suspend(dev, false);
717 }
718
719 /* IOSF SB for LPSS island */
720 #define LPSS_IOSF_UNIT_LPIOEP 0xA0
721 #define LPSS_IOSF_UNIT_LPIO1 0xAB
722 #define LPSS_IOSF_UNIT_LPIO2 0xAC
723
724 #define LPSS_IOSF_PMCSR 0x84
725 #define LPSS_PMCSR_D0 0
726 #define LPSS_PMCSR_D3hot 3
727 #define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)
728
729 #define LPSS_IOSF_GPIODEF0 0x154
730 #define LPSS_GPIODEF0_DMA1_D3 BIT(2)
731 #define LPSS_GPIODEF0_DMA2_D3 BIT(3)
732 #define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
733 #define LPSS_GPIODEF0_DMA_LLP BIT(13)
734
735 static DEFINE_MUTEX(lpss_iosf_mutex);
736
737 static void lpss_iosf_enter_d3_state(void)
738 {
739 u32 value1 = 0;
740 u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
741 u32 value2 = LPSS_PMCSR_D3hot;
742 u32 mask2 = LPSS_PMCSR_Dx_MASK;
743 /*
744  * The PMC provides information about the actual status of the LPSS devices.
745  * Here we read the values related to the LPSS power island, i.e. LPSS
746  * devices, excluding both LPSS DMA controllers, along with the SCC domain.
747 */
748 u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
749 int ret;
750
751 ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
752 if (ret)
753 return;
754
755 mutex_lock(&lpss_iosf_mutex);
756
757 ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
758 if (ret)
759 goto exit;
760
761 /*
762  * Get the status of the entire LPSS power island on a per-device basis.
763  * Shut down both LPSS DMA controllers if and only if all other devices
764 * are already in D3hot.
765 */
766 pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
767 if (pmc_status)
768 goto exit;
769
770 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
771 LPSS_IOSF_PMCSR, value2, mask2);
772
773 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
774 LPSS_IOSF_PMCSR, value2, mask2);
775
776 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
777 LPSS_IOSF_GPIODEF0, value1, mask1);
778 exit:
779 mutex_unlock(&lpss_iosf_mutex);
780 }
781
782 static void lpss_iosf_exit_d3_state(void)
783 {
784 u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
785 LPSS_GPIODEF0_DMA_LLP;
786 u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
787 u32 value2 = LPSS_PMCSR_D0;
788 u32 mask2 = LPSS_PMCSR_Dx_MASK;
789
790 mutex_lock(&lpss_iosf_mutex);
791
792 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
793 LPSS_IOSF_GPIODEF0, value1, mask1);
794
795 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
796 LPSS_IOSF_PMCSR, value2, mask2);
797
798 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
799 LPSS_IOSF_PMCSR, value2, mask2);
800
801 mutex_unlock(&lpss_iosf_mutex);
802 }
803
804 static int acpi_lpss_suspend(struct device *dev, bool wakeup)
805 {
806 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
807 int ret;
808
809 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
810 acpi_lpss_save_ctx(dev, pdata);
811
812 ret = acpi_dev_suspend(dev, wakeup);
813
814 /*
815  * This call must be last in the sequence; otherwise, the PMC will return a
816  * wrong status for devices that are about to be powered off. See
817 * lpss_iosf_enter_d3_state() for further information.
818 */
819 if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
820 lpss_iosf_enter_d3_state();
821
822 return ret;
823 }
824
825 static int acpi_lpss_resume(struct device *dev)
826 {
827 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
828 int ret;
829
830 /*
831  * This call is kept first to remain symmetric with the one in
832  * acpi_lpss_runtime_suspend().
833 */
834 if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
835 lpss_iosf_exit_d3_state();
836
837 ret = acpi_dev_resume(dev);
838 if (ret)
839 return ret;
840
841 acpi_lpss_d3_to_d0_delay(pdata);
842
843 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
844 acpi_lpss_restore_ctx(dev, pdata);
845
846 return 0;
847 }
848
849 #ifdef CONFIG_PM_SLEEP
850 static int acpi_lpss_suspend_late(struct device *dev)
851 {
852 int ret;
853
854 if (dev_pm_smart_suspend_and_suspended(dev))
855 return 0;
856
857 ret = pm_generic_suspend_late(dev);
858 return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
859 }
860
861 static int acpi_lpss_resume_early(struct device *dev)
862 {
863 int ret = acpi_lpss_resume(dev);
864
865 return ret ? ret : pm_generic_resume_early(dev);
866 }
867 #endif /* CONFIG_PM_SLEEP */
868
869 static int acpi_lpss_runtime_suspend(struct device *dev)
870 {
871 int ret = pm_generic_runtime_suspend(dev);
872
873 return ret ? ret : acpi_lpss_suspend(dev, true);
874 }
875
876 static int acpi_lpss_runtime_resume(struct device *dev)
877 {
878 int ret = acpi_lpss_resume(dev);
879
880 return ret ? ret : pm_generic_runtime_resume(dev);
881 }
882 #endif /* CONFIG_PM */
883
884 static struct dev_pm_domain acpi_lpss_pm_domain = {
885 #ifdef CONFIG_PM
886 .activate = acpi_lpss_activate,
887 .dismiss = acpi_lpss_dismiss,
888 #endif
889 .ops = {
890 #ifdef CONFIG_PM
891 #ifdef CONFIG_PM_SLEEP
892 .prepare = acpi_subsys_prepare,
893 .complete = acpi_subsys_complete,
894 .suspend = acpi_subsys_suspend,
895 .suspend_late = acpi_lpss_suspend_late,
896 .suspend_noirq = acpi_subsys_suspend_noirq,
897 .resume_noirq = acpi_subsys_resume_noirq,
898 .resume_early = acpi_lpss_resume_early,
899 .freeze = acpi_subsys_freeze,
900 .freeze_late = acpi_subsys_freeze_late,
901 .freeze_noirq = acpi_subsys_freeze_noirq,
902 .thaw_noirq = acpi_subsys_thaw_noirq,
903 .poweroff = acpi_subsys_suspend,
904 .poweroff_late = acpi_lpss_suspend_late,
905 .poweroff_noirq = acpi_subsys_suspend_noirq,
906 .restore_noirq = acpi_subsys_resume_noirq,
907 .restore_early = acpi_lpss_resume_early,
908 #endif
909 .runtime_suspend = acpi_lpss_runtime_suspend,
910 .runtime_resume = acpi_lpss_runtime_resume,
911 #endif
912 },
913 };
914
915 static int acpi_lpss_platform_notify(struct notifier_block *nb,
916 unsigned long action, void *data)
917 {
918 struct platform_device *pdev = to_platform_device(data);
919 struct lpss_private_data *pdata;
920 struct acpi_device *adev;
921 const struct acpi_device_id *id;
922
923 id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
924 if (!id || !id->driver_data)
925 return 0;
926
927 if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
928 return 0;
929
930 pdata = acpi_driver_data(adev);
931 if (!pdata)
932 return 0;
933
934 if (pdata->mmio_base &&
935 pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
936 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
937 return 0;
938 }
939
940 switch (action) {
941 case BUS_NOTIFY_BIND_DRIVER:
942 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
943 break;
944 case BUS_NOTIFY_DRIVER_NOT_BOUND:
945 case BUS_NOTIFY_UNBOUND_DRIVER:
946 dev_pm_domain_set(&pdev->dev, NULL);
947 break;
948 case BUS_NOTIFY_ADD_DEVICE:
949 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
950 if (pdata->dev_desc->flags & LPSS_LTR)
951 return sysfs_create_group(&pdev->dev.kobj,
952 &lpss_attr_group);
953 break;
954 case BUS_NOTIFY_DEL_DEVICE:
955 if (pdata->dev_desc->flags & LPSS_LTR)
956 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
957 dev_pm_domain_set(&pdev->dev, NULL);
958 break;
959 default:
960 break;
961 }
962
963 return 0;
964 }
965
966 static struct notifier_block acpi_lpss_nb = {
967 .notifier_call = acpi_lpss_platform_notify,
968 };
969
970 static void acpi_lpss_bind(struct device *dev)
971 {
972 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
973
974 if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
975 return;
976
977 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
978 dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
979 else
980 dev_err(dev, "MMIO size insufficient to access LTR\n");
981 }
982
983 static void acpi_lpss_unbind(struct device *dev)
984 {
985 dev->power.set_latency_tolerance = NULL;
986 }
987
988 static struct acpi_scan_handler lpss_handler = {
989 .ids = acpi_lpss_device_ids,
990 .attach = acpi_lpss_create_device,
991 .bind = acpi_lpss_bind,
992 .unbind = acpi_lpss_unbind,
993 };
994
995 void __init acpi_lpss_init(void)
996 {
997 const struct x86_cpu_id *id;
998 int ret;
999
1000 ret = lpt_clk_init();
1001 if (ret)
1002 return;
1003
1004 id = x86_match_cpu(lpss_cpu_ids);
1005 if (id)
1006 lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
1007
1008 bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
1009 acpi_scan_add_handler(&lpss_handler);
1010 }
1011
1012 #else
1013
1014 static struct acpi_scan_handler lpss_handler = {
1015 .ids = acpi_lpss_device_ids,
1016 };
1017
1018 void __init acpi_lpss_init(void)
1019 {
1020 acpi_scan_add_handler(&lpss_handler);
1021 }
1022
1023 #endif /* CONFIG_X86_INTEL_LPSS */