// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
/*
 * For some devices the DSDT AML code for another device turns off the device
 * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff)
 * as ctx register values.
 * Luckily these devices always use the same ctx register values, so we can
 * work around this by saving the ctx registers once on activation.
 */
#define LPSS_SAVE_CTX_ONCE		BIT(5)
#define LPSS_NO_D3_DELAY		BIT(6)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * In case of no power, any access to the DMA controller will hang the system.
 * The behaviour has been observed on some HP laptops based on Intel BayTrail
 * as well as on the ASUS T100TA Transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)

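/*
 * Mask the LPSS TX interrupt and, if the UART has no hardware auto flow
 * control (CPR.AFCE clear), let software drive RTS via the override bit
 * in the GENERAL register.
 */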
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

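/* Take both the function and the APB interface out of reset. */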
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE			0x6c

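/*
 * Detect an I2C host shared with the PUNIT (via _SEM) and drop it from the
 * PMC D3 mask, deassert reset, record a fixed 133 MHz clock rate if the
 * private clock register is already programmed, and disable the host.
 */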
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	long uid = 0;

	/* Expected to always be true, but better safe than sorry */
	if (uid_str)
		uid = simple_strtol(uid_str, NULL, 10);

	/* Detect I2C bus shared with PUNIT and ignore its d3 status */
	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_SUCCESS(status) && shared_host && uid)
		pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

static const struct x86_cpu_id lpss_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

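/*
 * acpi_dev_get_resources() filter: resources that are not memory resources
 * are left out of the list.
 */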
static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;
	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

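/*
 * Register the per-device clock (and its clkdev lookup) on top of the shared
 * LPT root clock: either a fixed-rate clock when one was detected by the
 * ->setup() hook, or an optional gate plus a fractional divider with an
 * update gate, depending on the LPSS_CLK_* flags.
 */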
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
	const struct dmi_system_id *dep_missing_ids;
};

/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
		},
	},
	{}
};

/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list are
 * created. That is necessary because, in the general case, _DEP can refer to
 * devices that might not have drivers, or that are on different buses, or
 * where the supplier is not enumerated until after the consumer is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	/* CHT iGPU depends on PMIC I2C controller */
	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
	 i2c1_dep_missing_dmi_ids},
	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};

static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}

struct hid_uid {
	const char *hid;
	const char *uid;
};

static int match_hid_uid(struct device *dev, const void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const struct hid_uid *id = data;

	if (!adev)
		return 0;

	return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}

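/* Find the platform or PCI device whose ACPI companion matches @hid/@uid. */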
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}

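/* Check whether @handle is listed in the _DEP method of @adev. */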
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
	struct acpi_handle_list dep_devices;
	acpi_status status;
	int i;

	if (!acpi_has_method(adev->handle, "_DEP"))
		return false;

	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
					 &dep_devices);
	if (ACPI_FAILURE(status)) {
		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
		return false;
	}

	for (i = 0; i < dep_devices.count; i++) {
		if (dep_devices.handles[i] == handle)
			return true;
	}

	return false;
}

static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}

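/*
 * Scan handler ->attach() callback: map the private MMIO space, run the
 * per-device ->setup() hook, register the device clock if needed and finally
 * create the platform device.  Returning 1 tells the ACPI core that the
 * device has been handled; returning 0 skips it but lets the namespace scan
 * continue.
 */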
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		acpi_lpss_create_device_links(adev, pdev);
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}

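/*
 * Accessors for the LPSS private register space.  lpss_reg_read() also
 * refuses to touch the hardware while the device is runtime suspended.
 */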
static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};

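/*
 * ->set_latency_tolerance() hook: program the software LTR value, scaling it
 * into the 1 us or 32 us granularity as needed; a negative value switches the
 * device back to automatic LTR mode.
 */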
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10ms delay before the device can be accessed after a D3 to
	 * D0 transition. However, some platforms like BSW do not need this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where a device is either
	 * in a known state defined by the BIOS or most likely powered off. Due
	 * to this we have to deassert the reset line to be sure that ->probe()
	 * will recognize the device.
	 */
	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		lpss_deassert_reset(pdata);

#ifdef CONFIG_PM
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
		acpi_lpss_save_ctx(dev, pdata);
#endif

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

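/*
 * Counterpart of lpss_iosf_enter_d3_state(): bring the LPSS DMA controllers
 * back to D0, undoing the IOSF writes in reverse order.
 */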
static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered off.
	 * See lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend().
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_skip_suspend(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq, but
		 * it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}

static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	if (dev_pm_skip_resume(dev))
		return 0;

	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_skip_resume(dev))
		return 0;

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to be
	 * called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);
		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

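/*
 * Platform bus notifier: install the LPSS PM domain when a matching platform
 * device is added or bound to a driver, expose the LTR sysfs group for
 * devices with LPSS_LTR, and tear both down again on removal.
 */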
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */