// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

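/*
 * struct ufs_host must be the first member of struct intel_host so that
 * the pointer ufshcd_get_variant() returns can be used as either type;
 * ufs_intel_lkf_init() relies on this when it installs ->late_init.
 */
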
struct ufs_host {
	void	(*late_init)(struct ufs_hba *hba);
};

enum {
	INTEL_DSM_FNS		=  0,
	INTEL_DSM_RESET		=  1,
};

struct intel_host {
	struct ufs_host ufs_host;
	u32		dsm_fns;
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
	struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

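/*
 * Intel's _DSM convention here: function 0 (INTEL_DSM_FNS) returns a
 * bitmask of the functions the platform supports.  intel_dsm() below
 * checks the cached bitmask before evaluating anything else, so a
 * missing or short _DSM buffer simply leaves the optional features
 * disabled.
 */
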
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
	int err;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status)
{
	/* Cannot enable ICE until after HC enable */
	if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
		u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

		hce |= CRYPTO_GENERAL_ENABLE;
		ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
	}

	return 0;
}

static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
	int ret;

	pwr_info.lane_rx = lanes;
	pwr_info.lane_tx = lanes;
	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
			__func__, lanes, ret);
	return ret;
}

static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (ufshcd_is_hs_mode(dev_max_params) &&
		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
			ufs_intel_set_lanes(hba, 2);
		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
		break;
	case POST_CHANGE:
		if (ufshcd_is_hs_mode(dev_req_params)) {
			u32 peer_granularity;

			usleep_range(1000, 1250);
			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
						  &peer_granularity);
		}
		break;
	default:
		break;
	}

	return err;
}

static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
	if (ret)
		goto out;

	if (granularity == peer_granularity) {
		u32 new_peer_pa_tactivate = pa_tactivate + 2;

		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
	}
out:
	return ret;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

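/*
 * LTR encoding, as implied by the masks above: bits 9:0 carry the
 * latency value, bits 11:10 the scale (2 = 1 us units, 3 = 32 us units)
 * and bit 15 marks the request as valid.  For example, a 3000 us
 * tolerance does not fit the 10-bit field in 1 us units, so
 * intel_ltr_set() shifts it right by 5 (1 us -> 32 us units), storing
 * 93, i.e. roughly 2976 us.
 */
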
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) according to what the PM QoS
	 * layer has asked for, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}

static int ufs_intel_device_reset(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	if (host->dsm_fns & INTEL_DSM_RESET) {
		u32 result = 0;
		int err;

		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
		if (!err && !result)
			err = -EIO;
		if (err)
			dev_err(hba->dev, "%s: DSM error %d result %u\n",
				__func__, err, result);
		return err;
	}

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_dsm_init(host, hba->dev);
	if (host->dsm_fns & INTEL_DSM_RESET) {
		if (hba->vops->device_reset)
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	} else {
		if (hba->vops->device_reset)
			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
		if (IS_ERR(host->reset_gpio)) {
			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
				__func__, PTR_ERR(host->reset_gpio));
			host->reset_gpio = NULL;
		}
		if (host->reset_gpio) {
			gpiod_set_value_cansleep(host->reset_gpio, 0);
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		}
	}
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}

static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
	/* LKF always needs a full reset, so set PM accordingly */
	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
		hba->spm_lvl = UFS_PM_LVL_6;
		hba->rpm_lvl = UFS_PM_LVL_6;
	} else {
		hba->spm_lvl = UFS_PM_LVL_5;
		hba->rpm_lvl = UFS_PM_LVL_5;
	}
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
	struct ufs_host *ufs_host;
	int err;

	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	err = ufs_intel_common_init(hba);
	ufs_host = ufshcd_get_variant(hba);
	ufs_host->late_init = ufs_intel_lkf_late_init;
	return err;
}

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_lkf_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* Force a full reset and restore */
	ufshcd_set_link_off(hba);

	return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
 *		data structure memory
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_host *ufs_host;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	pci_set_drvdata(pdev, hba);

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	ufs_host = ufshcd_get_variant(hba);
	if (ufs_host && ufs_host->late_init)
		ufs_host->late_init(hba);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_system_suspend,
	.resume		= ufshcd_system_resume,
	.freeze		= ufshcd_system_suspend,
	.thaw		= ufshcd_system_resume,
	.poweroff	= ufshcd_system_suspend,
	.restore	= ufshcd_pci_restore,
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#endif
};

static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
	.name		= UFSHCD,
	.id_table	= ufshcd_pci_tbl,
	.probe		= ufshcd_pci_probe,
	.remove		= ufshcd_pci_remove,
	.shutdown	= ufshcd_pci_shutdown,
	.driver		= {
		.pm	= &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);