drivers/misc/mei/pci-me.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},

        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
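
/*
 * Note: each MEI_PCI_DEVICE() entry pairs an Intel device ID with a
 * per-generation struct mei_cfg (legacy, ICH, PCH, PCH8, SPS, ...).
 * The config travels through pci_device_id.driver_data and is recovered
 * in mei_me_probe(). The macro itself is defined in hw-me-regs.h; a rough,
 * non-authoritative sketch of what it is expected to expand to:
 *
 *      #define MEI_PCI_DEVICE(dev, cfg) \
 *              .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
 *              .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
 *              .driver_data = (kernel_ulong_t)&(cfg)
 */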

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
                               const struct mei_cfg *cfg)
{
        if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
                dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
                return false;
        }

        return true;
}
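
/*
 * cfg->quirk_probe is an optional per-generation hook; generations that
 * need no extra check leave it NULL and the device is accepted as-is.
 * When the hook is present and returns non-zero, the ME interface is
 * treated as unusable and mei_me_probe() bails out with -ENODEV.
 */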

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
        struct mei_device *dev;
        struct mei_me_hw *hw;
        unsigned int irqflags;
        int err;

        if (!mei_me_quirk_probe(pdev, cfg))
                return -ENODEV;

        /* enable the PCI device */
        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* enable PCI bus mastering */
        pci_set_master(pdev);
        /* request the PCI regions and map the device's IO memory */
        err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto end;
        }

        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
            dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto end;
        }
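        /*
         * The mask setup above prefers 64-bit DMA and only falls back to a
         * 32-bit streaming (then coherent) mask if the 64-bit request is
         * rejected; if no usable configuration remains, probing is aborted
         * because the host interface cannot do DMA at all.
         */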

        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(pdev, cfg);
        if (!dev) {
                err = -ENOMEM;
                goto end;
        }
        hw = to_me_hw(dev);
        hw->mem_addr = pcim_iomap_table(pdev)[0];

        pci_enable_msi(pdev);

        /* request and enable interrupt */
        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
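        /*
         * With MSI the interrupt is exclusive to this device, so the
         * threaded handler is registered with IRQF_ONESHOT (keep the line
         * masked until the thread has run); on legacy, possibly shared,
         * interrupt lines IRQF_SHARED is used instead.
         */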

        err = request_threaded_irq(pdev->irq,
                                   mei_me_irq_quick_handler,
                                   mei_me_irq_thread_handler,
                                   irqflags, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto end;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * For hardware that is not wake-capable, the runtime PM framework
         * cannot be used at the PCI device level.
         * Use domain runtime PM callbacks instead.
         */
        if (!pci_dev_run_wake(pdev))
                mei_me_set_pm_domain(dev);

        if (mei_pg_is_enabled(dev))
                pm_runtime_put_noidle(&pdev->dev);

        dev_dbg(&pdev->dev, "initialization successful.\n");

        return 0;

stop:
        mei_stop(dev);
release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it is a simplified version of remove, so the device
 * goes down faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);

        if (!pci_dev_run_wake(pdev))
                mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
}
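
/*
 * Unlike mei_me_remove() below, the shutdown path does not balance the
 * runtime-PM reference taken in probe and does not call mei_deregister();
 * it only quiesces the hardware and releases the interrupt so a reboot
 * can proceed quickly.
 */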

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        if (mei_pg_is_enabled(dev))
                pm_runtime_get_noresume(&pdev->dev);

        dev_dbg(&pdev->dev, "stop\n");
        mei_stop(dev);

        if (!pci_dev_run_wake(pdev))
                mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);

        mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}

static int mei_me_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        unsigned int irqflags;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

        /* request and enable interrupt */
        err = request_threaded_irq(pdev->irq,
                                   mei_me_irq_quick_handler,
                                   mei_me_irq_thread_handler,
                                   irqflags, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        err = mei_restart(dev);
        if (err)
                return err;

        /* Start timer if stopped in suspend */
        schedule_delayed_work(&dev->timer_work, HZ);

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;

        dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

        return -EBUSY;
}
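
/*
 * Returning -EBUSY from the ->runtime_idle callback tells the runtime-PM
 * core not to proceed with a suspend of its own; instead, when no client
 * writes are pending, pm_runtime_autosuspend() is invoked explicitly so
 * the suspend still honours the autosuspend delay configured in
 * mei_me_probe() (MEI_ME_RPM_TIMEOUT).
 */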

static int mei_me_pm_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_me_pg_enter_sync(dev);
        else
                ret = -EAGAIN;

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

        if (ret && ret != -EAGAIN)
                schedule_work(&dev->reset_work);

        return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        ret = mei_me_pg_exit_sync(dev);

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

        if (ret)
                schedule_work(&dev->reset_work);

        return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if (pdev->dev.bus && pdev->dev.bus->pm) {
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}
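
/*
 * The PM core gives a device's pm_domain callbacks precedence over the
 * bus callbacks, so installing this domain reroutes runtime
 * suspend/resume/idle to the MEI-specific handlers while the copied PCI
 * bus callbacks keep handling everything else (system sleep, etc.).
 * It is only wired up from mei_me_probe() when pci_dev_run_wake()
 * reports the device as not wake-capable.
 */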

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
                                mei_me_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_me_pm_runtime_suspend,
                mei_me_pm_runtime_resume,
                mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS   (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS   NULL
#endif /* CONFIG_PM */
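
/*
 * MEI_ME_PM_OPS collapses to NULL when CONFIG_PM is not set, so the
 * pci_driver below can assign .driver.pm unconditionally. When
 * CONFIG_PM_SLEEP is also enabled, SET_SYSTEM_SLEEP_PM_OPS() wires
 * mei_me_pci_suspend/resume into the system sleep slots
 * (suspend/resume/freeze/thaw/poweroff/restore), and SET_RUNTIME_PM_OPS()
 * fills the runtime suspend/resume/idle slots.
 */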
/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_me_pci_tbl,
        .probe = mei_me_probe,
        .remove = mei_me_remove,
        .shutdown = mei_me_shutdown,
        .driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");