1 /*
2 * PCI Stub Driver - Grabs devices in backend to be exported later
3 *
4 * Ryan Wilson <hap9@epoch.ncsc.mil>
5 * Chris Bookholt <hap10@epoch.ncsc.mil>
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/rwsem.h>
13 #include <linux/list.h>
14 #include <linux/spinlock.h>
15 #include <linux/kref.h>
16 #include <linux/pci.h>
17 #include <linux/wait.h>
18 #include <linux/sched.h>
19 #include <linux/atomic.h>
20 #include <xen/events.h>
21 #include <asm/xen/pci.h>
22 #include <asm/xen/hypervisor.h>
23 #include <xen/interface/physdev.h>
24 #include "pciback.h"
25 #include "conf_space.h"
26 #include "conf_space_quirks.h"
27
28 static char *pci_devs_to_hide;
29 wait_queue_head_t xen_pcibk_aer_wait_queue;
30 /* Add a semaphore to sync AER handling with xen_pcibk remove/reconfigure ops;
31  * we want to avoid a xen_pcibk device being removed in the middle of AER ops.
32  */
33 static DECLARE_RWSEM(pcistub_sem);
34 module_param_named(hide, pci_devs_to_hide, charp, 0444);
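/* Illustrative note (not in the original source): the "hide" string is parsed
 * in pcistub_init() below as a list of parenthesised BDF specifiers, with '*'
 * accepted as a slot/function wildcard, e.g.
 *   hide=(0000:01:00.0)(02:03.*)
 * When the driver is built in, the parameter carries the module-name prefix on
 * the kernel command line (typically xen-pciback.hide=...).
 */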
35
36 struct pcistub_device_id {
37 struct list_head slot_list;
38 int domain;
39 unsigned char bus;
40 unsigned int devfn;
41 };
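/* Note (added for clarity): devfn packs the PCI slot and function numbers via
 * the standard helpers, i.e. PCI_DEVFN(slot, func) to build it and
 * PCI_SLOT(devfn) / PCI_FUNC(devfn) to take it apart again.
 */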
42 static LIST_HEAD(pcistub_device_ids);
43 static DEFINE_SPINLOCK(device_ids_lock);
44
45 struct pcistub_device {
46 struct kref kref;
47 struct list_head dev_list;
48 spinlock_t lock;
49
50 struct pci_dev *dev;
51 struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
52 };
53
54 /* Access to pcistub_devices & seized_devices lists and the initialize_devices
55 * flag must be locked with pcistub_devices_lock
56 */
57 static DEFINE_SPINLOCK(pcistub_devices_lock);
58 static LIST_HEAD(pcistub_devices);
59
60 /* wait for device_initcall before initializing our devices
61 * (see pcistub_init_devices_late)
62 */
63 static int initialize_devices;
64 static LIST_HEAD(seized_devices);
65
66 static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
67 {
68 struct pcistub_device *psdev;
69
70 dev_dbg(&dev->dev, "pcistub_device_alloc\n");
71
72 psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
73 if (!psdev)
74 return NULL;
75
76 psdev->dev = pci_dev_get(dev);
77 if (!psdev->dev) {
78 kfree(psdev);
79 return NULL;
80 }
81
82 kref_init(&psdev->kref);
83 spin_lock_init(&psdev->lock);
84
85 return psdev;
86 }
87
88 /* Don't call this directly as it's called by pcistub_device_put */
89 static void pcistub_device_release(struct kref *kref)
90 {
91 struct pcistub_device *psdev;
92 struct pci_dev *dev;
93 struct xen_pcibk_dev_data *dev_data;
94
95 psdev = container_of(kref, struct pcistub_device, kref);
96 dev = psdev->dev;
97 dev_data = pci_get_drvdata(dev);
98
99 dev_dbg(&dev->dev, "pcistub_device_release\n");
100
101 xen_unregister_device_domain_owner(dev);
102
103 /* Call the reset function that does not take the device lock, as this
104  * is called from "unbind", which already holds the device_lock mutex.
105  */
106 __pci_reset_function_locked(dev);
107 if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
108 dev_dbg(&dev->dev, "Could not reload PCI state\n");
109 else
110 pci_restore_state(dev);
111
112 if (dev->msix_cap) {
113 struct physdev_pci_device ppdev = {
114 .seg = pci_domain_nr(dev->bus),
115 .bus = dev->bus->number,
116 .devfn = dev->devfn
117 };
118 int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
119 &ppdev);
120
121 if (err)
122 dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
123 err);
124 }
125
126 /* Disable the device */
127 xen_pcibk_reset_device(dev);
128
129 kfree(dev_data);
130 pci_set_drvdata(dev, NULL);
131
132 /* Clean-up the device */
133 xen_pcibk_config_free_dyn_fields(dev);
134 xen_pcibk_config_free_dev(dev);
135
136 dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
137 pci_dev_put(dev);
138
139 kfree(psdev);
140 }
141
142 static inline void pcistub_device_get(struct pcistub_device *psdev)
143 {
144 kref_get(&psdev->kref);
145 }
146
147 static inline void pcistub_device_put(struct pcistub_device *psdev)
148 {
149 kref_put(&psdev->kref, pcistub_device_release);
150 }
151
152 static struct pcistub_device *pcistub_device_find(int domain, int bus,
153 int slot, int func)
154 {
155 struct pcistub_device *psdev = NULL;
156 unsigned long flags;
157
158 spin_lock_irqsave(&pcistub_devices_lock, flags);
159
160 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
161 if (psdev->dev != NULL
162 && domain == pci_domain_nr(psdev->dev->bus)
163 && bus == psdev->dev->bus->number
164 && slot == PCI_SLOT(psdev->dev->devfn)
165 && func == PCI_FUNC(psdev->dev->devfn)) {
166 pcistub_device_get(psdev);
167 goto out;
168 }
169 }
170
171 /* didn't find it */
172 psdev = NULL;
173
174 out:
175 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
176 return psdev;
177 }
178
179 static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
180 struct pcistub_device *psdev)
181 {
182 struct pci_dev *pci_dev = NULL;
183 unsigned long flags;
184
185 pcistub_device_get(psdev);
186
187 spin_lock_irqsave(&psdev->lock, flags);
188 if (!psdev->pdev) {
189 psdev->pdev = pdev;
190 pci_dev = psdev->dev;
191 }
192 spin_unlock_irqrestore(&psdev->lock, flags);
193
194 if (!pci_dev)
195 pcistub_device_put(psdev);
196
197 return pci_dev;
198 }
199
200 struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
201 int domain, int bus,
202 int slot, int func)
203 {
204 struct pcistub_device *psdev;
205 struct pci_dev *found_dev = NULL;
206 unsigned long flags;
207
208 spin_lock_irqsave(&pcistub_devices_lock, flags);
209
210 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
211 if (psdev->dev != NULL
212 && domain == pci_domain_nr(psdev->dev->bus)
213 && bus == psdev->dev->bus->number
214 && slot == PCI_SLOT(psdev->dev->devfn)
215 && func == PCI_FUNC(psdev->dev->devfn)) {
216 found_dev = pcistub_device_get_pci_dev(pdev, psdev);
217 break;
218 }
219 }
220
221 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
222 return found_dev;
223 }
224
225 struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
226 struct pci_dev *dev)
227 {
228 struct pcistub_device *psdev;
229 struct pci_dev *found_dev = NULL;
230 unsigned long flags;
231
232 spin_lock_irqsave(&pcistub_devices_lock, flags);
233
234 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
235 if (psdev->dev == dev) {
236 found_dev = pcistub_device_get_pci_dev(pdev, psdev);
237 break;
238 }
239 }
240
241 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
242 return found_dev;
243 }
244
245 void pcistub_put_pci_dev(struct pci_dev *dev)
246 {
247 struct pcistub_device *psdev, *found_psdev = NULL;
248 unsigned long flags;
249
250 spin_lock_irqsave(&pcistub_devices_lock, flags);
251
252 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
253 if (psdev->dev == dev) {
254 found_psdev = psdev;
255 break;
256 }
257 }
258
259 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
260 if (WARN_ON(!found_psdev))
261 return;
262
263 /* Hold this lock to avoid breaking the link between pcistub and
264  * xen_pcibk while AER handling is in progress.
265  */
266 down_write(&pcistub_sem);
267 /* Cleanup our device
268 * (so it's ready for the next domain)
269 */
270
271 /* This is OK - we are running from workqueue context
272 * and want to inhibit the user from fiddling with 'reset'
273 */
274 pci_reset_function(dev);
275 pci_restore_state(dev);
276
277 /* This disables the device. */
278 xen_pcibk_reset_device(dev);
279
280 /* And clean up our emulated fields. */
281 xen_pcibk_config_reset_dev(dev);
282 xen_pcibk_config_free_dyn_fields(dev);
283
284 xen_unregister_device_domain_owner(dev);
285
286 spin_lock_irqsave(&found_psdev->lock, flags);
287 found_psdev->pdev = NULL;
288 spin_unlock_irqrestore(&found_psdev->lock, flags);
289
290 pcistub_device_put(found_psdev);
291 up_write(&pcistub_sem);
292 }
293
294 static int pcistub_match_one(struct pci_dev *dev,
295 struct pcistub_device_id *pdev_id)
296 {
297 /* Match the specified device by domain, bus, slot, func and also if
298 * any of the device's parent bridges match.
299 */
300 for (; dev != NULL; dev = dev->bus->self) {
301 if (pci_domain_nr(dev->bus) == pdev_id->domain
302 && dev->bus->number == pdev_id->bus
303 && dev->devfn == pdev_id->devfn)
304 return 1;
305
306 /* Sometimes topmost bridge links to itself. */
307 if (dev == dev->bus->self)
308 break;
309 }
310
311 return 0;
312 }
313
314 static int pcistub_match(struct pci_dev *dev)
315 {
316 struct pcistub_device_id *pdev_id;
317 unsigned long flags;
318 int found = 0;
319
320 spin_lock_irqsave(&device_ids_lock, flags);
321 list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
322 if (pcistub_match_one(dev, pdev_id)) {
323 found = 1;
324 break;
325 }
326 }
327 spin_unlock_irqrestore(&device_ids_lock, flags);
328
329 return found;
330 }
331
332 static int pcistub_init_device(struct pci_dev *dev)
333 {
334 struct xen_pcibk_dev_data *dev_data;
335 int err = 0;
336
337 dev_dbg(&dev->dev, "initializing...\n");
338
339 /* The PCI backend is not intended to be a module (or to work with
340  * removable PCI devices) yet. If it were, xen_pcibk_config_free()
341  * would need to be called somewhere to free the memory allocated
342  * here and then to call kfree(pci_get_drvdata(psdev->dev)).
343  */
344 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
345 + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
346 if (!dev_data) {
347 err = -ENOMEM;
348 goto out;
349 }
350 pci_set_drvdata(dev, dev_data);
351
352 /*
353 * Setup name for fake IRQ handler. It will only be enabled
354 * once the device is turned on by the guest.
355 */
356 sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
357
358 dev_dbg(&dev->dev, "initializing config\n");
359
360 init_waitqueue_head(&xen_pcibk_aer_wait_queue);
361 err = xen_pcibk_config_init_dev(dev);
362 if (err)
363 goto out;
364
365 /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
366 * must do this here because pcibios_enable_device may specify
367 * the pci device's true irq (and possibly its other resources)
368 * if they differ from what's in the configuration space.
369 * This makes the assumption that the device's resources won't
370 * change after this point (otherwise this code may break!)
371 */
372 dev_dbg(&dev->dev, "enabling device\n");
373 err = pci_enable_device(dev);
374 if (err)
375 goto config_release;
376
377 if (dev->msix_cap) {
378 struct physdev_pci_device ppdev = {
379 .seg = pci_domain_nr(dev->bus),
380 .bus = dev->bus->number,
381 .devfn = dev->devfn
382 };
383
384 err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
385 if (err)
386 dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
387 err);
388 }
389
390 /* We need the device active to save the state. */
391 dev_dbg(&dev->dev, "save state of device\n");
392 pci_save_state(dev);
393 dev_data->pci_saved_state = pci_store_saved_state(dev);
394 if (!dev_data->pci_saved_state)
395 dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
396 else {
397 dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
398 __pci_reset_function_locked(dev);
399 pci_restore_state(dev);
400 }
401 /* Now disable the device (this also ensures some private device
402  * data is set up before we export)
403 */
404 dev_dbg(&dev->dev, "reset device\n");
405 xen_pcibk_reset_device(dev);
406
407 dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
408 return 0;
409
410 config_release:
411 xen_pcibk_config_free_dev(dev);
412
413 out:
414 pci_set_drvdata(dev, NULL);
415 kfree(dev_data);
416 return err;
417 }
418
419 /*
420 * Because some initialization still happens on
421 * devices during fs_initcall, we need to defer
422 * full initialization of our devices until
423 * device_initcall.
424 */
425 static int __init pcistub_init_devices_late(void)
426 {
427 struct pcistub_device *psdev;
428 unsigned long flags;
429 int err = 0;
430
431 spin_lock_irqsave(&pcistub_devices_lock, flags);
432
433 while (!list_empty(&seized_devices)) {
434 psdev = container_of(seized_devices.next,
435 struct pcistub_device, dev_list);
436 list_del(&psdev->dev_list);
437
438 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
439
440 err = pcistub_init_device(psdev->dev);
441 if (err) {
442 dev_err(&psdev->dev->dev,
443 "error %d initializing device\n", err);
444 kfree(psdev);
445 psdev = NULL;
446 }
447
448 spin_lock_irqsave(&pcistub_devices_lock, flags);
449
450 if (psdev)
451 list_add_tail(&psdev->dev_list, &pcistub_devices);
452 }
453
454 initialize_devices = 1;
455
456 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
457
458 return 0;
459 }
460
461 static int pcistub_seize(struct pci_dev *dev)
462 {
463 struct pcistub_device *psdev;
464 unsigned long flags;
465 int err = 0;
466
467 psdev = pcistub_device_alloc(dev);
468 if (!psdev)
469 return -ENOMEM;
470
471 spin_lock_irqsave(&pcistub_devices_lock, flags);
472
473 if (initialize_devices) {
474 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
475
476 /* don't want irqs disabled when calling pcistub_init_device */
477 err = pcistub_init_device(psdev->dev);
478
479 spin_lock_irqsave(&pcistub_devices_lock, flags);
480
481 if (!err)
482 list_add(&psdev->dev_list, &pcistub_devices);
483 } else {
484 dev_dbg(&dev->dev, "deferring initialization\n");
485 list_add(&psdev->dev_list, &seized_devices);
486 }
487
488 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
489
490 if (err)
491 pcistub_device_put(psdev);
492
493 return err;
494 }
495
496 /* Called when 'bind' is invoked. This means we must _NOT_ call
497  * pci_reset_function or other functions that take the sysfs lock. */
498 static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
499 {
500 int err = 0;
501
502 dev_dbg(&dev->dev, "probing...\n");
503
504 if (pcistub_match(dev)) {
505
506 if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
507 && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
508 dev_err(&dev->dev, "can't export pci devices that "
509 "don't have a normal (0) or bridge (1) "
510 "header type!\n");
511 err = -ENODEV;
512 goto out;
513 }
514
515 dev_info(&dev->dev, "seizing device\n");
516 err = pcistub_seize(dev);
517 } else
518 /* Didn't find the device */
519 err = -ENODEV;
520
521 out:
522 return err;
523 }
524
525 /* Called when 'unbind' is invoked. This means we must _NOT_ call
526  * pci_reset_function or other functions that take the sysfs lock. */
527 static void pcistub_remove(struct pci_dev *dev)
528 {
529 struct pcistub_device *psdev, *found_psdev = NULL;
530 unsigned long flags;
531
532 dev_dbg(&dev->dev, "removing\n");
533
534 spin_lock_irqsave(&pcistub_devices_lock, flags);
535
536 xen_pcibk_config_quirk_release(dev);
537
538 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
539 if (psdev->dev == dev) {
540 found_psdev = psdev;
541 break;
542 }
543 }
544
545 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
546
547 if (found_psdev) {
548 dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
549 found_psdev->pdev);
550
551 if (found_psdev->pdev) {
552 pr_warn("****** removing device %s while still in-use! ******\n",
553 pci_name(found_psdev->dev));
554 pr_warn("****** driver domain may still access this device's i/o resources!\n");
555 pr_warn("****** shutdown driver domain before binding device\n");
556 pr_warn("****** to other drivers or domains\n");
557
558 /* N.B. This ends up calling pcistub_put_pci_dev which ends up
559 * doing the FLR. */
560 xen_pcibk_release_pci_dev(found_psdev->pdev,
561 found_psdev->dev);
562 }
563
564 spin_lock_irqsave(&pcistub_devices_lock, flags);
565 list_del(&found_psdev->dev_list);
566 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
567
568 /* the final put for releasing from the list */
569 pcistub_device_put(found_psdev);
570 }
571 }
572
573 static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
574 	{
575 	 .vendor = PCI_ANY_ID,
576 	 .device = PCI_ANY_ID,
577 	 .subvendor = PCI_ANY_ID,
578 	 .subdevice = PCI_ANY_ID,
579 	 },
580 	{0,},
581 };
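/* Note (added for clarity): the table above matches every PCI device
 * (PCI_ANY_ID wildcards), so pcistub_probe() is offered all devices;
 * pcistub_match() then decides which ones this driver actually seizes.
 */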
582
583 #define PCI_NODENAME_MAX 40
584 static void kill_domain_by_device(struct pcistub_device *psdev)
585 {
586 struct xenbus_transaction xbt;
587 int err;
588 char nodename[PCI_NODENAME_MAX];
589
590 BUG_ON(!psdev);
591 snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
592 psdev->pdev->xdev->otherend_id);
593
594 again:
595 err = xenbus_transaction_start(&xbt);
596 if (err) {
597 dev_err(&psdev->dev->dev,
598 "error %d when starting xenbus transaction\n", err);
599 return;
600 }
601 /* PV AER handlers will set this flag */
602 xenbus_printf(xbt, nodename, "aerState" , "aerfail");
603 err = xenbus_transaction_end(xbt, 0);
604 if (err) {
605 if (err == -EAGAIN)
606 goto again;
607 dev_err(&psdev->dev->dev,
608 "error %d when ending xenbus transaction\n", err);
609 return;
610 }
611 }
612
613 /* For each AER recovery step (error_detected, mmio_enabled, etc.), the
614  * frontend and backend need to cooperate. In xen_pcibk, those steps all do
615  * a similar job: send a service request and wait for the frontend's response.
616  */
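/* Rough sketch (added for clarity) of the handshake implemented by
 * common_process() below:
 *   1. fill in sh_info->aer_op (cmd, error state, frontend BDF);
 *   2. set _PCIB_op_pending and _XEN_PCIB_active, then notify the frontend
 *      over the event channel;
 *   3. wait (up to 300s) for the frontend to clear _XEN_PCIB_active and
 *      write its verdict back into aer_op->err.
 */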
617 static pci_ers_result_t common_process(struct pcistub_device *psdev,
618 pci_channel_state_t state, int aer_cmd,
619 pci_ers_result_t result)
620 {
621 pci_ers_result_t res = result;
622 struct xen_pcie_aer_op *aer_op;
623 int ret;
624
625 /* with PV AER drivers */
626 aer_op = &(psdev->pdev->sh_info->aer_op);
627 aer_op->cmd = aer_cmd;
628 /* useful for error_detected callback */
629 aer_op->err = state;
630 /* pcifront_end BDF */
631 ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
632 &aer_op->domain, &aer_op->bus, &aer_op->devfn);
633 if (!ret) {
634 dev_err(&psdev->dev->dev,
635 DRV_NAME ": failed to get pcifront device\n");
636 return PCI_ERS_RESULT_NONE;
637 }
638 wmb();
639
640 dev_dbg(&psdev->dev->dev,
641 DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
642 aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
643 /* Local flag to mark that an AER request is pending; the xen_pcibk
644  * callback will use this flag to decide whether it needs to check for
645  * the AER service ack signal from pcifront.
646  */
647 set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
648
649 /* It is possible that a pcifront conf_read_write ops request invokes
650  * the callback, which causes a spurious execution of wake_up.
651  * Yet it is harmless and better than a spinlock here.
652  */
653 set_bit(_XEN_PCIB_active,
654 (unsigned long *)&psdev->pdev->sh_info->flags);
655 wmb();
656 notify_remote_via_irq(psdev->pdev->evtchn_irq);
657
658 ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
659 !(test_bit(_XEN_PCIB_active, (unsigned long *)
660 &psdev->pdev->sh_info->flags)), 300*HZ);
661
662 if (!ret) {
663 if (test_bit(_XEN_PCIB_active,
664 (unsigned long *)&psdev->pdev->sh_info->flags)) {
665 dev_err(&psdev->dev->dev,
666 "pcifront aer process not responding!\n");
667 clear_bit(_XEN_PCIB_active,
668 (unsigned long *)&psdev->pdev->sh_info->flags);
669 aer_op->err = PCI_ERS_RESULT_NONE;
670 return res;
671 }
672 }
673 clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
674
675 if (test_bit(_XEN_PCIF_active,
676 (unsigned long *)&psdev->pdev->sh_info->flags)) {
677 dev_dbg(&psdev->dev->dev,
678 "schedule pci_conf service in " DRV_NAME "\n");
679 xen_pcibk_test_and_schedule_op(psdev->pdev);
680 }
681
682 res = (pci_ers_result_t)aer_op->err;
683 return res;
684 }
685
686 /*
687  * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case
688  * the device driver can provide this service, and then wait for the
689  * pcifront ack.
690  * @dev: pointer to the PCI device
691  * The return value is used by the AER core's do_recovery policy.
692  */
693 static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
694 {
695 struct pcistub_device *psdev;
696 pci_ers_result_t result;
697
698 result = PCI_ERS_RESULT_RECOVERED;
699 dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
700 dev->bus->number, dev->devfn);
701
702 down_write(&pcistub_sem);
703 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
704 dev->bus->number,
705 PCI_SLOT(dev->devfn),
706 PCI_FUNC(dev->devfn));
707
708 if (!psdev || !psdev->pdev) {
709 dev_err(&dev->dev,
710 DRV_NAME " device is not found/assigned\n");
711 goto end;
712 }
713
714 if (!psdev->pdev->sh_info) {
715 dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
716 " by HVM, kill it\n");
717 kill_domain_by_device(psdev);
718 goto end;
719 }
720
721 if (!test_bit(_XEN_PCIB_AERHANDLER,
722 (unsigned long *)&psdev->pdev->sh_info->flags)) {
723 dev_err(&dev->dev,
724 "guest with no AER driver should have been killed\n");
725 goto end;
726 }
727 result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
728
729 if (result == PCI_ERS_RESULT_NONE ||
730 result == PCI_ERS_RESULT_DISCONNECT) {
731 dev_dbg(&dev->dev,
732 "No AER slot_reset service or disconnected!\n");
733 kill_domain_by_device(psdev);
734 }
735 end:
736 if (psdev)
737 pcistub_device_put(psdev);
738 up_write(&pcistub_sem);
739 return result;
740
741 }
742
743
744 /* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront
745  * in case the device driver can provide this service, and then wait
746  * for the pcifront ack.
747  * @dev: pointer to the PCI device
748  * The return value is used by the AER core's do_recovery policy.
749  */
750
751 static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
752 {
753 struct pcistub_device *psdev;
754 pci_ers_result_t result;
755
756 result = PCI_ERS_RESULT_RECOVERED;
757 dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
758 dev->bus->number, dev->devfn);
759
760 down_write(&pcistub_sem);
761 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
762 dev->bus->number,
763 PCI_SLOT(dev->devfn),
764 PCI_FUNC(dev->devfn));
765
766 if (!psdev || !psdev->pdev) {
767 dev_err(&dev->dev,
768 DRV_NAME " device is not found/assigned\n");
769 goto end;
770 }
771
772 if (!psdev->pdev->sh_info) {
773 dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
774 " by HVM, kill it\n");
775 kill_domain_by_device(psdev);
776 goto end;
777 }
778
779 if (!test_bit(_XEN_PCIB_AERHANDLER,
780 (unsigned long *)&psdev->pdev->sh_info->flags)) {
781 dev_err(&dev->dev,
782 "guest with no AER driver should have been killed\n");
783 goto end;
784 }
785 result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
786
787 if (result == PCI_ERS_RESULT_NONE ||
788 result == PCI_ERS_RESULT_DISCONNECT) {
789 dev_dbg(&dev->dev,
790 "No AER mmio_enabled service or disconnected!\n");
791 kill_domain_by_device(psdev);
792 }
793 end:
794 if (psdev)
795 pcistub_device_put(psdev);
796 up_write(&pcistub_sem);
797 return result;
798 }
799
800 /* xen_pcibk_error_detected: send the error_detected request to pcifront
801  * in case the device driver can provide this service, and then wait
802  * for the pcifront ack.
803  * @dev: pointer to the PCI device
804  * @error: the current PCI connection state
805  * The return value is used by the AER core's do_recovery policy.
806  */
807
808 static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
809 pci_channel_state_t error)
810 {
811 struct pcistub_device *psdev;
812 pci_ers_result_t result;
813
814 result = PCI_ERS_RESULT_CAN_RECOVER;
815 dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
816 dev->bus->number, dev->devfn);
817
818 down_write(&pcistub_sem);
819 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
820 dev->bus->number,
821 PCI_SLOT(dev->devfn),
822 PCI_FUNC(dev->devfn));
823
824 if (!psdev || !psdev->pdev) {
825 dev_err(&dev->dev,
826 DRV_NAME " device is not found/assigned\n");
827 goto end;
828 }
829
830 if (!psdev->pdev->sh_info) {
831 dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
832 " by HVM, kill it\n");
833 kill_domain_by_device(psdev);
834 goto end;
835 }
836
837 /* Guest owns the device yet no AER handler is registered, kill the guest */
838 if (!test_bit(_XEN_PCIB_AERHANDLER,
839 (unsigned long *)&psdev->pdev->sh_info->flags)) {
840 dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
841 kill_domain_by_device(psdev);
842 goto end;
843 }
844 result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
845
846 if (result == PCI_ERS_RESULT_NONE ||
847 result == PCI_ERS_RESULT_DISCONNECT) {
848 dev_dbg(&dev->dev,
849 "No AER error_detected service or disconnected!\n");
850 kill_domain_by_device(psdev);
851 }
852 end:
853 if (psdev)
854 pcistub_device_put(psdev);
855 up_write(&pcistub_sem);
856 return result;
857 }
858
859 /* xen_pcibk_error_resume: send the error_resume request to pcifront
860  * in case the device driver can provide this service, and then wait
861  * for the pcifront ack.
862  * @dev: pointer to the PCI device
863  */
864
865 static void xen_pcibk_error_resume(struct pci_dev *dev)
866 {
867 struct pcistub_device *psdev;
868
869 dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
870 dev->bus->number, dev->devfn);
871
872 down_write(&pcistub_sem);
873 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
874 dev->bus->number,
875 PCI_SLOT(dev->devfn),
876 PCI_FUNC(dev->devfn));
877
878 if (!psdev || !psdev->pdev) {
879 dev_err(&dev->dev,
880 DRV_NAME " device is not found/assigned\n");
881 goto end;
882 }
883
884 if (!psdev->pdev->sh_info) {
885 dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
886 " by HVM, kill it\n");
887 kill_domain_by_device(psdev);
888 goto end;
889 }
890
891 if (!test_bit(_XEN_PCIB_AERHANDLER,
892 (unsigned long *)&psdev->pdev->sh_info->flags)) {
893 dev_err(&dev->dev,
894 "guest with no AER driver should have been killed\n");
895 kill_domain_by_device(psdev);
896 goto end;
897 }
898 common_process(psdev, 1, XEN_PCI_OP_aer_resume,
899 PCI_ERS_RESULT_RECOVERED);
900 end:
901 if (psdev)
902 pcistub_device_put(psdev);
903 up_write(&pcistub_sem);
904 return;
905 }
906
907 /* xen_pcibk AER handling */
908 static const struct pci_error_handlers xen_pcibk_error_handler = {
909 .error_detected = xen_pcibk_error_detected,
910 .mmio_enabled = xen_pcibk_mmio_enabled,
911 .slot_reset = xen_pcibk_slot_reset,
912 .resume = xen_pcibk_error_resume,
913 };
914
915 /*
916 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
917 * for a normal device. I don't want it to be loaded automatically.
918 */
919
920 static struct pci_driver xen_pcibk_pci_driver = {
921 /* The name should be xen_pciback, but until the tools are updated
922 * we will keep it as pciback. */
923 .name = "pciback",
924 .id_table = pcistub_ids,
925 .probe = pcistub_probe,
926 .remove = pcistub_remove,
927 .err_handler = &xen_pcibk_error_handler,
928 };
929
930 static inline int str_to_slot(const char *buf, int *domain, int *bus,
931 int *slot, int *func)
932 {
933 int parsed = 0;
934
935 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
936 &parsed)) {
937 case 3:
938 *func = -1;
939 sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
940 break;
941 case 2:
942 *slot = *func = -1;
943 sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
944 break;
945 }
946 if (parsed && !buf[parsed])
947 return 0;
948
949 /* try again without domain */
950 *domain = 0;
951 switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
952 case 2:
953 *func = -1;
954 sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
955 break;
956 case 1:
957 *slot = *func = -1;
958 sscanf(buf, " %x:*.* %n", bus, &parsed);
959 break;
960 }
961 if (parsed && !buf[parsed])
962 return 0;
963
964 return -EINVAL;
965 }
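/* Examples of strings accepted by str_to_slot() above (values in hex), added
 * for illustration:
 *   "0000:01:02.3"  - full domain:bus:slot.func
 *   "01:02.3"       - domain defaults to 0
 *   "01:02.*"       - all functions of slot 02 (func = -1)
 *   "01:*.*"        - all slots and functions on bus 01 (slot = func = -1)
 */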
966
967 static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
968 *slot, int *func, int *reg, int *size, int *mask)
969 {
970 int parsed = 0;
971
972 sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
973 reg, size, mask, &parsed);
974 if (parsed && !buf[parsed])
975 return 0;
976
977 /* try again without domain */
978 *domain = 0;
979 sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
980 mask, &parsed);
981 if (parsed && !buf[parsed])
982 return 0;
983
984 return -EINVAL;
985 }
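/* Example quirk string for str_to_quirk() above (all values in hex), added
 * for illustration: "0000:01:02.3-40:4:ffffffff" names config-space offset
 * 0x40, size 4 and mask 0xffffffff for that device; the leading domain may
 * be omitted, in which case it defaults to 0.
 */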
986
987 static int pcistub_device_id_add(int domain, int bus, int slot, int func)
988 {
989 struct pcistub_device_id *pci_dev_id;
990 unsigned long flags;
991 int rc = 0, devfn = PCI_DEVFN(slot, func);
992
993 if (slot < 0) {
994 for (slot = 0; !rc && slot < 32; ++slot)
995 rc = pcistub_device_id_add(domain, bus, slot, func);
996 return rc;
997 }
998
999 if (func < 0) {
1000 for (func = 0; !rc && func < 8; ++func)
1001 rc = pcistub_device_id_add(domain, bus, slot, func);
1002 return rc;
1003 }
1004
1005 if ((
1006 #if !defined(MODULE) /* pci_domains_supported is not being exported */ \
1007 || !defined(CONFIG_PCI_DOMAINS)
1008 !pci_domains_supported ? domain :
1009 #endif
1010 domain < 0 || domain > 0xffff)
1011 || bus < 0 || bus > 0xff
1012 || PCI_SLOT(devfn) != slot
1013 || PCI_FUNC(devfn) != func)
1014 return -EINVAL;
1015
1016 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
1017 if (!pci_dev_id)
1018 return -ENOMEM;
1019
1020 pci_dev_id->domain = domain;
1021 pci_dev_id->bus = bus;
1022 pci_dev_id->devfn = devfn;
1023
1024 pr_debug("wants to seize %04x:%02x:%02x.%d\n",
1025 domain, bus, slot, func);
1026
1027 spin_lock_irqsave(&device_ids_lock, flags);
1028 list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
1029 spin_unlock_irqrestore(&device_ids_lock, flags);
1030
1031 return 0;
1032 }
1033
1034 static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
1035 {
1036 struct pcistub_device_id *pci_dev_id, *t;
1037 int err = -ENOENT;
1038 unsigned long flags;
1039
1040 spin_lock_irqsave(&device_ids_lock, flags);
1041 list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
1042 slot_list) {
1043 if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
1044 && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
1045 && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
1046 /* Don't break; here because it's possible the same
1047 * slot could be in the list more than once
1048 */
1049 list_del(&pci_dev_id->slot_list);
1050 kfree(pci_dev_id);
1051
1052 err = 0;
1053
1054 pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
1055 domain, bus, slot, func);
1056 }
1057 }
1058 spin_unlock_irqrestore(&device_ids_lock, flags);
1059
1060 return err;
1061 }
1062
1063 static int pcistub_reg_add(int domain, int bus, int slot, int func,
1064 unsigned int reg, unsigned int size,
1065 unsigned int mask)
1066 {
1067 int err = 0;
1068 struct pcistub_device *psdev;
1069 struct pci_dev *dev;
1070 struct config_field *field;
1071
1072 if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
1073 return -EINVAL;
1074
1075 psdev = pcistub_device_find(domain, bus, slot, func);
1076 if (!psdev) {
1077 err = -ENODEV;
1078 goto out;
1079 }
1080 dev = psdev->dev;
1081
1082 field = kzalloc(sizeof(*field), GFP_ATOMIC);
1083 if (!field) {
1084 err = -ENOMEM;
1085 goto out;
1086 }
1087
1088 field->offset = reg;
1089 field->size = size;
1090 field->mask = mask;
1091 field->init = NULL;
1092 field->reset = NULL;
1093 field->release = NULL;
1094 field->clean = xen_pcibk_config_field_free;
1095
1096 err = xen_pcibk_config_quirks_add_field(dev, field);
1097 if (err)
1098 kfree(field);
1099 out:
1100 if (psdev)
1101 pcistub_device_put(psdev);
1102 return err;
1103 }
1104
1105 static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
1106 size_t count)
1107 {
1108 int domain, bus, slot, func;
1109 int err;
1110
1111 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1112 if (err)
1113 goto out;
1114
1115 err = pcistub_device_id_add(domain, bus, slot, func);
1116
1117 out:
1118 if (!err)
1119 err = count;
1120 return err;
1121 }
1122 static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
1123
1124 static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
1125 size_t count)
1126 {
1127 int domain, bus, slot, func;
1128 int err;
1129
1130 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1131 if (err)
1132 goto out;
1133
1134 err = pcistub_device_id_remove(domain, bus, slot, func);
1135
1136 out:
1137 if (!err)
1138 err = count;
1139 return err;
1140 }
1141 static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
1142
1143 static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
1144 {
1145 struct pcistub_device_id *pci_dev_id;
1146 size_t count = 0;
1147 unsigned long flags;
1148
1149 spin_lock_irqsave(&device_ids_lock, flags);
1150 list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
1151 if (count >= PAGE_SIZE)
1152 break;
1153
1154 count += scnprintf(buf + count, PAGE_SIZE - count,
1155 "%04x:%02x:%02x.%d\n",
1156 pci_dev_id->domain, pci_dev_id->bus,
1157 PCI_SLOT(pci_dev_id->devfn),
1158 PCI_FUNC(pci_dev_id->devfn));
1159 }
1160 spin_unlock_irqrestore(&device_ids_lock, flags);
1161
1162 return count;
1163 }
1164 static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
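/* Illustrative usage of the new_slot/remove_slot/slots attributes from dom0
 * (paths assume the default "pciback" driver name):
 *   echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/new_slot
 *   cat /sys/bus/pci/drivers/pciback/slots
 *   echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/remove_slot
 */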
1165
1166 static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
1167 {
1168 struct pcistub_device *psdev;
1169 struct xen_pcibk_dev_data *dev_data;
1170 size_t count = 0;
1171 unsigned long flags;
1172
1173 spin_lock_irqsave(&pcistub_devices_lock, flags);
1174 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
1175 if (count >= PAGE_SIZE)
1176 break;
1177 if (!psdev->dev)
1178 continue;
1179 dev_data = pci_get_drvdata(psdev->dev);
1180 if (!dev_data)
1181 continue;
1182 count +=
1183 scnprintf(buf + count, PAGE_SIZE - count,
1184 "%s:%s:%sing:%ld\n",
1185 pci_name(psdev->dev),
1186 dev_data->isr_on ? "on" : "off",
1187 dev_data->ack_intr ? "ack" : "not ack",
1188 dev_data->handled);
1189 }
1190 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
1191 return count;
1192 }
1193 static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
1194
1195 static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
1196 const char *buf,
1197 size_t count)
1198 {
1199 struct pcistub_device *psdev;
1200 struct xen_pcibk_dev_data *dev_data;
1201 int domain, bus, slot, func;
1202 int err;
1203
1204 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1205 if (err)
1206 return err;
1207
1208 psdev = pcistub_device_find(domain, bus, slot, func);
1209 if (!psdev) {
1210 err = -ENOENT;
1211 goto out;
1212 }
1213
1214 dev_data = pci_get_drvdata(psdev->dev);
1215 if (!dev_data) {
1216 err = -ENOENT;
1217 goto out;
1218 }
1219
1220 dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
1221 dev_data->irq_name, dev_data->isr_on,
1222 !dev_data->isr_on);
1223
1224 dev_data->isr_on = !(dev_data->isr_on);
1225 if (dev_data->isr_on)
1226 dev_data->ack_intr = 1;
1227 out:
1228 if (psdev)
1229 pcistub_device_put(psdev);
1230 if (!err)
1231 err = count;
1232 return err;
1233 }
1234 static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
1235 pcistub_irq_handler_switch);
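/* Illustrative usage (path assumes the default "pciback" driver name):
 * writing a slot toggles the fake IRQ handler state reported by
 * "irq_handlers", e.g.
 *   echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/irq_handler_state
 */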
1236
1237 static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
1238 size_t count)
1239 {
1240 int domain, bus, slot, func, reg, size, mask;
1241 int err;
1242
1243 err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
1244 &mask);
1245 if (err)
1246 goto out;
1247
1248 err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
1249
1250 out:
1251 if (!err)
1252 err = count;
1253 return err;
1254 }
1255
1256 static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
1257 {
1258 int count = 0;
1259 unsigned long flags;
1260 struct xen_pcibk_config_quirk *quirk;
1261 struct xen_pcibk_dev_data *dev_data;
1262 const struct config_field *field;
1263 const struct config_field_entry *cfg_entry;
1264
1265 spin_lock_irqsave(&device_ids_lock, flags);
1266 list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
1267 if (count >= PAGE_SIZE)
1268 goto out;
1269
1270 count += scnprintf(buf + count, PAGE_SIZE - count,
1271 "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
1272 quirk->pdev->bus->number,
1273 PCI_SLOT(quirk->pdev->devfn),
1274 PCI_FUNC(quirk->pdev->devfn),
1275 quirk->devid.vendor, quirk->devid.device,
1276 quirk->devid.subvendor,
1277 quirk->devid.subdevice);
1278
1279 dev_data = pci_get_drvdata(quirk->pdev);
1280
1281 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
1282 field = cfg_entry->field;
1283 if (count >= PAGE_SIZE)
1284 goto out;
1285
1286 count += scnprintf(buf + count, PAGE_SIZE - count,
1287 "\t\t%08x:%01x:%08x\n",
1288 cfg_entry->base_offset +
1289 field->offset, field->size,
1290 field->mask);
1291 }
1292 }
1293
1294 out:
1295 spin_unlock_irqrestore(&device_ids_lock, flags);
1296
1297 return count;
1298 }
1299 static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
1300 pcistub_quirk_add);
1301
1302 static ssize_t permissive_add(struct device_driver *drv, const char *buf,
1303 size_t count)
1304 {
1305 int domain, bus, slot, func;
1306 int err;
1307 struct pcistub_device *psdev;
1308 struct xen_pcibk_dev_data *dev_data;
1309
1310 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1311 if (err)
1312 goto out;
1313
1314 psdev = pcistub_device_find(domain, bus, slot, func);
1315 if (!psdev) {
1316 err = -ENODEV;
1317 goto out;
1318 }
1319
1320 dev_data = pci_get_drvdata(psdev->dev);
1321 /* the driver data for a device should never be null at this point */
1322 if (!dev_data) {
1323 err = -ENXIO;
1324 goto release;
1325 }
1326 if (!dev_data->permissive) {
1327 dev_data->permissive = 1;
1328 /* Let user know that what they're doing could be unsafe */
1329 dev_warn(&psdev->dev->dev, "enabling permissive mode "
1330 "configuration space accesses!\n");
1331 dev_warn(&psdev->dev->dev,
1332 "permissive mode is potentially unsafe!\n");
1333 }
1334 release:
1335 pcistub_device_put(psdev);
1336 out:
1337 if (!err)
1338 err = count;
1339 return err;
1340 }
1341
1342 static ssize_t permissive_show(struct device_driver *drv, char *buf)
1343 {
1344 struct pcistub_device *psdev;
1345 struct xen_pcibk_dev_data *dev_data;
1346 size_t count = 0;
1347 unsigned long flags;
1348 spin_lock_irqsave(&pcistub_devices_lock, flags);
1349 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
1350 if (count >= PAGE_SIZE)
1351 break;
1352 if (!psdev->dev)
1353 continue;
1354 dev_data = pci_get_drvdata(psdev->dev);
1355 if (!dev_data || !dev_data->permissive)
1356 continue;
1357 count +=
1358 scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
1359 pci_name(psdev->dev));
1360 }
1361 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
1362 return count;
1363 }
1364 static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
1365 permissive_add);
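/* Illustrative usage (path assumes the default "pciback" driver name):
 *   echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/permissive
 *   cat /sys/bus/pci/drivers/pciback/permissive
 * As the warnings above note, permissive mode relaxes config-space access
 * filtering and is potentially unsafe.
 */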
1366
1367 static void pcistub_exit(void)
1368 {
1369 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
1370 driver_remove_file(&xen_pcibk_pci_driver.driver,
1371 &driver_attr_remove_slot);
1372 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
1373 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
1374 driver_remove_file(&xen_pcibk_pci_driver.driver,
1375 &driver_attr_permissive);
1376 driver_remove_file(&xen_pcibk_pci_driver.driver,
1377 &driver_attr_irq_handlers);
1378 driver_remove_file(&xen_pcibk_pci_driver.driver,
1379 &driver_attr_irq_handler_state);
1380 pci_unregister_driver(&xen_pcibk_pci_driver);
1381 }
1382
1383 static int __init pcistub_init(void)
1384 {
1385 int pos = 0;
1386 int err = 0;
1387 int domain, bus, slot, func;
1388 int parsed;
1389
1390 if (pci_devs_to_hide && *pci_devs_to_hide) {
1391 do {
1392 parsed = 0;
1393
1394 err = sscanf(pci_devs_to_hide + pos,
1395 " (%x:%x:%x.%x) %n",
1396 &domain, &bus, &slot, &func, &parsed);
1397 switch (err) {
1398 case 3:
1399 func = -1;
1400 sscanf(pci_devs_to_hide + pos,
1401 " (%x:%x:%x.*) %n",
1402 &domain, &bus, &slot, &parsed);
1403 break;
1404 case 2:
1405 slot = func = -1;
1406 sscanf(pci_devs_to_hide + pos,
1407 " (%x:%x:*.*) %n",
1408 &domain, &bus, &parsed);
1409 break;
1410 }
1411
1412 if (!parsed) {
1413 domain = 0;
1414 err = sscanf(pci_devs_to_hide + pos,
1415 " (%x:%x.%x) %n",
1416 &bus, &slot, &func, &parsed);
1417 switch (err) {
1418 case 2:
1419 func = -1;
1420 sscanf(pci_devs_to_hide + pos,
1421 " (%x:%x.*) %n",
1422 &bus, &slot, &parsed);
1423 break;
1424 case 1:
1425 slot = func = -1;
1426 sscanf(pci_devs_to_hide + pos,
1427 " (%x:*.*) %n",
1428 &bus, &parsed);
1429 break;
1430 }
1431 }
1432
1433 if (parsed <= 0)
1434 goto parse_error;
1435
1436 err = pcistub_device_id_add(domain, bus, slot, func);
1437 if (err)
1438 goto out;
1439
1440 pos += parsed;
1441 } while (pci_devs_to_hide[pos]);
1442 }
1443
1444 /* If we're the first PCI Device Driver to register, we're the
1445 * first one to get offered PCI devices as they become
1446 * available (and thus we can be the first to grab them)
1447 */
1448 err = pci_register_driver(&xen_pcibk_pci_driver);
1449 if (err < 0)
1450 goto out;
1451
1452 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1453 &driver_attr_new_slot);
1454 if (!err)
1455 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1456 &driver_attr_remove_slot);
1457 if (!err)
1458 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1459 &driver_attr_slots);
1460 if (!err)
1461 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1462 &driver_attr_quirks);
1463 if (!err)
1464 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1465 &driver_attr_permissive);
1466
1467 if (!err)
1468 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1469 &driver_attr_irq_handlers);
1470 if (!err)
1471 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1472 &driver_attr_irq_handler_state);
1473 if (err)
1474 pcistub_exit();
1475
1476 out:
1477 return err;
1478
1479 parse_error:
1480 pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
1481 pci_devs_to_hide + pos);
1482 return -EINVAL;
1483 }
1484
1485 #ifndef MODULE
1486 /*
1487 * fs_initcall happens before device_initcall
1488 * so xen_pcibk *should* get called first (b/c we
1489 * want to suck up any device before other drivers
1490 * get a chance by being the first pci device
1491 * driver to register)
1492 */
1493 fs_initcall(pcistub_init);
1494 #endif
1495
1496 static int __init xen_pcibk_init(void)
1497 {
1498 int err;
1499
1500 if (!xen_initial_domain())
1501 return -ENODEV;
1502
1503 err = xen_pcibk_config_init();
1504 if (err)
1505 return err;
1506
1507 #ifdef MODULE
1508 err = pcistub_init();
1509 if (err < 0)
1510 return err;
1511 #endif
1512
1513 pcistub_init_devices_late();
1514 err = xen_pcibk_xenbus_register();
1515 if (err)
1516 pcistub_exit();
1517
1518 return err;
1519 }
1520
1521 static void __exit xen_pcibk_cleanup(void)
1522 {
1523 xen_pcibk_xenbus_unregister();
1524 pcistub_exit();
1525 }
1526
1527 module_init(xen_pcibk_init);
1528 module_exit(xen_pcibk_cleanup);
1529
1530 MODULE_LICENSE("Dual BSD/GPL");
1531 MODULE_ALIAS("xen-backend:pci");