1 // SPDX-License-Identifier: GPL-2.0
3 * Microsemi Switchtec(tm) PCIe Management Driver
4 * Copyright (c) 2017, Microsemi Corporation
7 #include <linux/switchtec.h>
8 #include <linux/switchtec_ioctl.h>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/poll.h>
15 #include <linux/wait.h>
16 #include <linux/io-64-nonatomic-lo-hi.h>
17 #include <linux/nospec.h>
19 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
20 MODULE_VERSION("0.1");
21 MODULE_LICENSE("GPL");
22 MODULE_AUTHOR("Microsemi Corporation");
24 static int max_devices
= 16;
25 module_param(max_devices
, int, 0644);
26 MODULE_PARM_DESC(max_devices
, "max number of switchtec device instances");
28 static bool use_dma_mrpc
= 1;
29 module_param(use_dma_mrpc
, bool, 0644);
30 MODULE_PARM_DESC(use_dma_mrpc
,
31 "Enable the use of the DMA MRPC feature");
33 static int nirqs
= 32;
34 module_param(nirqs
, int, 0644);
35 MODULE_PARM_DESC(nirqs
, "number of interrupts to allocate (more may be useful for NTB applications)");
37 static dev_t switchtec_devt
;
38 static DEFINE_IDA(switchtec_minor_ida
);
40 struct class *switchtec_class
;
41 EXPORT_SYMBOL_GPL(switchtec_class
);
50 struct switchtec_user
{
51 struct switchtec_dev
*stdev
;
53 enum mrpc_state state
;
55 struct completion comp
;
57 struct list_head list
;
64 unsigned char data
[SWITCHTEC_MRPC_PAYLOAD_SIZE
];
68 static struct switchtec_user
*stuser_create(struct switchtec_dev
*stdev
)
70 struct switchtec_user
*stuser
;
72 stuser
= kzalloc(sizeof(*stuser
), GFP_KERNEL
);
74 return ERR_PTR(-ENOMEM
);
76 get_device(&stdev
->dev
);
77 stuser
->stdev
= stdev
;
78 kref_init(&stuser
->kref
);
79 INIT_LIST_HEAD(&stuser
->list
);
80 init_completion(&stuser
->comp
);
81 stuser
->event_cnt
= atomic_read(&stdev
->event_cnt
);
83 dev_dbg(&stdev
->dev
, "%s: %p\n", __func__
, stuser
);
88 static void stuser_free(struct kref
*kref
)
90 struct switchtec_user
*stuser
;
92 stuser
= container_of(kref
, struct switchtec_user
, kref
);
94 dev_dbg(&stuser
->stdev
->dev
, "%s: %p\n", __func__
, stuser
);
96 put_device(&stuser
->stdev
->dev
);
100 static void stuser_put(struct switchtec_user
*stuser
)
102 kref_put(&stuser
->kref
, stuser_free
);
105 static void stuser_set_state(struct switchtec_user
*stuser
,
106 enum mrpc_state state
)
108 /* requires the mrpc_mutex to already be held when called */
110 const char * const state_names
[] = {
111 [MRPC_IDLE
] = "IDLE",
112 [MRPC_QUEUED
] = "QUEUED",
113 [MRPC_RUNNING
] = "RUNNING",
114 [MRPC_DONE
] = "DONE",
117 stuser
->state
= state
;
119 dev_dbg(&stuser
->stdev
->dev
, "stuser state %p -> %s",
120 stuser
, state_names
[state
]);
123 static void mrpc_complete_cmd(struct switchtec_dev
*stdev
);
125 static void flush_wc_buf(struct switchtec_dev
*stdev
)
127 struct ntb_dbmsg_regs __iomem
*mmio_dbmsg
;
130 * odb (outbound doorbell) register is processed by low latency
131 * hardware and w/o side effect
133 mmio_dbmsg
= (void __iomem
*)stdev
->mmio_ntb
+
134 SWITCHTEC_NTB_REG_DBMSG_OFFSET
;
135 ioread32(&mmio_dbmsg
->odb
);
138 static void mrpc_cmd_submit(struct switchtec_dev
*stdev
)
140 /* requires the mrpc_mutex to already be held when called */
142 struct switchtec_user
*stuser
;
144 if (stdev
->mrpc_busy
)
147 if (list_empty(&stdev
->mrpc_queue
))
150 stuser
= list_entry(stdev
->mrpc_queue
.next
, struct switchtec_user
,
153 if (stdev
->dma_mrpc
) {
154 stdev
->dma_mrpc
->status
= SWITCHTEC_MRPC_STATUS_INPROGRESS
;
155 memset(stdev
->dma_mrpc
->data
, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE
);
158 stuser_set_state(stuser
, MRPC_RUNNING
);
159 stdev
->mrpc_busy
= 1;
160 memcpy_toio(&stdev
->mmio_mrpc
->input_data
,
161 stuser
->data
, stuser
->data_len
);
163 iowrite32(stuser
->cmd
, &stdev
->mmio_mrpc
->cmd
);
165 schedule_delayed_work(&stdev
->mrpc_timeout
,
166 msecs_to_jiffies(500));
169 static int mrpc_queue_cmd(struct switchtec_user
*stuser
)
171 /* requires the mrpc_mutex to already be held when called */
173 struct switchtec_dev
*stdev
= stuser
->stdev
;
175 kref_get(&stuser
->kref
);
176 stuser
->read_len
= sizeof(stuser
->data
);
177 stuser_set_state(stuser
, MRPC_QUEUED
);
178 init_completion(&stuser
->comp
);
179 list_add_tail(&stuser
->list
, &stdev
->mrpc_queue
);
181 mrpc_cmd_submit(stdev
);
186 static void mrpc_complete_cmd(struct switchtec_dev
*stdev
)
188 /* requires the mrpc_mutex to already be held when called */
189 struct switchtec_user
*stuser
;
191 if (list_empty(&stdev
->mrpc_queue
))
194 stuser
= list_entry(stdev
->mrpc_queue
.next
, struct switchtec_user
,
198 stuser
->status
= stdev
->dma_mrpc
->status
;
200 stuser
->status
= ioread32(&stdev
->mmio_mrpc
->status
);
202 if (stuser
->status
== SWITCHTEC_MRPC_STATUS_INPROGRESS
)
205 stuser_set_state(stuser
, MRPC_DONE
);
206 stuser
->return_code
= 0;
208 if (stuser
->status
!= SWITCHTEC_MRPC_STATUS_DONE
)
212 stuser
->return_code
= stdev
->dma_mrpc
->rtn_code
;
214 stuser
->return_code
= ioread32(&stdev
->mmio_mrpc
->ret_value
);
215 if (stuser
->return_code
!= 0)
219 memcpy(stuser
->data
, &stdev
->dma_mrpc
->data
,
222 memcpy_fromio(stuser
->data
, &stdev
->mmio_mrpc
->output_data
,
225 complete_all(&stuser
->comp
);
226 list_del_init(&stuser
->list
);
228 stdev
->mrpc_busy
= 0;
230 mrpc_cmd_submit(stdev
);
233 static void mrpc_event_work(struct work_struct
*work
)
235 struct switchtec_dev
*stdev
;
237 stdev
= container_of(work
, struct switchtec_dev
, mrpc_work
);
239 dev_dbg(&stdev
->dev
, "%s\n", __func__
);
241 mutex_lock(&stdev
->mrpc_mutex
);
242 cancel_delayed_work(&stdev
->mrpc_timeout
);
243 mrpc_complete_cmd(stdev
);
244 mutex_unlock(&stdev
->mrpc_mutex
);
247 static void mrpc_timeout_work(struct work_struct
*work
)
249 struct switchtec_dev
*stdev
;
252 stdev
= container_of(work
, struct switchtec_dev
, mrpc_timeout
.work
);
254 dev_dbg(&stdev
->dev
, "%s\n", __func__
);
256 mutex_lock(&stdev
->mrpc_mutex
);
259 status
= stdev
->dma_mrpc
->status
;
261 status
= ioread32(&stdev
->mmio_mrpc
->status
);
262 if (status
== SWITCHTEC_MRPC_STATUS_INPROGRESS
) {
263 schedule_delayed_work(&stdev
->mrpc_timeout
,
264 msecs_to_jiffies(500));
268 mrpc_complete_cmd(stdev
);
270 mutex_unlock(&stdev
->mrpc_mutex
);
273 static ssize_t
device_version_show(struct device
*dev
,
274 struct device_attribute
*attr
, char *buf
)
276 struct switchtec_dev
*stdev
= to_stdev(dev
);
279 ver
= ioread32(&stdev
->mmio_sys_info
->device_version
);
281 return sprintf(buf
, "%x\n", ver
);
283 static DEVICE_ATTR_RO(device_version
);
285 static ssize_t
fw_version_show(struct device
*dev
,
286 struct device_attribute
*attr
, char *buf
)
288 struct switchtec_dev
*stdev
= to_stdev(dev
);
291 ver
= ioread32(&stdev
->mmio_sys_info
->firmware_version
);
293 return sprintf(buf
, "%08x\n", ver
);
295 static DEVICE_ATTR_RO(fw_version
);
297 static ssize_t
io_string_show(char *buf
, void __iomem
*attr
, size_t len
)
301 memcpy_fromio(buf
, attr
, len
);
305 for (i
= len
- 1; i
> 0; i
--) {
315 #define DEVICE_ATTR_SYS_INFO_STR(field) \
316 static ssize_t field ## _show(struct device *dev, \
317 struct device_attribute *attr, char *buf) \
319 struct switchtec_dev *stdev = to_stdev(dev); \
320 return io_string_show(buf, &stdev->mmio_sys_info->field, \
321 sizeof(stdev->mmio_sys_info->field)); \
324 static DEVICE_ATTR_RO(field)
326 DEVICE_ATTR_SYS_INFO_STR(vendor_id
);
327 DEVICE_ATTR_SYS_INFO_STR(product_id
);
328 DEVICE_ATTR_SYS_INFO_STR(product_revision
);
329 DEVICE_ATTR_SYS_INFO_STR(component_vendor
);
331 static ssize_t
component_id_show(struct device
*dev
,
332 struct device_attribute
*attr
, char *buf
)
334 struct switchtec_dev
*stdev
= to_stdev(dev
);
335 int id
= ioread16(&stdev
->mmio_sys_info
->component_id
);
337 return sprintf(buf
, "PM%04X\n", id
);
339 static DEVICE_ATTR_RO(component_id
);
341 static ssize_t
component_revision_show(struct device
*dev
,
342 struct device_attribute
*attr
, char *buf
)
344 struct switchtec_dev
*stdev
= to_stdev(dev
);
345 int rev
= ioread8(&stdev
->mmio_sys_info
->component_revision
);
347 return sprintf(buf
, "%d\n", rev
);
349 static DEVICE_ATTR_RO(component_revision
);
351 static ssize_t
partition_show(struct device
*dev
,
352 struct device_attribute
*attr
, char *buf
)
354 struct switchtec_dev
*stdev
= to_stdev(dev
);
356 return sprintf(buf
, "%d\n", stdev
->partition
);
358 static DEVICE_ATTR_RO(partition
);
360 static ssize_t
partition_count_show(struct device
*dev
,
361 struct device_attribute
*attr
, char *buf
)
363 struct switchtec_dev
*stdev
= to_stdev(dev
);
365 return sprintf(buf
, "%d\n", stdev
->partition_count
);
367 static DEVICE_ATTR_RO(partition_count
);
369 static struct attribute
*switchtec_device_attrs
[] = {
370 &dev_attr_device_version
.attr
,
371 &dev_attr_fw_version
.attr
,
372 &dev_attr_vendor_id
.attr
,
373 &dev_attr_product_id
.attr
,
374 &dev_attr_product_revision
.attr
,
375 &dev_attr_component_vendor
.attr
,
376 &dev_attr_component_id
.attr
,
377 &dev_attr_component_revision
.attr
,
378 &dev_attr_partition
.attr
,
379 &dev_attr_partition_count
.attr
,
383 ATTRIBUTE_GROUPS(switchtec_device
);
385 static int switchtec_dev_open(struct inode
*inode
, struct file
*filp
)
387 struct switchtec_dev
*stdev
;
388 struct switchtec_user
*stuser
;
390 stdev
= container_of(inode
->i_cdev
, struct switchtec_dev
, cdev
);
392 stuser
= stuser_create(stdev
);
394 return PTR_ERR(stuser
);
396 filp
->private_data
= stuser
;
397 stream_open(inode
, filp
);
399 dev_dbg(&stdev
->dev
, "%s: %p\n", __func__
, stuser
);
404 static int switchtec_dev_release(struct inode
*inode
, struct file
*filp
)
406 struct switchtec_user
*stuser
= filp
->private_data
;
413 static int lock_mutex_and_test_alive(struct switchtec_dev
*stdev
)
415 if (mutex_lock_interruptible(&stdev
->mrpc_mutex
))
419 mutex_unlock(&stdev
->mrpc_mutex
);
426 static ssize_t
switchtec_dev_write(struct file
*filp
, const char __user
*data
,
427 size_t size
, loff_t
*off
)
429 struct switchtec_user
*stuser
= filp
->private_data
;
430 struct switchtec_dev
*stdev
= stuser
->stdev
;
433 if (size
< sizeof(stuser
->cmd
) ||
434 size
> sizeof(stuser
->cmd
) + sizeof(stuser
->data
))
437 stuser
->data_len
= size
- sizeof(stuser
->cmd
);
439 rc
= lock_mutex_and_test_alive(stdev
);
443 if (stuser
->state
!= MRPC_IDLE
) {
448 rc
= copy_from_user(&stuser
->cmd
, data
, sizeof(stuser
->cmd
));
454 data
+= sizeof(stuser
->cmd
);
455 rc
= copy_from_user(&stuser
->data
, data
, size
- sizeof(stuser
->cmd
));
461 rc
= mrpc_queue_cmd(stuser
);
464 mutex_unlock(&stdev
->mrpc_mutex
);
472 static ssize_t
switchtec_dev_read(struct file
*filp
, char __user
*data
,
473 size_t size
, loff_t
*off
)
475 struct switchtec_user
*stuser
= filp
->private_data
;
476 struct switchtec_dev
*stdev
= stuser
->stdev
;
479 if (size
< sizeof(stuser
->cmd
) ||
480 size
> sizeof(stuser
->cmd
) + sizeof(stuser
->data
))
483 rc
= lock_mutex_and_test_alive(stdev
);
487 if (stuser
->state
== MRPC_IDLE
) {
488 mutex_unlock(&stdev
->mrpc_mutex
);
492 stuser
->read_len
= size
- sizeof(stuser
->return_code
);
494 mutex_unlock(&stdev
->mrpc_mutex
);
496 if (filp
->f_flags
& O_NONBLOCK
) {
497 if (!try_wait_for_completion(&stuser
->comp
))
500 rc
= wait_for_completion_interruptible(&stuser
->comp
);
505 rc
= lock_mutex_and_test_alive(stdev
);
509 if (stuser
->state
!= MRPC_DONE
) {
510 mutex_unlock(&stdev
->mrpc_mutex
);
514 rc
= copy_to_user(data
, &stuser
->return_code
,
515 sizeof(stuser
->return_code
));
521 data
+= sizeof(stuser
->return_code
);
522 rc
= copy_to_user(data
, &stuser
->data
,
523 size
- sizeof(stuser
->return_code
));
529 stuser_set_state(stuser
, MRPC_IDLE
);
532 mutex_unlock(&stdev
->mrpc_mutex
);
534 if (stuser
->status
== SWITCHTEC_MRPC_STATUS_DONE
)
536 else if (stuser
->status
== SWITCHTEC_MRPC_STATUS_INTERRUPTED
)
542 static __poll_t
switchtec_dev_poll(struct file
*filp
, poll_table
*wait
)
544 struct switchtec_user
*stuser
= filp
->private_data
;
545 struct switchtec_dev
*stdev
= stuser
->stdev
;
548 poll_wait(filp
, &stuser
->comp
.wait
, wait
);
549 poll_wait(filp
, &stdev
->event_wq
, wait
);
551 if (lock_mutex_and_test_alive(stdev
))
552 return EPOLLIN
| EPOLLRDHUP
| EPOLLOUT
| EPOLLERR
| EPOLLHUP
;
554 mutex_unlock(&stdev
->mrpc_mutex
);
556 if (try_wait_for_completion(&stuser
->comp
))
557 ret
|= EPOLLIN
| EPOLLRDNORM
;
559 if (stuser
->event_cnt
!= atomic_read(&stdev
->event_cnt
))
560 ret
|= EPOLLPRI
| EPOLLRDBAND
;
565 static int ioctl_flash_info(struct switchtec_dev
*stdev
,
566 struct switchtec_ioctl_flash_info __user
*uinfo
)
568 struct switchtec_ioctl_flash_info info
= {0};
569 struct flash_info_regs __iomem
*fi
= stdev
->mmio_flash_info
;
571 info
.flash_length
= ioread32(&fi
->flash_length
);
572 info
.num_partitions
= SWITCHTEC_IOCTL_NUM_PARTITIONS
;
574 if (copy_to_user(uinfo
, &info
, sizeof(info
)))
580 static void set_fw_info_part(struct switchtec_ioctl_flash_part_info
*info
,
581 struct partition_info __iomem
*pi
)
583 info
->address
= ioread32(&pi
->address
);
584 info
->length
= ioread32(&pi
->length
);
587 static int ioctl_flash_part_info(struct switchtec_dev
*stdev
,
588 struct switchtec_ioctl_flash_part_info __user
*uinfo
)
590 struct switchtec_ioctl_flash_part_info info
= {0};
591 struct flash_info_regs __iomem
*fi
= stdev
->mmio_flash_info
;
592 struct sys_info_regs __iomem
*si
= stdev
->mmio_sys_info
;
593 u32 active_addr
= -1;
595 if (copy_from_user(&info
, uinfo
, sizeof(info
)))
598 switch (info
.flash_partition
) {
599 case SWITCHTEC_IOCTL_PART_CFG0
:
600 active_addr
= ioread32(&fi
->active_cfg
);
601 set_fw_info_part(&info
, &fi
->cfg0
);
602 if (ioread16(&si
->cfg_running
) == SWITCHTEC_CFG0_RUNNING
)
603 info
.active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
605 case SWITCHTEC_IOCTL_PART_CFG1
:
606 active_addr
= ioread32(&fi
->active_cfg
);
607 set_fw_info_part(&info
, &fi
->cfg1
);
608 if (ioread16(&si
->cfg_running
) == SWITCHTEC_CFG1_RUNNING
)
609 info
.active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
611 case SWITCHTEC_IOCTL_PART_IMG0
:
612 active_addr
= ioread32(&fi
->active_img
);
613 set_fw_info_part(&info
, &fi
->img0
);
614 if (ioread16(&si
->img_running
) == SWITCHTEC_IMG0_RUNNING
)
615 info
.active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
617 case SWITCHTEC_IOCTL_PART_IMG1
:
618 active_addr
= ioread32(&fi
->active_img
);
619 set_fw_info_part(&info
, &fi
->img1
);
620 if (ioread16(&si
->img_running
) == SWITCHTEC_IMG1_RUNNING
)
621 info
.active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
623 case SWITCHTEC_IOCTL_PART_NVLOG
:
624 set_fw_info_part(&info
, &fi
->nvlog
);
626 case SWITCHTEC_IOCTL_PART_VENDOR0
:
627 set_fw_info_part(&info
, &fi
->vendor
[0]);
629 case SWITCHTEC_IOCTL_PART_VENDOR1
:
630 set_fw_info_part(&info
, &fi
->vendor
[1]);
632 case SWITCHTEC_IOCTL_PART_VENDOR2
:
633 set_fw_info_part(&info
, &fi
->vendor
[2]);
635 case SWITCHTEC_IOCTL_PART_VENDOR3
:
636 set_fw_info_part(&info
, &fi
->vendor
[3]);
638 case SWITCHTEC_IOCTL_PART_VENDOR4
:
639 set_fw_info_part(&info
, &fi
->vendor
[4]);
641 case SWITCHTEC_IOCTL_PART_VENDOR5
:
642 set_fw_info_part(&info
, &fi
->vendor
[5]);
644 case SWITCHTEC_IOCTL_PART_VENDOR6
:
645 set_fw_info_part(&info
, &fi
->vendor
[6]);
647 case SWITCHTEC_IOCTL_PART_VENDOR7
:
648 set_fw_info_part(&info
, &fi
->vendor
[7]);
654 if (info
.address
== active_addr
)
655 info
.active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
657 if (copy_to_user(uinfo
, &info
, sizeof(info
)))
663 static int ioctl_event_summary(struct switchtec_dev
*stdev
,
664 struct switchtec_user
*stuser
,
665 struct switchtec_ioctl_event_summary __user
*usum
,
668 struct switchtec_ioctl_event_summary
*s
;
673 s
= kzalloc(sizeof(*s
), GFP_KERNEL
);
677 s
->global
= ioread32(&stdev
->mmio_sw_event
->global_summary
);
678 s
->part_bitmap
= ioread64(&stdev
->mmio_sw_event
->part_event_bitmap
);
679 s
->local_part
= ioread32(&stdev
->mmio_part_cfg
->part_event_summary
);
681 for (i
= 0; i
< stdev
->partition_count
; i
++) {
682 reg
= ioread32(&stdev
->mmio_part_cfg_all
[i
].part_event_summary
);
686 for (i
= 0; i
< SWITCHTEC_MAX_PFF_CSR
; i
++) {
687 reg
= ioread16(&stdev
->mmio_pff_csr
[i
].vendor_id
);
688 if (reg
!= PCI_VENDOR_ID_MICROSEMI
)
691 reg
= ioread32(&stdev
->mmio_pff_csr
[i
].pff_event_summary
);
695 if (copy_to_user(usum
, s
, size
)) {
700 stuser
->event_cnt
= atomic_read(&stdev
->event_cnt
);
707 static u32 __iomem
*global_ev_reg(struct switchtec_dev
*stdev
,
708 size_t offset
, int index
)
710 return (void __iomem
*)stdev
->mmio_sw_event
+ offset
;
713 static u32 __iomem
*part_ev_reg(struct switchtec_dev
*stdev
,
714 size_t offset
, int index
)
716 return (void __iomem
*)&stdev
->mmio_part_cfg_all
[index
] + offset
;
719 static u32 __iomem
*pff_ev_reg(struct switchtec_dev
*stdev
,
720 size_t offset
, int index
)
722 return (void __iomem
*)&stdev
->mmio_pff_csr
[index
] + offset
;
725 #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
726 #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
727 #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
729 static const struct event_reg
{
731 u32 __iomem
*(*map_reg
)(struct switchtec_dev
*stdev
,
732 size_t offset
, int index
);
734 EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR
, stack_error_event_hdr
),
735 EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR
, ppu_error_event_hdr
),
736 EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR
, isp_error_event_hdr
),
737 EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET
, sys_reset_event_hdr
),
738 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC
, fw_exception_hdr
),
739 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI
, fw_nmi_hdr
),
740 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL
, fw_non_fatal_hdr
),
741 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL
, fw_fatal_hdr
),
742 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP
, twi_mrpc_comp_hdr
),
743 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC
,
744 twi_mrpc_comp_async_hdr
),
745 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP
, cli_mrpc_comp_hdr
),
746 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC
,
747 cli_mrpc_comp_async_hdr
),
748 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT
, gpio_interrupt_hdr
),
749 EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS
, gfms_event_hdr
),
750 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET
, part_reset_hdr
),
751 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP
, mrpc_comp_hdr
),
752 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC
, mrpc_comp_async_hdr
),
753 EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP
, dyn_binding_hdr
),
754 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P
, aer_in_p2p_hdr
),
755 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP
, aer_in_vep_hdr
),
756 EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC
, dpc_hdr
),
757 EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS
, cts_hdr
),
758 EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG
, hotplug_hdr
),
759 EV_PFF(SWITCHTEC_IOCTL_EVENT_IER
, ier_hdr
),
760 EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH
, threshold_hdr
),
761 EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT
, power_mgmt_hdr
),
762 EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING
, tlp_throttling_hdr
),
763 EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED
, force_speed_hdr
),
764 EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT
, credit_timeout_hdr
),
765 EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE
, link_state_hdr
),
768 static u32 __iomem
*event_hdr_addr(struct switchtec_dev
*stdev
,
769 int event_id
, int index
)
773 if (event_id
< 0 || event_id
>= SWITCHTEC_IOCTL_MAX_EVENTS
)
774 return ERR_PTR(-EINVAL
);
776 off
= event_regs
[event_id
].offset
;
778 if (event_regs
[event_id
].map_reg
== part_ev_reg
) {
779 if (index
== SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX
)
780 index
= stdev
->partition
;
781 else if (index
< 0 || index
>= stdev
->partition_count
)
782 return ERR_PTR(-EINVAL
);
783 } else if (event_regs
[event_id
].map_reg
== pff_ev_reg
) {
784 if (index
< 0 || index
>= stdev
->pff_csr_count
)
785 return ERR_PTR(-EINVAL
);
788 return event_regs
[event_id
].map_reg(stdev
, off
, index
);
791 static int event_ctl(struct switchtec_dev
*stdev
,
792 struct switchtec_ioctl_event_ctl
*ctl
)
798 reg
= event_hdr_addr(stdev
, ctl
->event_id
, ctl
->index
);
803 for (i
= 0; i
< ARRAY_SIZE(ctl
->data
); i
++)
804 ctl
->data
[i
] = ioread32(®
[i
+ 1]);
806 ctl
->occurred
= hdr
& SWITCHTEC_EVENT_OCCURRED
;
807 ctl
->count
= (hdr
>> 5) & 0xFF;
809 if (!(ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR
))
810 hdr
&= ~SWITCHTEC_EVENT_CLEAR
;
811 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL
)
812 hdr
|= SWITCHTEC_EVENT_EN_IRQ
;
813 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL
)
814 hdr
&= ~SWITCHTEC_EVENT_EN_IRQ
;
815 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG
)
816 hdr
|= SWITCHTEC_EVENT_EN_LOG
;
817 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG
)
818 hdr
&= ~SWITCHTEC_EVENT_EN_LOG
;
819 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI
)
820 hdr
|= SWITCHTEC_EVENT_EN_CLI
;
821 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI
)
822 hdr
&= ~SWITCHTEC_EVENT_EN_CLI
;
823 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL
)
824 hdr
|= SWITCHTEC_EVENT_FATAL
;
825 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL
)
826 hdr
&= ~SWITCHTEC_EVENT_FATAL
;
832 if (hdr
& SWITCHTEC_EVENT_EN_IRQ
)
833 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL
;
834 if (hdr
& SWITCHTEC_EVENT_EN_LOG
)
835 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG
;
836 if (hdr
& SWITCHTEC_EVENT_EN_CLI
)
837 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI
;
838 if (hdr
& SWITCHTEC_EVENT_FATAL
)
839 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL
;
844 static int ioctl_event_ctl(struct switchtec_dev
*stdev
,
845 struct switchtec_ioctl_event_ctl __user
*uctl
)
849 unsigned int event_flags
;
850 struct switchtec_ioctl_event_ctl ctl
;
852 if (copy_from_user(&ctl
, uctl
, sizeof(ctl
)))
855 if (ctl
.event_id
>= SWITCHTEC_IOCTL_MAX_EVENTS
)
858 if (ctl
.flags
& SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED
)
861 if (ctl
.index
== SWITCHTEC_IOCTL_EVENT_IDX_ALL
) {
862 if (event_regs
[ctl
.event_id
].map_reg
== global_ev_reg
)
864 else if (event_regs
[ctl
.event_id
].map_reg
== part_ev_reg
)
865 nr_idxs
= stdev
->partition_count
;
866 else if (event_regs
[ctl
.event_id
].map_reg
== pff_ev_reg
)
867 nr_idxs
= stdev
->pff_csr_count
;
871 event_flags
= ctl
.flags
;
872 for (ctl
.index
= 0; ctl
.index
< nr_idxs
; ctl
.index
++) {
873 ctl
.flags
= event_flags
;
874 ret
= event_ctl(stdev
, &ctl
);
879 ret
= event_ctl(stdev
, &ctl
);
884 if (copy_to_user(uctl
, &ctl
, sizeof(ctl
)))
890 static int ioctl_pff_to_port(struct switchtec_dev
*stdev
,
891 struct switchtec_ioctl_pff_port
*up
)
895 struct part_cfg_regs
*pcfg
;
896 struct switchtec_ioctl_pff_port p
;
898 if (copy_from_user(&p
, up
, sizeof(p
)))
902 for (part
= 0; part
< stdev
->partition_count
; part
++) {
903 pcfg
= &stdev
->mmio_part_cfg_all
[part
];
906 reg
= ioread32(&pcfg
->usp_pff_inst_id
);
912 reg
= ioread32(&pcfg
->vep_pff_inst_id
);
914 p
.port
= SWITCHTEC_IOCTL_PFF_VEP
;
918 for (i
= 0; i
< ARRAY_SIZE(pcfg
->dsp_pff_inst_id
); i
++) {
919 reg
= ioread32(&pcfg
->dsp_pff_inst_id
[i
]);
931 if (copy_to_user(up
, &p
, sizeof(p
)))
937 static int ioctl_port_to_pff(struct switchtec_dev
*stdev
,
938 struct switchtec_ioctl_pff_port
*up
)
940 struct switchtec_ioctl_pff_port p
;
941 struct part_cfg_regs
*pcfg
;
943 if (copy_from_user(&p
, up
, sizeof(p
)))
946 if (p
.partition
== SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX
)
947 pcfg
= stdev
->mmio_part_cfg
;
948 else if (p
.partition
< stdev
->partition_count
)
949 pcfg
= &stdev
->mmio_part_cfg_all
[p
.partition
];
955 p
.pff
= ioread32(&pcfg
->usp_pff_inst_id
);
957 case SWITCHTEC_IOCTL_PFF_VEP
:
958 p
.pff
= ioread32(&pcfg
->vep_pff_inst_id
);
961 if (p
.port
> ARRAY_SIZE(pcfg
->dsp_pff_inst_id
))
963 p
.port
= array_index_nospec(p
.port
,
964 ARRAY_SIZE(pcfg
->dsp_pff_inst_id
) + 1);
965 p
.pff
= ioread32(&pcfg
->dsp_pff_inst_id
[p
.port
- 1]);
969 if (copy_to_user(up
, &p
, sizeof(p
)))
975 static long switchtec_dev_ioctl(struct file
*filp
, unsigned int cmd
,
978 struct switchtec_user
*stuser
= filp
->private_data
;
979 struct switchtec_dev
*stdev
= stuser
->stdev
;
981 void __user
*argp
= (void __user
*)arg
;
983 rc
= lock_mutex_and_test_alive(stdev
);
988 case SWITCHTEC_IOCTL_FLASH_INFO
:
989 rc
= ioctl_flash_info(stdev
, argp
);
991 case SWITCHTEC_IOCTL_FLASH_PART_INFO
:
992 rc
= ioctl_flash_part_info(stdev
, argp
);
994 case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY
:
995 rc
= ioctl_event_summary(stdev
, stuser
, argp
,
996 sizeof(struct switchtec_ioctl_event_summary_legacy
));
998 case SWITCHTEC_IOCTL_EVENT_CTL
:
999 rc
= ioctl_event_ctl(stdev
, argp
);
1001 case SWITCHTEC_IOCTL_PFF_TO_PORT
:
1002 rc
= ioctl_pff_to_port(stdev
, argp
);
1004 case SWITCHTEC_IOCTL_PORT_TO_PFF
:
1005 rc
= ioctl_port_to_pff(stdev
, argp
);
1007 case SWITCHTEC_IOCTL_EVENT_SUMMARY
:
1008 rc
= ioctl_event_summary(stdev
, stuser
, argp
,
1009 sizeof(struct switchtec_ioctl_event_summary
));
1016 mutex_unlock(&stdev
->mrpc_mutex
);
1020 static const struct file_operations switchtec_fops
= {
1021 .owner
= THIS_MODULE
,
1022 .open
= switchtec_dev_open
,
1023 .release
= switchtec_dev_release
,
1024 .write
= switchtec_dev_write
,
1025 .read
= switchtec_dev_read
,
1026 .poll
= switchtec_dev_poll
,
1027 .unlocked_ioctl
= switchtec_dev_ioctl
,
1028 .compat_ioctl
= compat_ptr_ioctl
,
1031 static void link_event_work(struct work_struct
*work
)
1033 struct switchtec_dev
*stdev
;
1035 stdev
= container_of(work
, struct switchtec_dev
, link_event_work
);
1037 if (stdev
->link_notifier
)
1038 stdev
->link_notifier(stdev
);
1041 static void check_link_state_events(struct switchtec_dev
*stdev
)
1048 for (idx
= 0; idx
< stdev
->pff_csr_count
; idx
++) {
1049 reg
= ioread32(&stdev
->mmio_pff_csr
[idx
].link_state_hdr
);
1050 dev_dbg(&stdev
->dev
, "link_state: %d->%08x\n", idx
, reg
);
1051 count
= (reg
>> 5) & 0xFF;
1053 if (count
!= stdev
->link_event_count
[idx
]) {
1055 stdev
->link_event_count
[idx
] = count
;
1060 schedule_work(&stdev
->link_event_work
);
1063 static void enable_link_state_events(struct switchtec_dev
*stdev
)
1067 for (idx
= 0; idx
< stdev
->pff_csr_count
; idx
++) {
1068 iowrite32(SWITCHTEC_EVENT_CLEAR
|
1069 SWITCHTEC_EVENT_EN_IRQ
,
1070 &stdev
->mmio_pff_csr
[idx
].link_state_hdr
);
1074 static void enable_dma_mrpc(struct switchtec_dev
*stdev
)
1076 writeq(stdev
->dma_mrpc_dma_addr
, &stdev
->mmio_mrpc
->dma_addr
);
1077 flush_wc_buf(stdev
);
1078 iowrite32(SWITCHTEC_DMA_MRPC_EN
, &stdev
->mmio_mrpc
->dma_en
);
1081 static void stdev_release(struct device
*dev
)
1083 struct switchtec_dev
*stdev
= to_stdev(dev
);
1085 if (stdev
->dma_mrpc
) {
1086 iowrite32(0, &stdev
->mmio_mrpc
->dma_en
);
1087 flush_wc_buf(stdev
);
1088 writeq(0, &stdev
->mmio_mrpc
->dma_addr
);
1089 dma_free_coherent(&stdev
->pdev
->dev
, sizeof(*stdev
->dma_mrpc
),
1090 stdev
->dma_mrpc
, stdev
->dma_mrpc_dma_addr
);
1095 static void stdev_kill(struct switchtec_dev
*stdev
)
1097 struct switchtec_user
*stuser
, *tmpuser
;
1099 pci_clear_master(stdev
->pdev
);
1101 cancel_delayed_work_sync(&stdev
->mrpc_timeout
);
1103 /* Mark the hardware as unavailable and complete all completions */
1104 mutex_lock(&stdev
->mrpc_mutex
);
1105 stdev
->alive
= false;
1107 /* Wake up and kill any users waiting on an MRPC request */
1108 list_for_each_entry_safe(stuser
, tmpuser
, &stdev
->mrpc_queue
, list
) {
1109 complete_all(&stuser
->comp
);
1110 list_del_init(&stuser
->list
);
1114 mutex_unlock(&stdev
->mrpc_mutex
);
1116 /* Wake up any users waiting on event_wq */
1117 wake_up_interruptible(&stdev
->event_wq
);
1120 static struct switchtec_dev
*stdev_create(struct pci_dev
*pdev
)
1122 struct switchtec_dev
*stdev
;
1128 stdev
= kzalloc_node(sizeof(*stdev
), GFP_KERNEL
,
1129 dev_to_node(&pdev
->dev
));
1131 return ERR_PTR(-ENOMEM
);
1133 stdev
->alive
= true;
1135 INIT_LIST_HEAD(&stdev
->mrpc_queue
);
1136 mutex_init(&stdev
->mrpc_mutex
);
1137 stdev
->mrpc_busy
= 0;
1138 INIT_WORK(&stdev
->mrpc_work
, mrpc_event_work
);
1139 INIT_DELAYED_WORK(&stdev
->mrpc_timeout
, mrpc_timeout_work
);
1140 INIT_WORK(&stdev
->link_event_work
, link_event_work
);
1141 init_waitqueue_head(&stdev
->event_wq
);
1142 atomic_set(&stdev
->event_cnt
, 0);
1145 device_initialize(dev
);
1146 dev
->class = switchtec_class
;
1147 dev
->parent
= &pdev
->dev
;
1148 dev
->groups
= switchtec_device_groups
;
1149 dev
->release
= stdev_release
;
1151 minor
= ida_simple_get(&switchtec_minor_ida
, 0, 0,
1158 dev
->devt
= MKDEV(MAJOR(switchtec_devt
), minor
);
1159 dev_set_name(dev
, "switchtec%d", minor
);
1161 cdev
= &stdev
->cdev
;
1162 cdev_init(cdev
, &switchtec_fops
);
1163 cdev
->owner
= THIS_MODULE
;
1168 put_device(&stdev
->dev
);
1172 static int mask_event(struct switchtec_dev
*stdev
, int eid
, int idx
)
1174 size_t off
= event_regs
[eid
].offset
;
1175 u32 __iomem
*hdr_reg
;
1178 hdr_reg
= event_regs
[eid
].map_reg(stdev
, off
, idx
);
1179 hdr
= ioread32(hdr_reg
);
1181 if (!(hdr
& SWITCHTEC_EVENT_OCCURRED
&& hdr
& SWITCHTEC_EVENT_EN_IRQ
))
1184 if (eid
== SWITCHTEC_IOCTL_EVENT_LINK_STATE
||
1185 eid
== SWITCHTEC_IOCTL_EVENT_MRPC_COMP
)
1188 dev_dbg(&stdev
->dev
, "%s: %d %d %x\n", __func__
, eid
, idx
, hdr
);
1189 hdr
&= ~(SWITCHTEC_EVENT_EN_IRQ
| SWITCHTEC_EVENT_OCCURRED
);
1190 iowrite32(hdr
, hdr_reg
);
1195 static int mask_all_events(struct switchtec_dev
*stdev
, int eid
)
1200 if (event_regs
[eid
].map_reg
== part_ev_reg
) {
1201 for (idx
= 0; idx
< stdev
->partition_count
; idx
++)
1202 count
+= mask_event(stdev
, eid
, idx
);
1203 } else if (event_regs
[eid
].map_reg
== pff_ev_reg
) {
1204 for (idx
= 0; idx
< stdev
->pff_csr_count
; idx
++) {
1205 if (!stdev
->pff_local
[idx
])
1208 count
+= mask_event(stdev
, eid
, idx
);
1211 count
+= mask_event(stdev
, eid
, 0);
1217 static irqreturn_t
switchtec_event_isr(int irq
, void *dev
)
1219 struct switchtec_dev
*stdev
= dev
;
1221 irqreturn_t ret
= IRQ_NONE
;
1222 int eid
, event_count
= 0;
1224 reg
= ioread32(&stdev
->mmio_part_cfg
->mrpc_comp_hdr
);
1225 if (reg
& SWITCHTEC_EVENT_OCCURRED
) {
1226 dev_dbg(&stdev
->dev
, "%s: mrpc comp\n", __func__
);
1228 schedule_work(&stdev
->mrpc_work
);
1229 iowrite32(reg
, &stdev
->mmio_part_cfg
->mrpc_comp_hdr
);
1232 check_link_state_events(stdev
);
1234 for (eid
= 0; eid
< SWITCHTEC_IOCTL_MAX_EVENTS
; eid
++)
1235 event_count
+= mask_all_events(stdev
, eid
);
1238 atomic_inc(&stdev
->event_cnt
);
1239 wake_up_interruptible(&stdev
->event_wq
);
1240 dev_dbg(&stdev
->dev
, "%s: %d events\n", __func__
,
1249 static irqreturn_t
switchtec_dma_mrpc_isr(int irq
, void *dev
)
1251 struct switchtec_dev
*stdev
= dev
;
1252 irqreturn_t ret
= IRQ_NONE
;
1254 iowrite32(SWITCHTEC_EVENT_CLEAR
|
1255 SWITCHTEC_EVENT_EN_IRQ
,
1256 &stdev
->mmio_part_cfg
->mrpc_comp_hdr
);
1257 schedule_work(&stdev
->mrpc_work
);
1263 static int switchtec_init_isr(struct switchtec_dev
*stdev
)
1273 nvecs
= pci_alloc_irq_vectors(stdev
->pdev
, 1, nirqs
,
1274 PCI_IRQ_MSIX
| PCI_IRQ_MSI
|
1279 event_irq
= ioread32(&stdev
->mmio_part_cfg
->vep_vector_number
);
1280 if (event_irq
< 0 || event_irq
>= nvecs
)
1283 event_irq
= pci_irq_vector(stdev
->pdev
, event_irq
);
1287 rc
= devm_request_irq(&stdev
->pdev
->dev
, event_irq
,
1288 switchtec_event_isr
, 0,
1289 KBUILD_MODNAME
, stdev
);
1294 if (!stdev
->dma_mrpc
)
1297 dma_mrpc_irq
= ioread32(&stdev
->mmio_mrpc
->dma_vector
);
1298 if (dma_mrpc_irq
< 0 || dma_mrpc_irq
>= nvecs
)
1301 dma_mrpc_irq
= pci_irq_vector(stdev
->pdev
, dma_mrpc_irq
);
1302 if (dma_mrpc_irq
< 0)
1303 return dma_mrpc_irq
;
1305 rc
= devm_request_irq(&stdev
->pdev
->dev
, dma_mrpc_irq
,
1306 switchtec_dma_mrpc_isr
, 0,
1307 KBUILD_MODNAME
, stdev
);
1312 static void init_pff(struct switchtec_dev
*stdev
)
1316 struct part_cfg_regs
*pcfg
= stdev
->mmio_part_cfg
;
1318 for (i
= 0; i
< SWITCHTEC_MAX_PFF_CSR
; i
++) {
1319 reg
= ioread16(&stdev
->mmio_pff_csr
[i
].vendor_id
);
1320 if (reg
!= PCI_VENDOR_ID_MICROSEMI
)
1324 stdev
->pff_csr_count
= i
;
1326 reg
= ioread32(&pcfg
->usp_pff_inst_id
);
1327 if (reg
< SWITCHTEC_MAX_PFF_CSR
)
1328 stdev
->pff_local
[reg
] = 1;
1330 reg
= ioread32(&pcfg
->vep_pff_inst_id
);
1331 if (reg
< SWITCHTEC_MAX_PFF_CSR
)
1332 stdev
->pff_local
[reg
] = 1;
1334 for (i
= 0; i
< ARRAY_SIZE(pcfg
->dsp_pff_inst_id
); i
++) {
1335 reg
= ioread32(&pcfg
->dsp_pff_inst_id
[i
]);
1336 if (reg
< SWITCHTEC_MAX_PFF_CSR
)
1337 stdev
->pff_local
[reg
] = 1;
1341 static int switchtec_init_pci(struct switchtec_dev
*stdev
,
1342 struct pci_dev
*pdev
)
1346 unsigned long res_start
, res_len
;
1348 rc
= pcim_enable_device(pdev
);
1352 rc
= dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64));
1356 pci_set_master(pdev
);
1358 res_start
= pci_resource_start(pdev
, 0);
1359 res_len
= pci_resource_len(pdev
, 0);
1361 if (!devm_request_mem_region(&pdev
->dev
, res_start
,
1362 res_len
, KBUILD_MODNAME
))
1365 stdev
->mmio_mrpc
= devm_ioremap_wc(&pdev
->dev
, res_start
,
1366 SWITCHTEC_GAS_TOP_CFG_OFFSET
);
1367 if (!stdev
->mmio_mrpc
)
1370 map
= devm_ioremap(&pdev
->dev
,
1371 res_start
+ SWITCHTEC_GAS_TOP_CFG_OFFSET
,
1372 res_len
- SWITCHTEC_GAS_TOP_CFG_OFFSET
);
1376 stdev
->mmio
= map
- SWITCHTEC_GAS_TOP_CFG_OFFSET
;
1377 stdev
->mmio_sw_event
= stdev
->mmio
+ SWITCHTEC_GAS_SW_EVENT_OFFSET
;
1378 stdev
->mmio_sys_info
= stdev
->mmio
+ SWITCHTEC_GAS_SYS_INFO_OFFSET
;
1379 stdev
->mmio_flash_info
= stdev
->mmio
+ SWITCHTEC_GAS_FLASH_INFO_OFFSET
;
1380 stdev
->mmio_ntb
= stdev
->mmio
+ SWITCHTEC_GAS_NTB_OFFSET
;
1381 stdev
->partition
= ioread8(&stdev
->mmio_sys_info
->partition_id
);
1382 stdev
->partition_count
= ioread8(&stdev
->mmio_ntb
->partition_count
);
1383 stdev
->mmio_part_cfg_all
= stdev
->mmio
+ SWITCHTEC_GAS_PART_CFG_OFFSET
;
1384 stdev
->mmio_part_cfg
= &stdev
->mmio_part_cfg_all
[stdev
->partition
];
1385 stdev
->mmio_pff_csr
= stdev
->mmio
+ SWITCHTEC_GAS_PFF_CSR_OFFSET
;
1387 if (stdev
->partition_count
< 1)
1388 stdev
->partition_count
= 1;
1392 pci_set_drvdata(pdev
, stdev
);
1397 if (ioread32(&stdev
->mmio_mrpc
->dma_ver
) == 0)
1400 stdev
->dma_mrpc
= dma_alloc_coherent(&stdev
->pdev
->dev
,
1401 sizeof(*stdev
->dma_mrpc
),
1402 &stdev
->dma_mrpc_dma_addr
,
1404 if (stdev
->dma_mrpc
== NULL
)
1410 static int switchtec_pci_probe(struct pci_dev
*pdev
,
1411 const struct pci_device_id
*id
)
1413 struct switchtec_dev
*stdev
;
1416 if (pdev
->class == (PCI_CLASS_BRIDGE_OTHER
<< 8))
1417 request_module_nowait("ntb_hw_switchtec");
1419 stdev
= stdev_create(pdev
);
1421 return PTR_ERR(stdev
);
1423 rc
= switchtec_init_pci(stdev
, pdev
);
1427 rc
= switchtec_init_isr(stdev
);
1429 dev_err(&stdev
->dev
, "failed to init isr.\n");
1433 iowrite32(SWITCHTEC_EVENT_CLEAR
|
1434 SWITCHTEC_EVENT_EN_IRQ
,
1435 &stdev
->mmio_part_cfg
->mrpc_comp_hdr
);
1436 enable_link_state_events(stdev
);
1438 if (stdev
->dma_mrpc
)
1439 enable_dma_mrpc(stdev
);
1441 rc
= cdev_device_add(&stdev
->cdev
, &stdev
->dev
);
1445 dev_info(&stdev
->dev
, "Management device registered.\n");
1452 ida_simple_remove(&switchtec_minor_ida
, MINOR(stdev
->dev
.devt
));
1453 put_device(&stdev
->dev
);
1457 static void switchtec_pci_remove(struct pci_dev
*pdev
)
1459 struct switchtec_dev
*stdev
= pci_get_drvdata(pdev
);
1461 pci_set_drvdata(pdev
, NULL
);
1463 cdev_device_del(&stdev
->cdev
, &stdev
->dev
);
1464 ida_simple_remove(&switchtec_minor_ida
, MINOR(stdev
->dev
.devt
));
1465 dev_info(&stdev
->dev
, "unregistered.\n");
1467 put_device(&stdev
->dev
);
/*
 * Each device ID produces two match entries: one for the memory-class
 * management endpoint and one for the bridge-class NTB endpoint.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}
1488 static const struct pci_device_id switchtec_pci_tbl
[] = {
1489 SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
1490 SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
1491 SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
1492 SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
1493 SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
1494 SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
1495 SWITCHTEC_PCI_DEVICE(0x8541), //PSX 24xG3
1496 SWITCHTEC_PCI_DEVICE(0x8542), //PSX 32xG3
1497 SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
1498 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
1499 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
1500 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
1501 SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
1502 SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
1503 SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
1504 SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
1505 SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
1506 SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
1507 SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
1508 SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
1509 SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
1510 SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
1511 SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
1512 SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
1513 SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
1514 SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
1515 SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
1516 SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
1517 SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
1518 SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
1521 MODULE_DEVICE_TABLE(pci
, switchtec_pci_tbl
);
1523 static struct pci_driver switchtec_pci_driver
= {
1524 .name
= KBUILD_MODNAME
,
1525 .id_table
= switchtec_pci_tbl
,
1526 .probe
= switchtec_pci_probe
,
1527 .remove
= switchtec_pci_remove
,
1530 static int __init
switchtec_init(void)
1534 rc
= alloc_chrdev_region(&switchtec_devt
, 0, max_devices
,
1539 switchtec_class
= class_create(THIS_MODULE
, "switchtec");
1540 if (IS_ERR(switchtec_class
)) {
1541 rc
= PTR_ERR(switchtec_class
);
1542 goto err_create_class
;
1545 rc
= pci_register_driver(&switchtec_pci_driver
);
1547 goto err_pci_register
;
1549 pr_info(KBUILD_MODNAME
": loaded.\n");
1554 class_destroy(switchtec_class
);
1557 unregister_chrdev_region(switchtec_devt
, max_devices
);
1561 module_init(switchtec_init
);
1563 static void __exit
switchtec_exit(void)
1565 pci_unregister_driver(&switchtec_pci_driver
);
1566 class_destroy(switchtec_class
);
1567 unregister_chrdev_region(switchtec_devt
, max_devices
);
1568 ida_destroy(&switchtec_minor_ida
);
1570 pr_info(KBUILD_MODNAME
": unloaded.\n");
1572 module_exit(switchtec_exit
);