2 * Microsemi Switchtec(tm) PCIe Management Driver
3 * Copyright (c) 2017, Microsemi Corporation
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 #include <linux/interrupt.h>
17 #include <linux/io-64-nonatomic-lo-hi.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/module.h>
21 #include <linux/ntb.h>
22 #include <linux/pci.h>
23 #include <linux/switchtec.h>
25 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
26 MODULE_VERSION("0.1");
27 MODULE_LICENSE("GPL");
28 MODULE_AUTHOR("Microsemi Corporation");
30 static ulong max_mw_size
= SZ_2M
;
31 module_param(max_mw_size
, ulong
, 0644);
32 MODULE_PARM_DESC(max_mw_size
,
33 "Max memory window size reported to the upper layer");
35 static bool use_lut_mws
;
36 module_param(use_lut_mws
, bool, 0644);
37 MODULE_PARM_DESC(use_lut_mws
,
38 "Enable the use of the LUT based memory windows");
40 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
47 u64 mw_sizes
[MAX_MWS
];
51 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
52 #define LUT_SIZE SZ_64K
54 struct switchtec_ntb
{
56 struct switchtec_dev
*stdev
;
64 struct ntb_info_regs __iomem
*mmio_ntb
;
65 struct ntb_ctrl_regs __iomem
*mmio_ctrl
;
66 struct ntb_dbmsg_regs __iomem
*mmio_dbmsg
;
67 struct ntb_ctrl_regs __iomem
*mmio_self_ctrl
;
68 struct ntb_ctrl_regs __iomem
*mmio_peer_ctrl
;
69 struct ntb_dbmsg_regs __iomem
*mmio_self_dbmsg
;
70 struct ntb_dbmsg_regs __iomem
*mmio_peer_dbmsg
;
72 void __iomem
*mmio_xlink_win
;
74 struct shared_mw
*self_shared
;
75 struct shared_mw __iomem
*peer_shared
;
76 dma_addr_t self_shared_dma
;
83 /* synchronize rmw access of db_mask and hw reg */
84 spinlock_t db_mask_lock
;
89 int direct_mw_to_bar
[MAX_DIRECT_MW
];
91 int peer_nr_direct_mw
;
93 int peer_direct_mw_to_bar
[MAX_DIRECT_MW
];
96 enum ntb_speed link_speed
;
97 enum ntb_width link_width
;
98 struct work_struct link_reinit_work
;
101 static struct switchtec_ntb
*ntb_sndev(struct ntb_dev
*ntb
)
103 return container_of(ntb
, struct switchtec_ntb
, ntb
);
106 static int switchtec_ntb_part_op(struct switchtec_ntb
*sndev
,
107 struct ntb_ctrl_regs __iomem
*ctl
,
108 u32 op
, int wait_status
)
110 static const char * const op_text
[] = {
111 [NTB_CTRL_PART_OP_LOCK
] = "lock",
112 [NTB_CTRL_PART_OP_CFG
] = "configure",
113 [NTB_CTRL_PART_OP_RESET
] = "reset",
121 case NTB_CTRL_PART_OP_LOCK
:
122 status
= NTB_CTRL_PART_STATUS_LOCKING
;
124 case NTB_CTRL_PART_OP_CFG
:
125 status
= NTB_CTRL_PART_STATUS_CONFIGURING
;
127 case NTB_CTRL_PART_OP_RESET
:
128 status
= NTB_CTRL_PART_STATUS_RESETTING
;
134 iowrite32(op
, &ctl
->partition_op
);
136 for (i
= 0; i
< 1000; i
++) {
137 if (msleep_interruptible(50) != 0) {
138 iowrite32(NTB_CTRL_PART_OP_RESET
, &ctl
->partition_op
);
142 ps
= ioread32(&ctl
->partition_status
) & 0xFFFF;
148 if (ps
== wait_status
)
152 dev_err(&sndev
->stdev
->dev
,
153 "Timed out while performing %s (%d). (%08x)\n",
155 ioread32(&ctl
->partition_status
));
163 static int switchtec_ntb_send_msg(struct switchtec_ntb
*sndev
, int idx
,
166 if (idx
< 0 || idx
>= ARRAY_SIZE(sndev
->mmio_peer_dbmsg
->omsg
))
169 iowrite32(val
, &sndev
->mmio_peer_dbmsg
->omsg
[idx
].msg
);
174 static int switchtec_ntb_mw_count(struct ntb_dev
*ntb
, int pidx
)
176 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
177 int nr_direct_mw
= sndev
->peer_nr_direct_mw
;
178 int nr_lut_mw
= sndev
->peer_nr_lut_mw
- sndev
->nr_rsvd_luts
;
180 if (pidx
!= NTB_DEF_PEER_IDX
)
186 return nr_direct_mw
+ nr_lut_mw
;
189 static int lut_index(struct switchtec_ntb
*sndev
, int mw_idx
)
191 return mw_idx
- sndev
->nr_direct_mw
+ sndev
->nr_rsvd_luts
;
194 static int peer_lut_index(struct switchtec_ntb
*sndev
, int mw_idx
)
196 return mw_idx
- sndev
->peer_nr_direct_mw
+ sndev
->nr_rsvd_luts
;
199 static int switchtec_ntb_mw_get_align(struct ntb_dev
*ntb
, int pidx
,
200 int widx
, resource_size_t
*addr_align
,
201 resource_size_t
*size_align
,
202 resource_size_t
*size_max
)
204 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
206 resource_size_t size
;
208 if (pidx
!= NTB_DEF_PEER_IDX
)
211 lut
= widx
>= sndev
->peer_nr_direct_mw
;
212 size
= ioread64(&sndev
->peer_shared
->mw_sizes
[widx
]);
218 *addr_align
= lut
? size
: SZ_4K
;
221 *size_align
= lut
? size
: SZ_4K
;
229 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb
*sndev
, int idx
)
231 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_peer_ctrl
;
232 int bar
= sndev
->peer_direct_mw_to_bar
[idx
];
235 ctl_val
= ioread32(&ctl
->bar_entry
[bar
].ctl
);
236 ctl_val
&= ~NTB_CTRL_BAR_DIR_WIN_EN
;
237 iowrite32(ctl_val
, &ctl
->bar_entry
[bar
].ctl
);
238 iowrite32(0, &ctl
->bar_entry
[bar
].win_size
);
239 iowrite32(0, &ctl
->bar_ext_entry
[bar
].win_size
);
240 iowrite64(sndev
->self_partition
, &ctl
->bar_entry
[bar
].xlate_addr
);
243 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb
*sndev
, int idx
)
245 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_peer_ctrl
;
247 iowrite64(0, &ctl
->lut_entry
[peer_lut_index(sndev
, idx
)]);
250 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb
*sndev
, int idx
,
251 dma_addr_t addr
, resource_size_t size
)
253 int xlate_pos
= ilog2(size
);
254 int bar
= sndev
->peer_direct_mw_to_bar
[idx
];
255 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_peer_ctrl
;
258 ctl_val
= ioread32(&ctl
->bar_entry
[bar
].ctl
);
259 ctl_val
|= NTB_CTRL_BAR_DIR_WIN_EN
;
261 iowrite32(ctl_val
, &ctl
->bar_entry
[bar
].ctl
);
262 iowrite32(xlate_pos
| (lower_32_bits(size
) & 0xFFFFF000),
263 &ctl
->bar_entry
[bar
].win_size
);
264 iowrite32(upper_32_bits(size
), &ctl
->bar_ext_entry
[bar
].win_size
);
265 iowrite64(sndev
->self_partition
| addr
,
266 &ctl
->bar_entry
[bar
].xlate_addr
);
269 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb
*sndev
, int idx
,
270 dma_addr_t addr
, resource_size_t size
)
272 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_peer_ctrl
;
274 iowrite64((NTB_CTRL_LUT_EN
| (sndev
->self_partition
<< 1) | addr
),
275 &ctl
->lut_entry
[peer_lut_index(sndev
, idx
)]);
278 static int switchtec_ntb_mw_set_trans(struct ntb_dev
*ntb
, int pidx
, int widx
,
279 dma_addr_t addr
, resource_size_t size
)
281 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
282 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_peer_ctrl
;
283 int xlate_pos
= ilog2(size
);
284 int nr_direct_mw
= sndev
->peer_nr_direct_mw
;
287 if (pidx
!= NTB_DEF_PEER_IDX
)
290 dev_dbg(&sndev
->stdev
->dev
, "MW %d: part %d addr %pad size %pap\n",
291 widx
, pidx
, &addr
, &size
);
293 if (widx
>= switchtec_ntb_mw_count(ntb
, pidx
))
299 if (!IS_ALIGNED(addr
, BIT_ULL(xlate_pos
))) {
301 * In certain circumstances we can get a buffer that is
302 * not aligned to its size. (Most of the time
303 * dma_alloc_coherent ensures this). This can happen when
304 * using large buffers allocated by the CMA
305 * (see CONFIG_CMA_ALIGNMENT)
307 dev_err(&sndev
->stdev
->dev
,
308 "ERROR: Memory window address is not aligned to it's size!\n");
312 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_LOCK
,
313 NTB_CTRL_PART_STATUS_LOCKED
);
317 if (addr
== 0 || size
== 0) {
318 if (widx
< nr_direct_mw
)
319 switchtec_ntb_mw_clr_direct(sndev
, widx
);
321 switchtec_ntb_mw_clr_lut(sndev
, widx
);
323 if (widx
< nr_direct_mw
)
324 switchtec_ntb_mw_set_direct(sndev
, widx
, addr
, size
);
326 switchtec_ntb_mw_set_lut(sndev
, widx
, addr
, size
);
329 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_CFG
,
330 NTB_CTRL_PART_STATUS_NORMAL
);
333 dev_err(&sndev
->stdev
->dev
,
334 "Hardware reported an error configuring mw %d: %08x\n",
335 widx
, ioread32(&ctl
->bar_error
));
337 if (widx
< nr_direct_mw
)
338 switchtec_ntb_mw_clr_direct(sndev
, widx
);
340 switchtec_ntb_mw_clr_lut(sndev
, widx
);
342 switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_CFG
,
343 NTB_CTRL_PART_STATUS_NORMAL
);
349 static int switchtec_ntb_peer_mw_count(struct ntb_dev
*ntb
)
351 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
352 int nr_lut_mw
= sndev
->nr_lut_mw
- sndev
->nr_rsvd_luts
;
354 return sndev
->nr_direct_mw
+ (use_lut_mws
? nr_lut_mw
: 0);
357 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb
*sndev
,
358 int idx
, phys_addr_t
*base
,
359 resource_size_t
*size
)
361 int bar
= sndev
->direct_mw_to_bar
[idx
];
369 * This is the direct BAR shared with the LUTs
370 * which means the actual window will be offset
371 * by the size of all the LUT entries.
374 offset
= LUT_SIZE
* sndev
->nr_lut_mw
;
378 *base
= pci_resource_start(sndev
->ntb
.pdev
, bar
) + offset
;
381 *size
= pci_resource_len(sndev
->ntb
.pdev
, bar
) - offset
;
382 if (offset
&& *size
> offset
)
385 if (*size
> max_mw_size
)
392 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb
*sndev
,
393 int idx
, phys_addr_t
*base
,
394 resource_size_t
*size
)
396 int bar
= sndev
->direct_mw_to_bar
[0];
399 offset
= LUT_SIZE
* lut_index(sndev
, idx
);
402 *base
= pci_resource_start(sndev
->ntb
.pdev
, bar
) + offset
;
410 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev
*ntb
, int idx
,
412 resource_size_t
*size
)
414 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
416 if (idx
< sndev
->nr_direct_mw
)
417 return switchtec_ntb_direct_get_addr(sndev
, idx
, base
, size
);
418 else if (idx
< switchtec_ntb_peer_mw_count(ntb
))
419 return switchtec_ntb_lut_get_addr(sndev
, idx
, base
, size
);
424 static void switchtec_ntb_part_link_speed(struct switchtec_ntb
*sndev
,
426 enum ntb_speed
*speed
,
427 enum ntb_width
*width
)
429 struct switchtec_dev
*stdev
= sndev
->stdev
;
431 u32 pff
= ioread32(&stdev
->mmio_part_cfg
[partition
].vep_pff_inst_id
);
432 u32 linksta
= ioread32(&stdev
->mmio_pff_csr
[pff
].pci_cap_region
[13]);
435 *speed
= (linksta
>> 16) & 0xF;
438 *width
= (linksta
>> 20) & 0x3F;
441 static void switchtec_ntb_set_link_speed(struct switchtec_ntb
*sndev
)
443 enum ntb_speed self_speed
, peer_speed
;
444 enum ntb_width self_width
, peer_width
;
446 if (!sndev
->link_is_up
) {
447 sndev
->link_speed
= NTB_SPEED_NONE
;
448 sndev
->link_width
= NTB_WIDTH_NONE
;
452 switchtec_ntb_part_link_speed(sndev
, sndev
->self_partition
,
453 &self_speed
, &self_width
);
454 switchtec_ntb_part_link_speed(sndev
, sndev
->peer_partition
,
455 &peer_speed
, &peer_width
);
457 sndev
->link_speed
= min(self_speed
, peer_speed
);
458 sndev
->link_width
= min(self_width
, peer_width
);
461 static int crosslink_is_enabled(struct switchtec_ntb
*sndev
)
463 struct ntb_info_regs __iomem
*inf
= sndev
->mmio_ntb
;
465 return ioread8(&inf
->ntp_info
[sndev
->peer_partition
].xlink_enabled
);
468 static void crosslink_init_dbmsgs(struct switchtec_ntb
*sndev
)
473 if (!crosslink_is_enabled(sndev
))
476 for (i
= 0; i
< ARRAY_SIZE(sndev
->mmio_peer_dbmsg
->imsg
); i
++) {
477 int m
= i
| sndev
->self_partition
<< 2;
479 msg_map
|= m
<< i
* 8;
482 iowrite32(msg_map
, &sndev
->mmio_peer_dbmsg
->msg_map
);
483 iowrite64(sndev
->db_valid_mask
<< sndev
->db_peer_shift
,
484 &sndev
->mmio_peer_dbmsg
->odb_mask
);
492 MSG_LINK_FORCE_DOWN
= 4,
495 static int switchtec_ntb_reinit_peer(struct switchtec_ntb
*sndev
);
497 static void link_reinit_work(struct work_struct
*work
)
499 struct switchtec_ntb
*sndev
;
501 sndev
= container_of(work
, struct switchtec_ntb
, link_reinit_work
);
503 switchtec_ntb_reinit_peer(sndev
);
506 static void switchtec_ntb_check_link(struct switchtec_ntb
*sndev
,
507 enum switchtec_msg msg
)
510 int old
= sndev
->link_is_up
;
512 if (msg
== MSG_LINK_FORCE_DOWN
) {
513 schedule_work(&sndev
->link_reinit_work
);
515 if (sndev
->link_is_up
) {
516 sndev
->link_is_up
= 0;
517 ntb_link_event(&sndev
->ntb
);
518 dev_info(&sndev
->stdev
->dev
, "ntb link forced down\n");
524 link_sta
= sndev
->self_shared
->link_sta
;
526 u64 peer
= ioread64(&sndev
->peer_shared
->magic
);
528 if ((peer
& 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC
)
529 link_sta
= peer
>> 32;
534 sndev
->link_is_up
= link_sta
;
535 switchtec_ntb_set_link_speed(sndev
);
537 if (link_sta
!= old
) {
538 switchtec_ntb_send_msg(sndev
, LINK_MESSAGE
, MSG_CHECK_LINK
);
539 ntb_link_event(&sndev
->ntb
);
540 dev_info(&sndev
->stdev
->dev
, "ntb link %s\n",
541 link_sta
? "up" : "down");
544 crosslink_init_dbmsgs(sndev
);
548 static void switchtec_ntb_link_notification(struct switchtec_dev
*stdev
)
550 struct switchtec_ntb
*sndev
= stdev
->sndev
;
552 switchtec_ntb_check_link(sndev
, MSG_CHECK_LINK
);
555 static u64
switchtec_ntb_link_is_up(struct ntb_dev
*ntb
,
556 enum ntb_speed
*speed
,
557 enum ntb_width
*width
)
559 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
562 *speed
= sndev
->link_speed
;
564 *width
= sndev
->link_width
;
566 return sndev
->link_is_up
;
569 static int switchtec_ntb_link_enable(struct ntb_dev
*ntb
,
570 enum ntb_speed max_speed
,
571 enum ntb_width max_width
)
573 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
575 dev_dbg(&sndev
->stdev
->dev
, "enabling link\n");
577 sndev
->self_shared
->link_sta
= 1;
578 switchtec_ntb_send_msg(sndev
, LINK_MESSAGE
, MSG_LINK_UP
);
580 switchtec_ntb_check_link(sndev
, MSG_CHECK_LINK
);
585 static int switchtec_ntb_link_disable(struct ntb_dev
*ntb
)
587 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
589 dev_dbg(&sndev
->stdev
->dev
, "disabling link\n");
591 sndev
->self_shared
->link_sta
= 0;
592 switchtec_ntb_send_msg(sndev
, LINK_MESSAGE
, MSG_LINK_DOWN
);
594 switchtec_ntb_check_link(sndev
, MSG_CHECK_LINK
);
599 static u64
switchtec_ntb_db_valid_mask(struct ntb_dev
*ntb
)
601 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
603 return sndev
->db_valid_mask
;
606 static int switchtec_ntb_db_vector_count(struct ntb_dev
*ntb
)
611 static u64
switchtec_ntb_db_vector_mask(struct ntb_dev
*ntb
, int db_vector
)
613 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
615 if (db_vector
< 0 || db_vector
> 1)
618 return sndev
->db_valid_mask
;
621 static u64
switchtec_ntb_db_read(struct ntb_dev
*ntb
)
624 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
626 ret
= ioread64(&sndev
->mmio_self_dbmsg
->idb
) >> sndev
->db_shift
;
628 return ret
& sndev
->db_valid_mask
;
631 static int switchtec_ntb_db_clear(struct ntb_dev
*ntb
, u64 db_bits
)
633 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
635 iowrite64(db_bits
<< sndev
->db_shift
, &sndev
->mmio_self_dbmsg
->idb
);
640 static int switchtec_ntb_db_set_mask(struct ntb_dev
*ntb
, u64 db_bits
)
642 unsigned long irqflags
;
643 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
645 if (db_bits
& ~sndev
->db_valid_mask
)
648 spin_lock_irqsave(&sndev
->db_mask_lock
, irqflags
);
650 sndev
->db_mask
|= db_bits
<< sndev
->db_shift
;
651 iowrite64(~sndev
->db_mask
, &sndev
->mmio_self_dbmsg
->idb_mask
);
653 spin_unlock_irqrestore(&sndev
->db_mask_lock
, irqflags
);
658 static int switchtec_ntb_db_clear_mask(struct ntb_dev
*ntb
, u64 db_bits
)
660 unsigned long irqflags
;
661 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
663 if (db_bits
& ~sndev
->db_valid_mask
)
666 spin_lock_irqsave(&sndev
->db_mask_lock
, irqflags
);
668 sndev
->db_mask
&= ~(db_bits
<< sndev
->db_shift
);
669 iowrite64(~sndev
->db_mask
, &sndev
->mmio_self_dbmsg
->idb_mask
);
671 spin_unlock_irqrestore(&sndev
->db_mask_lock
, irqflags
);
676 static u64
switchtec_ntb_db_read_mask(struct ntb_dev
*ntb
)
678 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
680 return (sndev
->db_mask
>> sndev
->db_shift
) & sndev
->db_valid_mask
;
683 static int switchtec_ntb_peer_db_addr(struct ntb_dev
*ntb
,
684 phys_addr_t
*db_addr
,
685 resource_size_t
*db_size
,
689 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
690 unsigned long offset
;
692 if (unlikely(db_bit
>= BITS_PER_LONG_LONG
))
695 offset
= (unsigned long)sndev
->mmio_peer_dbmsg
->odb
-
696 (unsigned long)sndev
->stdev
->mmio
;
698 offset
+= sndev
->db_shift
/ 8;
701 *db_addr
= pci_resource_start(ntb
->pdev
, 0) + offset
;
703 *db_size
= sizeof(u32
);
705 *db_data
= BIT_ULL(db_bit
) << sndev
->db_peer_shift
;
710 static int switchtec_ntb_peer_db_set(struct ntb_dev
*ntb
, u64 db_bits
)
712 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
714 iowrite64(db_bits
<< sndev
->db_peer_shift
,
715 &sndev
->mmio_peer_dbmsg
->odb
);
720 static int switchtec_ntb_spad_count(struct ntb_dev
*ntb
)
722 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
724 return ARRAY_SIZE(sndev
->self_shared
->spad
);
727 static u32
switchtec_ntb_spad_read(struct ntb_dev
*ntb
, int idx
)
729 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
731 if (idx
< 0 || idx
>= ARRAY_SIZE(sndev
->self_shared
->spad
))
734 if (!sndev
->self_shared
)
737 return sndev
->self_shared
->spad
[idx
];
740 static int switchtec_ntb_spad_write(struct ntb_dev
*ntb
, int idx
, u32 val
)
742 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
744 if (idx
< 0 || idx
>= ARRAY_SIZE(sndev
->self_shared
->spad
))
747 if (!sndev
->self_shared
)
750 sndev
->self_shared
->spad
[idx
] = val
;
755 static u32
switchtec_ntb_peer_spad_read(struct ntb_dev
*ntb
, int pidx
,
758 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
760 if (pidx
!= NTB_DEF_PEER_IDX
)
763 if (sidx
< 0 || sidx
>= ARRAY_SIZE(sndev
->peer_shared
->spad
))
766 if (!sndev
->peer_shared
)
769 return ioread32(&sndev
->peer_shared
->spad
[sidx
]);
772 static int switchtec_ntb_peer_spad_write(struct ntb_dev
*ntb
, int pidx
,
775 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
777 if (pidx
!= NTB_DEF_PEER_IDX
)
780 if (sidx
< 0 || sidx
>= ARRAY_SIZE(sndev
->peer_shared
->spad
))
783 if (!sndev
->peer_shared
)
786 iowrite32(val
, &sndev
->peer_shared
->spad
[sidx
]);
791 static int switchtec_ntb_peer_spad_addr(struct ntb_dev
*ntb
, int pidx
,
792 int sidx
, phys_addr_t
*spad_addr
)
794 struct switchtec_ntb
*sndev
= ntb_sndev(ntb
);
795 unsigned long offset
;
797 if (pidx
!= NTB_DEF_PEER_IDX
)
800 offset
= (unsigned long)&sndev
->peer_shared
->spad
[sidx
] -
801 (unsigned long)sndev
->stdev
->mmio
;
804 *spad_addr
= pci_resource_start(ntb
->pdev
, 0) + offset
;
809 static const struct ntb_dev_ops switchtec_ntb_ops
= {
810 .mw_count
= switchtec_ntb_mw_count
,
811 .mw_get_align
= switchtec_ntb_mw_get_align
,
812 .mw_set_trans
= switchtec_ntb_mw_set_trans
,
813 .peer_mw_count
= switchtec_ntb_peer_mw_count
,
814 .peer_mw_get_addr
= switchtec_ntb_peer_mw_get_addr
,
815 .link_is_up
= switchtec_ntb_link_is_up
,
816 .link_enable
= switchtec_ntb_link_enable
,
817 .link_disable
= switchtec_ntb_link_disable
,
818 .db_valid_mask
= switchtec_ntb_db_valid_mask
,
819 .db_vector_count
= switchtec_ntb_db_vector_count
,
820 .db_vector_mask
= switchtec_ntb_db_vector_mask
,
821 .db_read
= switchtec_ntb_db_read
,
822 .db_clear
= switchtec_ntb_db_clear
,
823 .db_set_mask
= switchtec_ntb_db_set_mask
,
824 .db_clear_mask
= switchtec_ntb_db_clear_mask
,
825 .db_read_mask
= switchtec_ntb_db_read_mask
,
826 .peer_db_addr
= switchtec_ntb_peer_db_addr
,
827 .peer_db_set
= switchtec_ntb_peer_db_set
,
828 .spad_count
= switchtec_ntb_spad_count
,
829 .spad_read
= switchtec_ntb_spad_read
,
830 .spad_write
= switchtec_ntb_spad_write
,
831 .peer_spad_read
= switchtec_ntb_peer_spad_read
,
832 .peer_spad_write
= switchtec_ntb_peer_spad_write
,
833 .peer_spad_addr
= switchtec_ntb_peer_spad_addr
,
836 static int switchtec_ntb_init_sndev(struct switchtec_ntb
*sndev
)
843 sndev
->ntb
.pdev
= sndev
->stdev
->pdev
;
844 sndev
->ntb
.topo
= NTB_TOPO_SWITCH
;
845 sndev
->ntb
.ops
= &switchtec_ntb_ops
;
847 INIT_WORK(&sndev
->link_reinit_work
, link_reinit_work
);
849 sndev
->self_partition
= sndev
->stdev
->partition
;
851 sndev
->mmio_ntb
= sndev
->stdev
->mmio_ntb
;
853 self
= sndev
->self_partition
;
854 tpart_vec
= ioread32(&sndev
->mmio_ntb
->ntp_info
[self
].target_part_high
);
856 tpart_vec
|= ioread32(&sndev
->mmio_ntb
->ntp_info
[self
].target_part_low
);
858 part_map
= ioread64(&sndev
->mmio_ntb
->ep_map
);
859 part_map
&= ~(1 << sndev
->self_partition
);
861 if (!ffs(tpart_vec
)) {
862 if (sndev
->stdev
->partition_count
!= 2) {
863 dev_err(&sndev
->stdev
->dev
,
864 "ntb target partition not defined\n");
870 dev_err(&sndev
->stdev
->dev
,
871 "peer partition is not NT partition\n");
875 sndev
->peer_partition
= bit
- 1;
877 if (ffs(tpart_vec
) != fls(tpart_vec
)) {
878 dev_err(&sndev
->stdev
->dev
,
879 "ntb driver only supports 1 pair of 1-1 ntb mapping\n");
883 sndev
->peer_partition
= ffs(tpart_vec
) - 1;
884 if (!(part_map
& (1 << sndev
->peer_partition
))) {
885 dev_err(&sndev
->stdev
->dev
,
886 "ntb target partition is not NT partition\n");
891 dev_dbg(&sndev
->stdev
->dev
, "Partition ID %d of %d\n",
892 sndev
->self_partition
, sndev
->stdev
->partition_count
);
894 sndev
->mmio_ctrl
= (void * __iomem
)sndev
->mmio_ntb
+
895 SWITCHTEC_NTB_REG_CTRL_OFFSET
;
896 sndev
->mmio_dbmsg
= (void * __iomem
)sndev
->mmio_ntb
+
897 SWITCHTEC_NTB_REG_DBMSG_OFFSET
;
899 sndev
->mmio_self_ctrl
= &sndev
->mmio_ctrl
[sndev
->self_partition
];
900 sndev
->mmio_peer_ctrl
= &sndev
->mmio_ctrl
[sndev
->peer_partition
];
901 sndev
->mmio_self_dbmsg
= &sndev
->mmio_dbmsg
[sndev
->self_partition
];
902 sndev
->mmio_peer_dbmsg
= sndev
->mmio_self_dbmsg
;
907 static int config_rsvd_lut_win(struct switchtec_ntb
*sndev
,
908 struct ntb_ctrl_regs __iomem
*ctl
,
909 int lut_idx
, int partition
, u64 addr
)
911 int peer_bar
= sndev
->peer_direct_mw_to_bar
[0];
915 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_LOCK
,
916 NTB_CTRL_PART_STATUS_LOCKED
);
920 ctl_val
= ioread32(&ctl
->bar_entry
[peer_bar
].ctl
);
922 ctl_val
|= NTB_CTRL_BAR_LUT_WIN_EN
;
923 ctl_val
|= ilog2(LUT_SIZE
) << 8;
924 ctl_val
|= (sndev
->nr_lut_mw
- 1) << 14;
925 iowrite32(ctl_val
, &ctl
->bar_entry
[peer_bar
].ctl
);
927 iowrite64((NTB_CTRL_LUT_EN
| (partition
<< 1) | addr
),
928 &ctl
->lut_entry
[lut_idx
]);
930 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_CFG
,
931 NTB_CTRL_PART_STATUS_NORMAL
);
933 u32 bar_error
, lut_error
;
935 bar_error
= ioread32(&ctl
->bar_error
);
936 lut_error
= ioread32(&ctl
->lut_error
);
937 dev_err(&sndev
->stdev
->dev
,
938 "Error setting up reserved lut window: %08x / %08x\n",
939 bar_error
, lut_error
);
946 static int config_req_id_table(struct switchtec_ntb
*sndev
,
947 struct ntb_ctrl_regs __iomem
*mmio_ctrl
,
948 int *req_ids
, int count
)
954 if (ioread32(&mmio_ctrl
->req_id_table_size
) < count
) {
955 dev_err(&sndev
->stdev
->dev
,
956 "Not enough requester IDs available.\n");
960 rc
= switchtec_ntb_part_op(sndev
, mmio_ctrl
,
961 NTB_CTRL_PART_OP_LOCK
,
962 NTB_CTRL_PART_STATUS_LOCKED
);
966 iowrite32(NTB_PART_CTRL_ID_PROT_DIS
,
967 &mmio_ctrl
->partition_ctrl
);
969 for (i
= 0; i
< count
; i
++) {
970 iowrite32(req_ids
[i
] << 16 | NTB_CTRL_REQ_ID_EN
,
971 &mmio_ctrl
->req_id_table
[i
]);
973 proxy_id
= ioread32(&mmio_ctrl
->req_id_table
[i
]);
974 dev_dbg(&sndev
->stdev
->dev
,
975 "Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
976 req_ids
[i
] >> 8, (req_ids
[i
] >> 3) & 0x1F,
977 req_ids
[i
] & 0x7, (proxy_id
>> 4) & 0x1F,
978 (proxy_id
>> 1) & 0x7);
981 rc
= switchtec_ntb_part_op(sndev
, mmio_ctrl
,
982 NTB_CTRL_PART_OP_CFG
,
983 NTB_CTRL_PART_STATUS_NORMAL
);
986 error
= ioread32(&mmio_ctrl
->req_id_error
);
987 dev_err(&sndev
->stdev
->dev
,
988 "Error setting up the requester ID table: %08x\n",
995 static int crosslink_setup_mws(struct switchtec_ntb
*sndev
, int ntb_lut_idx
,
996 u64
*mw_addrs
, int mw_count
)
999 struct ntb_ctrl_regs __iomem
*ctl
= sndev
->mmio_self_ctrl
;
1001 size_t size
, offset
;
1006 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_LOCK
,
1007 NTB_CTRL_PART_STATUS_LOCKED
);
1011 for (i
= 0; i
< sndev
->nr_lut_mw
; i
++) {
1012 if (i
== ntb_lut_idx
)
1015 addr
= mw_addrs
[0] + LUT_SIZE
* i
;
1017 iowrite64((NTB_CTRL_LUT_EN
| (sndev
->peer_partition
<< 1) |
1019 &ctl
->lut_entry
[i
]);
1022 sndev
->nr_direct_mw
= min_t(int, sndev
->nr_direct_mw
, mw_count
);
1024 for (i
= 0; i
< sndev
->nr_direct_mw
; i
++) {
1025 bar
= sndev
->direct_mw_to_bar
[i
];
1026 offset
= (i
== 0) ? LUT_SIZE
* sndev
->nr_lut_mw
: 0;
1027 addr
= mw_addrs
[i
] + offset
;
1028 size
= pci_resource_len(sndev
->ntb
.pdev
, bar
) - offset
;
1029 xlate_pos
= ilog2(size
);
1031 if (offset
&& size
> offset
)
1034 ctl_val
= ioread32(&ctl
->bar_entry
[bar
].ctl
);
1035 ctl_val
|= NTB_CTRL_BAR_DIR_WIN_EN
;
1037 iowrite32(ctl_val
, &ctl
->bar_entry
[bar
].ctl
);
1038 iowrite32(xlate_pos
| (lower_32_bits(size
) & 0xFFFFF000),
1039 &ctl
->bar_entry
[bar
].win_size
);
1040 iowrite32(upper_32_bits(size
), &ctl
->bar_ext_entry
[bar
].win_size
);
1041 iowrite64(sndev
->peer_partition
| addr
,
1042 &ctl
->bar_entry
[bar
].xlate_addr
);
1045 rc
= switchtec_ntb_part_op(sndev
, ctl
, NTB_CTRL_PART_OP_CFG
,
1046 NTB_CTRL_PART_STATUS_NORMAL
);
1048 u32 bar_error
, lut_error
;
1050 bar_error
= ioread32(&ctl
->bar_error
);
1051 lut_error
= ioread32(&ctl
->lut_error
);
1052 dev_err(&sndev
->stdev
->dev
,
1053 "Error setting up cross link windows: %08x / %08x\n",
1054 bar_error
, lut_error
);
1061 static int crosslink_setup_req_ids(struct switchtec_ntb
*sndev
,
1062 struct ntb_ctrl_regs __iomem
*mmio_ctrl
)
1068 for (i
= 0; i
< ARRAY_SIZE(req_ids
); i
++) {
1069 proxy_id
= ioread32(&sndev
->mmio_self_ctrl
->req_id_table
[i
]);
1071 if (!(proxy_id
& NTB_CTRL_REQ_ID_EN
))
1074 req_ids
[i
] = ((proxy_id
>> 1) & 0xFF);
1077 return config_req_id_table(sndev
, mmio_ctrl
, req_ids
, i
);
1081 * In crosslink configuration there is a virtual partition in the
1082 * middle of the two switches. The BARs in this partition have to be
1083 * enumerated and assigned addresses.
1085 static int crosslink_enum_partition(struct switchtec_ntb
*sndev
,
1088 struct part_cfg_regs __iomem
*part_cfg
=
1089 &sndev
->stdev
->mmio_part_cfg_all
[sndev
->peer_partition
];
1090 u32 pff
= ioread32(&part_cfg
->vep_pff_inst_id
);
1091 struct pff_csr_regs __iomem
*mmio_pff
=
1092 &sndev
->stdev
->mmio_pff_csr
[pff
];
1093 const u64 bar_space
= 0x1000000000LL
;
1098 iowrite16(0x6, &mmio_pff
->pcicmd
);
1100 for (i
= 0; i
< ARRAY_SIZE(mmio_pff
->pci_bar64
); i
++) {
1101 iowrite64(bar_space
* i
, &mmio_pff
->pci_bar64
[i
]);
1102 bar_addr
= ioread64(&mmio_pff
->pci_bar64
[i
]);
1105 dev_dbg(&sndev
->stdev
->dev
,
1106 "Crosslink BAR%d addr: %llx\n",
1109 if (bar_addr
!= bar_space
* i
)
1112 bar_addrs
[bar_cnt
++] = bar_addr
;
1118 static int switchtec_ntb_init_crosslink(struct switchtec_ntb
*sndev
)
1121 int bar
= sndev
->direct_mw_to_bar
[0];
1122 const int ntb_lut_idx
= 1;
1128 if (!crosslink_is_enabled(sndev
))
1131 dev_info(&sndev
->stdev
->dev
, "Using crosslink configuration\n");
1132 sndev
->ntb
.topo
= NTB_TOPO_CROSSLINK
;
1134 bar_cnt
= crosslink_enum_partition(sndev
, bar_addrs
);
1135 if (bar_cnt
< sndev
->nr_direct_mw
+ 1) {
1136 dev_err(&sndev
->stdev
->dev
,
1137 "Error enumerating crosslink partition\n");
1141 addr
= (bar_addrs
[0] + SWITCHTEC_GAS_NTB_OFFSET
+
1142 SWITCHTEC_NTB_REG_DBMSG_OFFSET
+
1143 sizeof(struct ntb_dbmsg_regs
) * sndev
->peer_partition
);
1145 offset
= addr
& (LUT_SIZE
- 1);
1148 rc
= config_rsvd_lut_win(sndev
, sndev
->mmio_self_ctrl
, ntb_lut_idx
,
1149 sndev
->peer_partition
, addr
);
1153 rc
= crosslink_setup_mws(sndev
, ntb_lut_idx
, &bar_addrs
[1],
1158 rc
= crosslink_setup_req_ids(sndev
, sndev
->mmio_peer_ctrl
);
1162 sndev
->mmio_xlink_win
= pci_iomap_range(sndev
->stdev
->pdev
, bar
,
1163 LUT_SIZE
, LUT_SIZE
);
1164 if (!sndev
->mmio_xlink_win
) {
1169 sndev
->mmio_peer_dbmsg
= sndev
->mmio_xlink_win
+ offset
;
1170 sndev
->nr_rsvd_luts
++;
1172 crosslink_init_dbmsgs(sndev
);
1177 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb
*sndev
)
1179 if (sndev
->mmio_xlink_win
)
1180 pci_iounmap(sndev
->stdev
->pdev
, sndev
->mmio_xlink_win
);
1183 static int map_bars(int *map
, struct ntb_ctrl_regs __iomem
*ctrl
)
1188 for (i
= 0; i
< ARRAY_SIZE(ctrl
->bar_entry
); i
++) {
1189 u32 r
= ioread32(&ctrl
->bar_entry
[i
].ctl
);
1191 if (r
& NTB_CTRL_BAR_VALID
)
1198 static void switchtec_ntb_init_mw(struct switchtec_ntb
*sndev
)
1200 sndev
->nr_direct_mw
= map_bars(sndev
->direct_mw_to_bar
,
1201 sndev
->mmio_self_ctrl
);
1203 sndev
->nr_lut_mw
= ioread16(&sndev
->mmio_self_ctrl
->lut_table_entries
);
1204 sndev
->nr_lut_mw
= rounddown_pow_of_two(sndev
->nr_lut_mw
);
1206 dev_dbg(&sndev
->stdev
->dev
, "MWs: %d direct, %d lut\n",
1207 sndev
->nr_direct_mw
, sndev
->nr_lut_mw
);
1209 sndev
->peer_nr_direct_mw
= map_bars(sndev
->peer_direct_mw_to_bar
,
1210 sndev
->mmio_peer_ctrl
);
1212 sndev
->peer_nr_lut_mw
=
1213 ioread16(&sndev
->mmio_peer_ctrl
->lut_table_entries
);
1214 sndev
->peer_nr_lut_mw
= rounddown_pow_of_two(sndev
->peer_nr_lut_mw
);
1216 dev_dbg(&sndev
->stdev
->dev
, "Peer MWs: %d direct, %d lut\n",
1217 sndev
->peer_nr_direct_mw
, sndev
->peer_nr_lut_mw
);
1222 * There are 64 doorbells in the switch hardware but this is
1223 * shared among all partitions. So we must split them in half
1224 * (32 for each partition). However, the message interrupts are
1225 * also shared with the top 4 doorbells so we just limit this to
1226 * 28 doorbells per partition.
1228 * In crosslink mode, each side has its own dbmsg register so
1229 * they can each use all 60 of the available doorbells.
1231 static void switchtec_ntb_init_db(struct switchtec_ntb
*sndev
)
1233 sndev
->db_mask
= 0x0FFFFFFFFFFFFFFFULL
;
1235 if (sndev
->mmio_peer_dbmsg
!= sndev
->mmio_self_dbmsg
) {
1236 sndev
->db_shift
= 0;
1237 sndev
->db_peer_shift
= 0;
1238 sndev
->db_valid_mask
= sndev
->db_mask
;
1239 } else if (sndev
->self_partition
< sndev
->peer_partition
) {
1240 sndev
->db_shift
= 0;
1241 sndev
->db_peer_shift
= 32;
1242 sndev
->db_valid_mask
= 0x0FFFFFFF;
1244 sndev
->db_shift
= 32;
1245 sndev
->db_peer_shift
= 0;
1246 sndev
->db_valid_mask
= 0x0FFFFFFF;
1249 iowrite64(~sndev
->db_mask
, &sndev
->mmio_self_dbmsg
->idb_mask
);
1250 iowrite64(sndev
->db_valid_mask
<< sndev
->db_peer_shift
,
1251 &sndev
->mmio_peer_dbmsg
->odb_mask
);
1253 dev_dbg(&sndev
->stdev
->dev
, "dbs: shift %d/%d, mask %016llx\n",
1254 sndev
->db_shift
, sndev
->db_peer_shift
, sndev
->db_valid_mask
);
1257 static void switchtec_ntb_init_msgs(struct switchtec_ntb
*sndev
)
1262 for (i
= 0; i
< ARRAY_SIZE(sndev
->mmio_self_dbmsg
->imsg
); i
++) {
1263 int m
= i
| sndev
->peer_partition
<< 2;
1265 msg_map
|= m
<< i
* 8;
1268 iowrite32(msg_map
, &sndev
->mmio_self_dbmsg
->msg_map
);
1270 for (i
= 0; i
< ARRAY_SIZE(sndev
->mmio_self_dbmsg
->imsg
); i
++)
1271 iowrite64(NTB_DBMSG_IMSG_STATUS
| NTB_DBMSG_IMSG_MASK
,
1272 &sndev
->mmio_self_dbmsg
->imsg
[i
]);
1276 switchtec_ntb_init_req_id_table(struct switchtec_ntb
*sndev
)
1281 * Root Complex Requester ID (which is 0:00.0)
1286 * Host Bridge Requester ID (as read from the mmap address)
1288 req_ids
[1] = ioread16(&sndev
->mmio_ntb
->requester_id
);
1290 return config_req_id_table(sndev
, sndev
->mmio_self_ctrl
, req_ids
,
1291 ARRAY_SIZE(req_ids
));
1294 static void switchtec_ntb_init_shared(struct switchtec_ntb
*sndev
)
1298 memset(sndev
->self_shared
, 0, LUT_SIZE
);
1299 sndev
->self_shared
->magic
= SWITCHTEC_NTB_MAGIC
;
1300 sndev
->self_shared
->partition_id
= sndev
->stdev
->partition
;
1302 for (i
= 0; i
< sndev
->nr_direct_mw
; i
++) {
1303 int bar
= sndev
->direct_mw_to_bar
[i
];
1304 resource_size_t sz
= pci_resource_len(sndev
->stdev
->pdev
, bar
);
1307 sz
= min_t(resource_size_t
, sz
,
1308 LUT_SIZE
* sndev
->nr_lut_mw
);
1310 sndev
->self_shared
->mw_sizes
[i
] = sz
;
1313 for (i
= 0; i
< sndev
->nr_lut_mw
; i
++) {
1314 int idx
= sndev
->nr_direct_mw
+ i
;
1316 sndev
->self_shared
->mw_sizes
[idx
] = LUT_SIZE
;
1320 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb
*sndev
)
1322 int self_bar
= sndev
->direct_mw_to_bar
[0];
1325 sndev
->nr_rsvd_luts
++;
1326 sndev
->self_shared
= dma_alloc_coherent(&sndev
->stdev
->pdev
->dev
,
1328 &sndev
->self_shared_dma
,
1330 if (!sndev
->self_shared
) {
1331 dev_err(&sndev
->stdev
->dev
,
1332 "unable to allocate memory for shared mw\n");
1336 switchtec_ntb_init_shared(sndev
);
1338 rc
= config_rsvd_lut_win(sndev
, sndev
->mmio_peer_ctrl
, 0,
1339 sndev
->self_partition
,
1340 sndev
->self_shared_dma
);
1342 goto unalloc_and_exit
;
1344 sndev
->peer_shared
= pci_iomap(sndev
->stdev
->pdev
, self_bar
, LUT_SIZE
);
1345 if (!sndev
->peer_shared
) {
1347 goto unalloc_and_exit
;
1350 dev_dbg(&sndev
->stdev
->dev
, "Shared MW Ready\n");
1354 dma_free_coherent(&sndev
->stdev
->pdev
->dev
, LUT_SIZE
,
1355 sndev
->self_shared
, sndev
->self_shared_dma
);
1360 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb
*sndev
)
1362 if (sndev
->peer_shared
)
1363 pci_iounmap(sndev
->stdev
->pdev
, sndev
->peer_shared
);
1365 if (sndev
->self_shared
)
1366 dma_free_coherent(&sndev
->stdev
->pdev
->dev
, LUT_SIZE
,
1368 sndev
->self_shared_dma
);
1369 sndev
->nr_rsvd_luts
--;
1372 static irqreturn_t
switchtec_ntb_doorbell_isr(int irq
, void *dev
)
1374 struct switchtec_ntb
*sndev
= dev
;
1376 dev_dbg(&sndev
->stdev
->dev
, "doorbell\n");
1378 ntb_db_event(&sndev
->ntb
, 0);
1383 static irqreturn_t
switchtec_ntb_message_isr(int irq
, void *dev
)
1386 struct switchtec_ntb
*sndev
= dev
;
1388 for (i
= 0; i
< ARRAY_SIZE(sndev
->mmio_self_dbmsg
->imsg
); i
++) {
1389 u64 msg
= ioread64(&sndev
->mmio_self_dbmsg
->imsg
[i
]);
1391 if (msg
& NTB_DBMSG_IMSG_STATUS
) {
1392 dev_dbg(&sndev
->stdev
->dev
, "message: %d %08x\n",
1394 iowrite8(1, &sndev
->mmio_self_dbmsg
->imsg
[i
].status
);
1396 if (i
== LINK_MESSAGE
)
1397 switchtec_ntb_check_link(sndev
, msg
);
1404 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb
*sndev
)
1408 int doorbell_irq
= 0;
1409 int message_irq
= 0;
1411 int idb_vecs
= sizeof(sndev
->mmio_self_dbmsg
->idb_vec_map
);
1413 event_irq
= ioread32(&sndev
->stdev
->mmio_part_cfg
->vep_vector_number
);
1415 while (doorbell_irq
== event_irq
)
1417 while (message_irq
== doorbell_irq
||
1418 message_irq
== event_irq
)
1421 dev_dbg(&sndev
->stdev
->dev
, "irqs - event: %d, db: %d, msgs: %d\n",
1422 event_irq
, doorbell_irq
, message_irq
);
1424 for (i
= 0; i
< idb_vecs
- 4; i
++)
1425 iowrite8(doorbell_irq
,
1426 &sndev
->mmio_self_dbmsg
->idb_vec_map
[i
]);
1428 for (; i
< idb_vecs
; i
++)
1429 iowrite8(message_irq
,
1430 &sndev
->mmio_self_dbmsg
->idb_vec_map
[i
]);
1432 sndev
->doorbell_irq
= pci_irq_vector(sndev
->stdev
->pdev
, doorbell_irq
);
1433 sndev
->message_irq
= pci_irq_vector(sndev
->stdev
->pdev
, message_irq
);
1435 rc
= request_irq(sndev
->doorbell_irq
,
1436 switchtec_ntb_doorbell_isr
, 0,
1437 "switchtec_ntb_doorbell", sndev
);
1441 rc
= request_irq(sndev
->message_irq
,
1442 switchtec_ntb_message_isr
, 0,
1443 "switchtec_ntb_message", sndev
);
1445 free_irq(sndev
->doorbell_irq
, sndev
);
1452 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb
*sndev
)
1454 free_irq(sndev
->doorbell_irq
, sndev
);
1455 free_irq(sndev
->message_irq
, sndev
);
1458 static int switchtec_ntb_reinit_peer(struct switchtec_ntb
*sndev
)
1460 dev_info(&sndev
->stdev
->dev
, "peer reinitialized\n");
1461 switchtec_ntb_deinit_shared_mw(sndev
);
1462 switchtec_ntb_init_mw(sndev
);
1463 return switchtec_ntb_init_shared_mw(sndev
);
1466 static int switchtec_ntb_add(struct device
*dev
,
1467 struct class_interface
*class_intf
)
1469 struct switchtec_dev
*stdev
= to_stdev(dev
);
1470 struct switchtec_ntb
*sndev
;
1473 stdev
->sndev
= NULL
;
1475 if (stdev
->pdev
->class != (PCI_CLASS_BRIDGE_OTHER
<< 8))
1478 sndev
= kzalloc_node(sizeof(*sndev
), GFP_KERNEL
, dev_to_node(dev
));
1482 sndev
->stdev
= stdev
;
1483 rc
= switchtec_ntb_init_sndev(sndev
);
1487 switchtec_ntb_init_mw(sndev
);
1489 rc
= switchtec_ntb_init_req_id_table(sndev
);
1493 rc
= switchtec_ntb_init_crosslink(sndev
);
1497 switchtec_ntb_init_db(sndev
);
1498 switchtec_ntb_init_msgs(sndev
);
1500 rc
= switchtec_ntb_init_shared_mw(sndev
);
1502 goto deinit_crosslink
;
1504 rc
= switchtec_ntb_init_db_msg_irq(sndev
);
1506 goto deinit_shared_and_exit
;
1509 * If this host crashed, the other host may think the link is
1510 * still up. Tell them to force it down (it will go back up
1511 * once we register the ntb device).
1513 switchtec_ntb_send_msg(sndev
, LINK_MESSAGE
, MSG_LINK_FORCE_DOWN
);
1515 rc
= ntb_register_device(&sndev
->ntb
);
1517 goto deinit_and_exit
;
1519 stdev
->sndev
= sndev
;
1520 stdev
->link_notifier
= switchtec_ntb_link_notification
;
1521 dev_info(dev
, "NTB device registered\n");
1526 switchtec_ntb_deinit_db_msg_irq(sndev
);
1527 deinit_shared_and_exit
:
1528 switchtec_ntb_deinit_shared_mw(sndev
);
1530 switchtec_ntb_deinit_crosslink(sndev
);
1533 dev_err(dev
, "failed to register ntb device: %d\n", rc
);
1537 static void switchtec_ntb_remove(struct device
*dev
,
1538 struct class_interface
*class_intf
)
1540 struct switchtec_dev
*stdev
= to_stdev(dev
);
1541 struct switchtec_ntb
*sndev
= stdev
->sndev
;
1546 stdev
->link_notifier
= NULL
;
1547 stdev
->sndev
= NULL
;
1548 ntb_unregister_device(&sndev
->ntb
);
1549 switchtec_ntb_deinit_db_msg_irq(sndev
);
1550 switchtec_ntb_deinit_shared_mw(sndev
);
1551 switchtec_ntb_deinit_crosslink(sndev
);
1553 dev_info(dev
, "ntb device unregistered\n");
1556 static struct class_interface switchtec_interface
= {
1557 .add_dev
= switchtec_ntb_add
,
1558 .remove_dev
= switchtec_ntb_remove
,
1561 static int __init
switchtec_ntb_init(void)
1563 switchtec_interface
.class = switchtec_class
;
1564 return class_interface_register(&switchtec_interface
);
1566 module_init(switchtec_ntb_init
);
1568 static void __exit
switchtec_ntb_exit(void)
1570 class_interface_unregister(&switchtec_interface
);
1572 module_exit(switchtec_ntb_exit
);