/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of AMD Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD PCIe NTB Linux driver
 *
 * Contact Information:
 * Xiangliang Yu <Xiangliang.Yu@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_amd.h"

#define NTB_NAME	"ntb_hw_amd"
#define NTB_DESC	"AMD(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"1.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("AMD Inc.");

static const struct file_operations amd_ntb_debugfs_info;
static struct dentry *debugfs_dir;

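/*
 * Memory window index to PCI BAR mapping: "1 << idx" yields the first BAR
 * of each window, i.e. idx 0 -> BAR1 (the 32-bit split BAR), idx 1 ->
 * BAR2/3 and idx 2 -> BAR4/5, which is the layout the translation code
 * below assumes.
 */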
static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx > ndev->mw_count)
		return -EINVAL;

	return 1 << idx;
}

static int amd_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				phys_addr_t *base,
				resource_size_t *size,
				resource_size_t *align,
				resource_size_t *align_size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	if (align)
		*align = SZ_4K;

	if (align_size)
		*align_size = 1;

	return 0;
}

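/*
 * Program a memory window translation: the peer's XLAT register is loaded
 * with the DMA address to translate to, and the local LMT register is
 * loaded with base + size as the window limit. Each write is read back to
 * verify it stuck; on a mismatch the registers are restored and -EIO is
 * returned. BAR1 is the split BAR, so its address range must fit entirely
 * within 32 bits.
 */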
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ndev->ntb.pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ndev->ntb.pdev, bar);

	if (bar != 1) {
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 3);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 3);

		/* Set the limit if supported */
		limit = base_addr + size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, mmio + limit_reg);
		reg_val = read64(mmio + limit_reg);
		if (reg_val != limit) {
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		/* Set the limit if supported */
		limit = base_addr + size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, mmio + limit_reg);
		reg_val = readl(mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

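/*
 * Link state is derived from two sources: the SIDEINFO/control status
 * sampled by amd_poll_link() (cntl_sta) and any pending peer event bits
 * recorded in peer_sta by the event handler. Pending peer events are
 * consumed here, so each event is acted on only once.
 */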
static int amd_link_is_up(struct amd_ntb_dev *ndev)
{
	if (!ndev->peer_sta)
		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);

	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
		ndev->peer_sta = 0;
		return 1;
	}

	/* If peer_sta indicates a reset or D0 event, the ISR has already
	 * started a timer to check the hardware link status, so just
	 * clear the status bit here. If peer_sta indicates D3 or PME_TO,
	 * a D0/reset event will occur when the system wakes up or powers
	 * on, so do nothing here.
	 */
	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
		ndev->peer_sta = 0;

	return 0;
}

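/*
 * Clients do not call these ops directly; they go through the common NTB
 * API. A minimal sketch of how a client might query the link (assuming
 * the standard ntb_link_is_up() wrapper from <linux/ntb.h>):
 *
 *	enum ntb_speed speed;
 *	enum ntb_width width;
 *
 *	if (ntb_link_is_up(ntb, &speed, &width))
 *		dev_dbg(&ntb->dev, "link up, gen%d x%d\n", speed, width);
 */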
static int amd_ntb_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int ret = 0;

	if (amd_link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);

		dev_dbg(ndev_dev(ndev), "link is up.\n");

		ret = 1;
	} else {
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;

		dev_dbg(ndev_dev(ndev), "link is down.\n");
	}

	return ret;
}

static int amd_ntb_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Enable event interrupt */
	ndev->int_mask &= ~AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(ndev_dev(ndev), "Enabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}

static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Disable event interrupt */
	ndev->int_mask |= AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(ndev_dev(ndev), "Disabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}

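/*
 * Doorbell handling: the doorbell status, mask and request registers are
 * accessed as 16-bit values, and db_valid_mask covers AMD_DB_CNT bits.
 * Updates to the local mask are serialized with db_mask_lock because
 * set/clear are read-modify-write operations on the cached ndev->db_mask.
 */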
static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_count;
}

static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_count)
		return 0;

	return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
}

static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}

static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);

	return 0;
}

static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask |= db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);

	return 0;
}

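/*
 * Scratchpad access: self_spad and peer_spad are byte offsets into the
 * scratchpad block at AMD_SPAD_OFFSET, set up in amd_init_ntb() so that
 * each side owns half of the registers. idx is scaled by 4 because each
 * scratchpad register is 32 bits wide.
 */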
static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	offset = ndev->self_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->self_spad + (idx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_peer_spad_write(struct ntb_dev *ntb,
				   int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (idx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

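/*
 * Operations exported to the NTB core via ntb_register_device(); clients
 * such as ntb_transport reach them through the generic <linux/ntb.h>
 * wrappers rather than calling into this driver directly.
 */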
static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count = amd_ntb_mw_count,
	.mw_get_range = amd_ntb_mw_get_range,
	.mw_set_trans = amd_ntb_mw_set_trans,
	.link_is_up = amd_ntb_link_is_up,
	.link_enable = amd_ntb_link_enable,
	.link_disable = amd_ntb_link_disable,
	.db_valid_mask = amd_ntb_db_valid_mask,
	.db_vector_count = amd_ntb_db_vector_count,
	.db_vector_mask = amd_ntb_db_vector_mask,
	.db_read = amd_ntb_db_read,
	.db_clear = amd_ntb_db_clear,
	.db_set_mask = amd_ntb_db_set_mask,
	.db_clear_mask = amd_ntb_db_clear_mask,
	.peer_db_set = amd_ntb_peer_db_set,
	.spad_count = amd_ntb_spad_count,
	.spad_read = amd_ntb_spad_read,
	.spad_write = amd_ntb_spad_write,
	.peer_spad_read = amd_ntb_peer_spad_read,
	.peer_spad_write = amd_ntb_peer_spad_write,
};

static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
	void __iomem *mmio = ndev->self_mmio;
	int reg;

	reg = readl(mmio + AMD_SMUACK_OFFSET);
	reg |= bit;
	writel(reg, mmio + AMD_SMUACK_OFFSET);

	ndev->peer_sta |= bit;
}

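/*
 * Peer/SMU event handling: event bits latched in INTSTAT are acknowledged
 * through the SMUACK register and remembered in peer_sta for
 * amd_link_is_up(). Reset and D0 events also kick off the heartbeat work
 * to poll the link until it comes back up.
 */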
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(ndev_dev(ndev), "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		dev_info(ndev_dev(ndev), "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
	case AMD_LINK_DOWN_EVENT:
		amd_ack_smu(ndev, status);

		/* link down */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is a WAKEUP event */
		if (status & 0x1)
			dev_info(ndev_dev(ndev), "Wakeup is done.\n");

		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(ndev_dev(ndev), "event status = 0x%x.\n", status);
		break;
	}
}

static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
	dev_dbg(ndev_dev(ndev), "vec %d\n", vec);

	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
		amd_handle_event(ndev, vec);

	if (vec < AMD_DB_CNT)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct amd_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct amd_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

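/*
 * Interrupt setup falls back in three stages: MSI-X with up to msix_max
 * vectors (the hardware needs at least msix_min of them), then a single
 * MSI vector, then shared legacy INTx. With only one vector, the same
 * handler has to service both doorbells and events.
 */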
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);

	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	void __iomem *mmio = ndev->self_mmio;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);

	if (ndev->msix) {
		i = ndev->msix_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
		else
			pci_intx(pdev, 0);
	}
}

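/*
 * The debugfs "info" file dumps the state below as text; it is created as
 * <debugfs>/ntb_hw_amd/<pci device>/info (see ndev_init_debugfs() and the
 * module init code below for how the directories are named).
 */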
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct amd_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!amd_link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);

	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);

	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT1 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT1 -\t\t\t%#06x\n", u.v32);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &amd_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &amd_ntb_ops;
	ndev->int_mask = AMD_EVENT_INTMASK;
	spin_lock_init(&ndev->db_mask_lock);
}

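/*
 * Heartbeat polling: amd_poll_link() samples the peer SIDEINFO active bit
 * and the PCIe link status from config space, and returns nonzero only
 * when the state has changed. amd_link_hb() reschedules itself until the
 * link reports up, at which point the event path takes over again.
 */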
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->peer_mmio;
	u32 reg, stat;
	int rc;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	reg &= NTB_LIN_STA_ACTIVE_BIT;

	dev_dbg(ndev_dev(ndev), "%s: reg_val = 0x%x.\n", __func__, reg);

	if (reg == ndev->cntl_sta)
		return 0;

	ndev->cntl_sta = reg;

	rc = pci_read_config_dword(ndev->ntb.pdev,
				   AMD_LINK_STATUS_OFFSET, &stat);
	if (rc)
		return 0;
	ndev->lnk_sta = stat;

	return 1;
}

static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}

static int amd_init_isr(struct amd_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}

static void amd_init_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
		readl(mmio + AMD_SIDEINFO_OFFSET);
	}
}

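/*
 * Only the primary/secondary point-to-point topologies are supported (no
 * B2B). The scratchpad registers are split down the middle: the primary
 * side uses the lower half for itself and the upper half for the peer,
 * and the secondary side mirrors that assignment.
 */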
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = AMD_MW_CNT;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(ndev_dev(ndev), "AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}

static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 info;

	info = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (info & AMD_SIDE_MASK)
		return NTB_TOPO_SEC;
	else
		return NTB_TOPO_PRI;
}

static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev_pdev(ndev);

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(ndev_dev(ndev), "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(ndev_dev(ndev), "failed to init ISR.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);

	ndev_deinit_isr(ndev);
}

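/*
 * All registers live behind BAR0: the local (self) register set starts at
 * offset 0 and the peer's registers follow at AMD_PEER_OFFSET within the
 * same mapping. DMA is set up for 64-bit masks with a 32-bit fallback.
 */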
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

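/*
 * Probe order: allocate the device structure on the PCI device's NUMA
 * node, map the registers, initialize NTB state and interrupts, advertise
 * readiness through the side info register, create debugfs entries, and
 * finally register with the NTB core so clients can attach.
 */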
static int amd_ntb_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct amd_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	ndev_init_struct(ndev, pdev);

	rc = amd_ntb_init_pci(ndev, pdev);
	if (rc)
		goto err_init_pci;

	rc = amd_init_dev(ndev);
	if (rc)
		goto err_init_dev;

	/* write side info */
	amd_init_side_info(ndev);

	amd_poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
err_init_dev:
	amd_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_side_info(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct file_operations amd_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id amd_ntb_pci_tbl[] = {
	{PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
	{0}
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);

static struct pci_driver amd_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = amd_ntb_pci_tbl,
	.probe = amd_ntb_pci_probe,
	.remove = amd_ntb_pci_remove,
};

static int __init amd_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&amd_ntb_pci_driver);
}
module_init(amd_ntb_pci_driver_init);

static void __exit amd_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&amd_ntb_pci_driver);
	debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);