// SPDX-License-Identifier: GPL-2.0-only
/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME                         "pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED                      -1
#define IRQ_TYPE_LEGACY                         0
#define IRQ_TYPE_MSI                            1
#define IRQ_TYPE_MSIX                           2

#define PCI_ENDPOINT_TEST_MAGIC                 0x0

#define PCI_ENDPOINT_TEST_COMMAND               0x4
#define COMMAND_RAISE_LEGACY_IRQ                BIT(0)
#define COMMAND_RAISE_MSI_IRQ                   BIT(1)
#define COMMAND_RAISE_MSIX_IRQ                  BIT(2)
#define COMMAND_READ                            BIT(3)
#define COMMAND_WRITE                           BIT(4)
#define COMMAND_COPY                            BIT(5)

#define PCI_ENDPOINT_TEST_STATUS                0x8
#define STATUS_READ_SUCCESS                     BIT(0)
#define STATUS_READ_FAIL                        BIT(1)
#define STATUS_WRITE_SUCCESS                    BIT(2)
#define STATUS_WRITE_FAIL                       BIT(3)
#define STATUS_COPY_SUCCESS                     BIT(4)
#define STATUS_COPY_FAIL                        BIT(5)
#define STATUS_IRQ_RAISED                       BIT(6)
#define STATUS_SRC_ADDR_INVALID                 BIT(7)
#define STATUS_DST_ADDR_INVALID                 BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR        0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR        0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR        0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR        0x18

#define PCI_ENDPOINT_TEST_SIZE                  0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM              0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE              0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER            0x28

#define PCI_ENDPOINT_TEST_FLAGS                 0x2c
#define FLAG_USE_DMA                            BIT(0)

#define PCI_DEVICE_ID_TI_AM654                  0xb00c
#define PCI_DEVICE_ID_TI_J7200                  0xb00f
#define PCI_DEVICE_ID_TI_AM64                   0xb010
#define PCI_DEVICE_ID_LS1088A                   0x80c0

#define is_am654_pci_dev(pdev)          \
                ((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1          0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1          0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0          0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1          0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
                                            miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
        BAR_0,
        BAR_1,
        BAR_2,
        BAR_3,
        BAR_4,
        BAR_5,
};

struct pci_endpoint_test {
        struct pci_dev  *pdev;
        void __iomem    *base;
        void __iomem    *bar[PCI_STD_NUM_BARS];
        struct completion irq_raised;
        int             last_irq;
        int             num_irqs;
        int             irq_type;
        /* mutex to protect the ioctls */
        struct mutex    mutex;
        struct miscdevice miscdev;
        enum pci_barno test_reg_bar;
        size_t alignment;
        const char *name;
};

struct pci_endpoint_test_data {
        enum pci_barno test_reg_bar;
        size_t alignment;
        int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
                                          u32 offset)
{
        return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
                                            u32 offset, u32 value)
{
        writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
                                              int bar, int offset)
{
        return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
                                                int bar, u32 offset, u32 value)
{
        writel(value, test->bar[bar] + offset);
}

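/*
 * Shared interrupt handler: the endpoint sets STATUS_IRQ_RAISED in the
 * status register before raising the interrupt. Record which host vector
 * fired and complete &test->irq_raised so the waiting ioctl can proceed.
 */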
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
        struct pci_endpoint_test *test = dev_id;
        u32 reg;

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_IRQ_RAISED) {
                test->last_irq = irq;
                complete(&test->irq_raised);
                reg &= ~STATUS_IRQ_RAISED;
        }
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
                                 reg);

        return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
        struct pci_dev *pdev = test->pdev;

        pci_free_irq_vectors(pdev);
        test->irq_type = IRQ_TYPE_UNDEFINED;
}

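/*
 * Allocate interrupt vectors for the requested type: a single vector for
 * legacy INTx, up to 32 for MSI and up to 2048 for MSI-X. On failure
 * test->num_irqs is left at 0 and false is returned.
 */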
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
                                                int type)
{
        int irq = -1;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        bool res = true;

        switch (type) {
        case IRQ_TYPE_LEGACY:
                irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
                if (irq < 0)
                        dev_err(dev, "Failed to get Legacy interrupt\n");
                break;
        case IRQ_TYPE_MSI:
                irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI interrupts\n");
                break;
        case IRQ_TYPE_MSIX:
                irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI-X interrupts\n");
                break;
        default:
                dev_err(dev, "Invalid IRQ type selected\n");
        }

        if (irq < 0) {
                irq = 0;
                res = false;
        }

        test->irq_type = type;
        test->num_irqs = irq;

        return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
        int i;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++)
                devm_free_irq(dev, pci_irq_vector(pdev, i), test);

        test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
        int i;
        int err;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++) {
                err = devm_request_irq(dev, pci_irq_vector(pdev, i),
                                       pci_endpoint_test_irqhandler,
                                       IRQF_SHARED, test->name, test);
                if (err)
                        goto fail;
        }

        return true;

fail:
        switch (irq_type) {
        case IRQ_TYPE_LEGACY:
                dev_err(dev, "Failed to request IRQ %d for Legacy\n",
                        pci_irq_vector(pdev, i));
                break;
        case IRQ_TYPE_MSI:
                dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
                        pci_irq_vector(pdev, i),
                        i + 1);
                break;
        case IRQ_TYPE_MSIX:
                dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
                        pci_irq_vector(pdev, i),
                        i + 1);
                break;
        }

        return false;
}

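/*
 * BAR sanity test: fill the BAR with the 0xA0A0A0A0 pattern and read it
 * back. For the BAR that holds the test registers only the first dword is
 * exercised (presumably so the registers the test itself relies on are not
 * overwritten).
 */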
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                                  enum pci_barno barno)
{
        int j;
        u32 val;
        int size;
        struct pci_dev *pdev = test->pdev;

        if (!test->bar[barno])
                return false;

        size = pci_resource_len(pdev, barno);

        if (barno == test->test_reg_bar)
                size = 0x4;

        for (j = 0; j < size; j += 4)
                pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

        for (j = 0; j < size; j += 4) {
                val = pci_endpoint_test_bar_readl(test, barno, j);
                if (val != 0xA0A0A0A0)
                        return false;
        }

        return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
        u32 val;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 IRQ_TYPE_LEGACY);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_RAISE_LEGACY_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        return true;
}

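/*
 * Ask the endpoint to raise MSI or MSI-X vector 'msi_num' (1-based) and
 * verify that the interrupt which arrived came in on the matching host
 * vector.
 */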
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
                                      u16 msi_num, bool msix)
{
        u32 val;
        struct pci_dev *pdev = test->pdev;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 msix == false ? IRQ_TYPE_MSI :
                                 IRQ_TYPE_MSIX);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 msix == false ? COMMAND_RAISE_MSI_IRQ :
                                 COMMAND_RAISE_MSIX_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
                return true;

        return false;
}

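/*
 * COPY test: hand the endpoint a source buffer filled with random data and
 * a destination buffer, program their bus addresses and the transfer size,
 * then issue COMMAND_COPY and wait for the completion interrupt. The test
 * passes if the CRC32 of the destination matches that of the source.
 */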
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
                                   unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        void *src_addr;
        void *dst_addr;
        u32 flags = 0;
        bool use_dma;
        size_t size;
        dma_addr_t src_phys_addr;
        dma_addr_t dst_phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_src_addr;
        dma_addr_t orig_src_phys_addr;
        void *orig_dst_addr;
        dma_addr_t orig_dst_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 src_crc32;
        u32 dst_crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_src_addr) {
                dev_err(dev, "Failed to allocate source buffer\n");
                ret = false;
                goto err;
        }

        get_random_bytes(orig_src_addr, size + alignment);
        orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
                                            size + alignment, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, orig_src_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_src_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
                src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
                offset = src_phys_addr - orig_src_phys_addr;
                src_addr = orig_src_addr + offset;
        } else {
                src_phys_addr = orig_src_phys_addr;
                src_addr = orig_src_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(src_phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(src_phys_addr));

        src_crc32 = crc32_le(~0, src_addr, size);

        orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_dst_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err_dst_addr;
        }

        orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
                                            size + alignment, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, orig_dst_phys_addr)) {
                dev_err(dev, "failed to map destination buffer address\n");
                ret = false;
                goto err_dst_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
                dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
                offset = dst_phys_addr - orig_dst_phys_addr;
                dst_addr = orig_dst_addr + offset;
        } else {
                dst_phys_addr = orig_dst_phys_addr;
                dst_addr = orig_dst_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(dst_phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(dst_phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
                                 size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_COPY);

        wait_for_completion(&test->irq_raised);

        dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
                         DMA_FROM_DEVICE);

        dst_crc32 = crc32_le(~0, dst_addr, size);
        if (dst_crc32 == src_crc32)
                ret = true;

err_dst_phys_addr:
        kfree(orig_dst_addr);

err_dst_addr:
        dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
                         DMA_TO_DEVICE);

err_src_phys_addr:
        kfree(orig_src_addr);

err:
        return ret;
}

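/*
 * WRITE test (host's point of view): fill a buffer with random data, write
 * its CRC32 to the CHECKSUM register and issue COMMAND_READ, i.e. the
 * endpoint reads the buffer from host memory and reports the result in the
 * status register.
 */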
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
                                    unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        u32 flags = 0;
        bool use_dma;
        u32 reg;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        size_t size;
        u32 crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err != 0) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate address\n");
                ret = false;
                goto err;
        }

        get_random_bytes(orig_addr, size + alignment);

        orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(dev, orig_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        crc32 = crc32_le(~0, addr, size);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
                                 crc32);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_READ);

        wait_for_completion(&test->irq_raised);

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_READ_SUCCESS)
                ret = true;

        dma_unmap_single(dev, orig_phys_addr, size + alignment,
                         DMA_TO_DEVICE);

err_phys_addr:
        kfree(orig_addr);

err:
        return ret;
}

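/*
 * READ test (host's point of view): provide an empty buffer and issue
 * COMMAND_WRITE so the endpoint writes into host memory; the data is then
 * checked against the CRC32 the endpoint stored in the CHECKSUM register.
 */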
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
                                   unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        u32 flags = 0;
        bool use_dma;
        size_t size;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err;
        }

        orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, orig_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_WRITE);

        wait_for_completion(&test->irq_raised);

        dma_unmap_single(dev, orig_phys_addr, size + alignment,
                         DMA_FROM_DEVICE);

        crc32 = crc32_le(~0, addr, size);
        if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
                ret = true;

err_phys_addr:
        kfree(orig_addr);
err:
        return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);
        return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
                                      int req_irq_type)
{
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                return false;
        }

        if (test->irq_type == req_irq_type)
                return true;

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
                goto err;

        if (!pci_endpoint_test_request_irq(test))
                goto err;

        return true;

err:
        pci_endpoint_test_free_irq_vectors(test);
        return false;
}

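/*
 * ioctl entry point for the misc device, serialized by test->mutex. The
 * PCITEST_* commands map onto the helpers above; userspace typically drives
 * them through tools/pci/pcitest.
 */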
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
                                    unsigned long arg)
{
        int ret = -EINVAL;
        enum pci_barno bar;
        struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
        struct pci_dev *pdev = test->pdev;

        mutex_lock(&test->mutex);
        switch (cmd) {
        case PCITEST_BAR:
                bar = arg;
                if (bar > BAR_5)
                        goto ret;
                if (is_am654_pci_dev(pdev) && bar == BAR_0)
                        goto ret;
                ret = pci_endpoint_test_bar(test, bar);
                break;
        case PCITEST_LEGACY_IRQ:
                ret = pci_endpoint_test_legacy_irq(test);
                break;
        case PCITEST_MSI:
        case PCITEST_MSIX:
                ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
                break;
        case PCITEST_WRITE:
                ret = pci_endpoint_test_write(test, arg);
                break;
        case PCITEST_READ:
                ret = pci_endpoint_test_read(test, arg);
                break;
        case PCITEST_COPY:
                ret = pci_endpoint_test_copy(test, arg);
                break;
        case PCITEST_SET_IRQTYPE:
                ret = pci_endpoint_test_set_irq(test, arg);
                break;
        case PCITEST_GET_IRQTYPE:
                ret = irq_type;
                break;
        case PCITEST_CLEAR_IRQ:
                ret = pci_endpoint_test_clear_irq(test);
                break;
        }

ret:
        mutex_unlock(&test->mutex);
        return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = pci_endpoint_test_ioctl,
};

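/*
 * Probe: pick up the per-device configuration (test register BAR, DMA
 * alignment, default IRQ type), enable the device, map its BARs, allocate
 * and request interrupt vectors and finally register a misc device named
 * DRV_MODULE_NAME ".<id>".
 */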
static int pci_endpoint_test_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        int err;
        int id;
        char name[24];
        enum pci_barno bar;
        void __iomem *base;
        struct device *dev = &pdev->dev;
        struct pci_endpoint_test *test;
        struct pci_endpoint_test_data *data;
        enum pci_barno test_reg_bar = BAR_0;
        struct miscdevice *misc_device;

        if (pci_is_bridge(pdev))
                return -ENODEV;

        test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
        if (!test)
                return -ENOMEM;

        test->test_reg_bar = 0;
        test->alignment = 0;
        test->pdev = pdev;
        test->irq_type = IRQ_TYPE_UNDEFINED;

        if (no_msi)
                irq_type = IRQ_TYPE_LEGACY;

        data = (struct pci_endpoint_test_data *)ent->driver_data;
        if (data) {
                test_reg_bar = data->test_reg_bar;
                test->test_reg_bar = test_reg_bar;
                test->alignment = data->alignment;
                irq_type = data->irq_type;
        }

        init_completion(&test->irq_raised);
        mutex_init(&test->mutex);

        if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "Cannot set DMA mask\n");
                return -EINVAL;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Cannot enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
                err = -EINVAL;
                goto err_disable_irq;
        }

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                        base = pci_ioremap_bar(pdev, bar);
                        if (!base) {
                                dev_err(dev, "Failed to read BAR%d\n", bar);
                                WARN_ON(bar == test_reg_bar);
                        }
                        test->bar[bar] = base;
                }
        }

        test->base = test->bar[test_reg_bar];
        if (!test->base) {
                err = -ENOMEM;
                dev_err(dev, "Cannot perform PCI test without BAR%d\n",
                        test_reg_bar);
                goto err_iounmap;
        }

        pci_set_drvdata(pdev, test);

        id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
        if (id < 0) {
                err = id;
                dev_err(dev, "Unable to get id\n");
                goto err_iounmap;
        }

        snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
        test->name = kstrdup(name, GFP_KERNEL);
        if (!test->name) {
                err = -ENOMEM;
                goto err_ida_remove;
        }

        if (!pci_endpoint_test_request_irq(test)) {
                err = -EINVAL;
                goto err_kfree_test_name;
        }

        misc_device = &test->miscdev;
        misc_device->minor = MISC_DYNAMIC_MINOR;
        misc_device->name = kstrdup(name, GFP_KERNEL);
        if (!misc_device->name) {
                err = -ENOMEM;
                goto err_release_irq;
        }
        misc_device->fops = &pci_endpoint_test_fops,

        err = misc_register(misc_device);
        if (err) {
                dev_err(dev, "Failed to register device\n");
                goto err_kfree_name;
        }

        return 0;

err_kfree_name:
        kfree(misc_device->name);

err_release_irq:
        pci_endpoint_test_release_irq(test);

err_kfree_test_name:
        kfree(test->name);

err_ida_remove:
        ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }

err_disable_irq:
        pci_endpoint_test_free_irq_vectors(test);
        pci_release_regions(pdev);

err_disable_pdev:
        pci_disable_device(pdev);

        return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
        int id;
        enum pci_barno bar;
        struct pci_endpoint_test *test = pci_get_drvdata(pdev);
        struct miscdevice *misc_device = &test->miscdev;

        if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
                return;
        if (id < 0)
                return;

        misc_deregister(&test->miscdev);
        kfree(misc_device->name);
        kfree(test->name);
        ida_simple_remove(&pci_endpoint_test_ida, id);
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

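/*
 * Per-platform configuration referenced from the device ID table below:
 * which BAR exposes the test registers, the DMA buffer alignment the
 * endpoint expects and the default IRQ type.
 */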
static const struct pci_endpoint_test_data default_data = {
        .test_reg_bar = BAR_0,
        .alignment = SZ_4K,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
        .test_reg_bar = BAR_2,
        .alignment = SZ_64K,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
        .alignment = 256,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
          .driver_data = (kernel_ulong_t)&am654_data
        },
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = pci_endpoint_test_tbl,
        .probe          = pci_endpoint_test_probe,
        .remove         = pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");