]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/dma/dmatest.c
dmatest: define MAX_ERROR_COUNT constant
[mirror_ubuntu-bionic-kernel.git] / drivers / dma / dmatest.c
CommitLineData
4a776f0a
HS
1/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
851b7e16 5 * Copyright (C) 2013 Intel Corporation
4a776f0a
HS
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/delay.h>
b7f080cf 12#include <linux/dma-mapping.h>
4a776f0a 13#include <linux/dmaengine.h>
981ed70d 14#include <linux/freezer.h>
4a776f0a
HS
15#include <linux/init.h>
16#include <linux/kthread.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/random.h>
5a0e3ad6 20#include <linux/slab.h>
4a776f0a 21#include <linux/wait.h>
851b7e16
AS
22#include <linux/ctype.h>
23#include <linux/debugfs.h>
24#include <linux/uaccess.h>
25#include <linux/seq_file.h>
4a776f0a
HS
26
27static unsigned int test_buf_size = 16384;
28module_param(test_buf_size, uint, S_IRUGO);
29MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
30
06190d84 31static char test_channel[20];
4a776f0a
HS
32module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
33MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
34
06190d84 35static char test_device[20];
4a776f0a
HS
36module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
37MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
38
39static unsigned int threads_per_chan = 1;
40module_param(threads_per_chan, uint, S_IRUGO);
41MODULE_PARM_DESC(threads_per_chan,
42 "Number of threads to start per channel (default: 1)");
43
44static unsigned int max_channels;
45module_param(max_channels, uint, S_IRUGO);
33df8ca0 46MODULE_PARM_DESC(max_channels,
4a776f0a
HS
47 "Maximum number of channels to use (default: all)");
48
0a2ff57d
NF
49static unsigned int iterations;
50module_param(iterations, uint, S_IRUGO);
51MODULE_PARM_DESC(iterations,
52 "Iterations before stopping test (default: infinite)");
53
b54d5cb9
DW
54static unsigned int xor_sources = 3;
55module_param(xor_sources, uint, S_IRUGO);
56MODULE_PARM_DESC(xor_sources,
57 "Number of xor source buffers (default: 3)");
58
58691d64
DW
59static unsigned int pq_sources = 3;
60module_param(pq_sources, uint, S_IRUGO);
61MODULE_PARM_DESC(pq_sources,
62 "Number of p+q source buffers (default: 3)");
63
d42efe6b
VK
64static int timeout = 3000;
65module_param(timeout, uint, S_IRUGO);
85ee7a1d
JP
66MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
67 "Pass -1 for infinite timeout");
d42efe6b 68
74b5c07a
AS
69/* Maximum amount of mismatched bytes in buffer to print */
70#define MAX_ERROR_COUNT 32
71
4a776f0a
HS
/*
 * Buffer initialization patterns.
 *
 * Every byte of a source buffer has bit 7 set; every byte of a
 * destination buffer has bit 7 clear.  Bit 6 marks the bytes the DMA
 * engine is asked to copy; bit 5 marks the destination bytes it will
 * overwrite.  The low five bits carry the inverted low bits of the
 * byte's offset, so a single byte reveals both where it came from and
 * what role it was given.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
88
e03e93a9
AS
89struct dmatest_info;
90
4a776f0a
HS
91struct dmatest_thread {
92 struct list_head node;
e03e93a9 93 struct dmatest_info *info;
4a776f0a
HS
94 struct task_struct *task;
95 struct dma_chan *chan;
b54d5cb9
DW
96 u8 **srcs;
97 u8 **dsts;
98 enum dma_transaction_type type;
3e5ccd86 99 bool done;
4a776f0a
HS
100};
101
102struct dmatest_chan {
103 struct list_head node;
104 struct dma_chan *chan;
105 struct list_head threads;
106};
107
e03e93a9 108/**
15b8a8ea 109 * struct dmatest_params - test parameters.
e03e93a9
AS
110 * @buf_size: size of the memcpy test buffer
111 * @channel: bus ID of the channel to test
112 * @device: bus ID of the DMA Engine to test
113 * @threads_per_chan: number of threads to start per channel
114 * @max_channels: maximum number of channels to use
115 * @iterations: iterations before stopping test
116 * @xor_sources: number of xor source buffers
117 * @pq_sources: number of p+q source buffers
118 * @timeout: transfer timeout in msec, -1 for infinite timeout
119 */
15b8a8ea 120struct dmatest_params {
e03e93a9
AS
121 unsigned int buf_size;
122 char channel[20];
123 char device[20];
124 unsigned int threads_per_chan;
125 unsigned int max_channels;
126 unsigned int iterations;
127 unsigned int xor_sources;
128 unsigned int pq_sources;
129 int timeout;
15b8a8ea
AS
130};
131
132/**
133 * struct dmatest_info - test information.
134 * @params: test parameters
851b7e16 135 * @lock: access protection to the fields of this structure
15b8a8ea
AS
136 */
137struct dmatest_info {
138 /* Test parameters */
139 struct dmatest_params params;
838cc704
AS
140
141 /* Internal state */
142 struct list_head channels;
143 unsigned int nr_channels;
851b7e16
AS
144 struct mutex lock;
145
146 /* debugfs related stuff */
147 struct dentry *root;
148 struct dmatest_params dbgfs_params;
e03e93a9
AS
149};
150
151static struct dmatest_info test_info;
152
15b8a8ea 153static bool dmatest_match_channel(struct dmatest_params *params,
e03e93a9 154 struct dma_chan *chan)
4a776f0a 155{
15b8a8ea 156 if (params->channel[0] == '\0')
4a776f0a 157 return true;
15b8a8ea 158 return strcmp(dma_chan_name(chan), params->channel) == 0;
4a776f0a
HS
159}
160
15b8a8ea 161static bool dmatest_match_device(struct dmatest_params *params,
e03e93a9 162 struct dma_device *device)
4a776f0a 163{
15b8a8ea 164 if (params->device[0] == '\0')
4a776f0a 165 return true;
15b8a8ea 166 return strcmp(dev_name(device->dev), params->device) == 0;
4a776f0a
HS
167}
168
/* Full-width random value drawn from the kernel entropy pool. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	get_random_bytes(&val, sizeof(val));
	return val;
}
176
e03e93a9
AS
177static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
178 unsigned int buf_size)
4a776f0a
HS
179{
180 unsigned int i;
b54d5cb9
DW
181 u8 *buf;
182
183 for (; (buf = *bufs); bufs++) {
184 for (i = 0; i < start; i++)
185 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
186 for ( ; i < start + len; i++)
187 buf[i] = PATTERN_SRC | PATTERN_COPY
c019894e 188 | (~i & PATTERN_COUNT_MASK);
e03e93a9 189 for ( ; i < buf_size; i++)
b54d5cb9
DW
190 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
191 buf++;
192 }
4a776f0a
HS
193}
194
e03e93a9
AS
195static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
196 unsigned int buf_size)
4a776f0a
HS
197{
198 unsigned int i;
b54d5cb9
DW
199 u8 *buf;
200
201 for (; (buf = *bufs); bufs++) {
202 for (i = 0; i < start; i++)
203 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
204 for ( ; i < start + len; i++)
205 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
206 | (~i & PATTERN_COUNT_MASK);
e03e93a9 207 for ( ; i < buf_size; i++)
b54d5cb9
DW
208 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
209 }
4a776f0a
HS
210}
211
212static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
213 unsigned int counter, bool is_srcbuf)
214{
215 u8 diff = actual ^ pattern;
216 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
217 const char *thread_name = current->comm;
218
219 if (is_srcbuf)
220 pr_warning("%s: srcbuf[0x%x] overwritten!"
221 " Expected %02x, got %02x\n",
222 thread_name, index, expected, actual);
223 else if ((pattern & PATTERN_COPY)
224 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
225 pr_warning("%s: dstbuf[0x%x] not copied!"
226 " Expected %02x, got %02x\n",
227 thread_name, index, expected, actual);
228 else if (diff & PATTERN_SRC)
229 pr_warning("%s: dstbuf[0x%x] was copied!"
230 " Expected %02x, got %02x\n",
231 thread_name, index, expected, actual);
232 else
233 pr_warning("%s: dstbuf[0x%x] mismatch!"
234 " Expected %02x, got %02x\n",
235 thread_name, index, expected, actual);
236}
237
b54d5cb9 238static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
4a776f0a
HS
239 unsigned int end, unsigned int counter, u8 pattern,
240 bool is_srcbuf)
241{
242 unsigned int i;
243 unsigned int error_count = 0;
244 u8 actual;
b54d5cb9
DW
245 u8 expected;
246 u8 *buf;
247 unsigned int counter_orig = counter;
248
249 for (; (buf = *bufs); bufs++) {
250 counter = counter_orig;
251 for (i = start; i < end; i++) {
252 actual = buf[i];
253 expected = pattern | (~counter & PATTERN_COUNT_MASK);
254 if (actual != expected) {
74b5c07a 255 if (error_count < MAX_ERROR_COUNT)
b54d5cb9
DW
256 dmatest_mismatch(actual, pattern, i,
257 counter, is_srcbuf);
258 error_count++;
259 }
260 counter++;
4a776f0a 261 }
4a776f0a
HS
262 }
263
74b5c07a 264 if (error_count > MAX_ERROR_COUNT)
4a776f0a 265 pr_warning("%s: %u errors suppressed\n",
74b5c07a 266 current->comm, error_count - MAX_ERROR_COUNT);
4a776f0a
HS
267
268 return error_count;
269}
270
adfa543e
TH
271/* poor man's completion - we want to use wait_event_freezable() on it */
272struct dmatest_done {
273 bool done;
274 wait_queue_head_t *wait;
275};
276
277static void dmatest_callback(void *arg)
e44e0aa3 278{
adfa543e
TH
279 struct dmatest_done *done = arg;
280
281 done->done = true;
282 wake_up_all(done->wait);
e44e0aa3
DW
283}
284
632fd283
AS
285static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
286 unsigned int count)
287{
288 while (count--)
289 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
290}
291
292static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
293 unsigned int count)
294{
295 while (count--)
296 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
297}
298
8be9e32b
AM
/* Largest odd value not exceeding min(x, y). */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int m = min(x, y);

	return (m % 2) ? m : m - 1;
}
305
4a776f0a
HS
306/*
307 * This function repeatedly tests DMA transfers of various lengths and
b54d5cb9
DW
308 * offsets for a given operation type until it is told to exit by
309 * kthread_stop(). There may be multiple threads running this function
310 * in parallel for a single channel, and there may be multiple channels
311 * being tested in parallel.
4a776f0a
HS
312 *
313 * Before each test, the source and destination buffer is initialized
314 * with a known pattern. This pattern is different depending on
315 * whether it's in an area which is supposed to be copied or
316 * overwritten, and different in the source and destination buffers.
317 * So if the DMA engine doesn't copy exactly what we tell it to copy,
318 * we'll notice.
319 */
320static int dmatest_func(void *data)
321{
adfa543e 322 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
4a776f0a 323 struct dmatest_thread *thread = data;
adfa543e 324 struct dmatest_done done = { .wait = &done_wait };
e03e93a9 325 struct dmatest_info *info;
15b8a8ea 326 struct dmatest_params *params;
4a776f0a 327 struct dma_chan *chan;
8be9e32b 328 struct dma_device *dev;
4a776f0a
HS
329 const char *thread_name;
330 unsigned int src_off, dst_off, len;
331 unsigned int error_count;
332 unsigned int failed_tests = 0;
333 unsigned int total_tests = 0;
334 dma_cookie_t cookie;
335 enum dma_status status;
b54d5cb9 336 enum dma_ctrl_flags flags;
945b5af3 337 u8 *pq_coefs = NULL;
4a776f0a 338 int ret;
b54d5cb9
DW
339 int src_cnt;
340 int dst_cnt;
341 int i;
4a776f0a
HS
342
343 thread_name = current->comm;
adfa543e 344 set_freezable();
4a776f0a
HS
345
346 ret = -ENOMEM;
4a776f0a
HS
347
348 smp_rmb();
e03e93a9 349 info = thread->info;
15b8a8ea 350 params = &info->params;
4a776f0a 351 chan = thread->chan;
8be9e32b 352 dev = chan->device;
b54d5cb9
DW
353 if (thread->type == DMA_MEMCPY)
354 src_cnt = dst_cnt = 1;
355 else if (thread->type == DMA_XOR) {
8be9e32b 356 /* force odd to ensure dst = src */
15b8a8ea 357 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
b54d5cb9 358 dst_cnt = 1;
58691d64 359 } else if (thread->type == DMA_PQ) {
8be9e32b 360 /* force odd to ensure dst = src */
15b8a8ea 361 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
58691d64 362 dst_cnt = 2;
945b5af3 363
15b8a8ea 364 pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
945b5af3
AS
365 if (!pq_coefs)
366 goto err_thread_type;
367
94de648d 368 for (i = 0; i < src_cnt; i++)
58691d64 369 pq_coefs[i] = 1;
b54d5cb9 370 } else
945b5af3 371 goto err_thread_type;
b54d5cb9
DW
372
373 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
374 if (!thread->srcs)
375 goto err_srcs;
376 for (i = 0; i < src_cnt; i++) {
15b8a8ea 377 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
b54d5cb9
DW
378 if (!thread->srcs[i])
379 goto err_srcbuf;
380 }
381 thread->srcs[i] = NULL;
382
383 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
384 if (!thread->dsts)
385 goto err_dsts;
386 for (i = 0; i < dst_cnt; i++) {
15b8a8ea 387 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
b54d5cb9
DW
388 if (!thread->dsts[i])
389 goto err_dstbuf;
390 }
391 thread->dsts[i] = NULL;
392
e44e0aa3
DW
393 set_user_nice(current, 10);
394
b203bd3f
IS
395 /*
396 * src buffers are freed by the DMAEngine code with dma_unmap_single()
397 * dst buffers are freed by ourselves below
398 */
399 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
400 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
4a776f0a 401
0a2ff57d 402 while (!kthread_should_stop()
15b8a8ea 403 && !(params->iterations && total_tests >= params->iterations)) {
b54d5cb9
DW
404 struct dma_async_tx_descriptor *tx = NULL;
405 dma_addr_t dma_srcs[src_cnt];
406 dma_addr_t dma_dsts[dst_cnt];
83544ae9 407 u8 align = 0;
d86be86e 408
4a776f0a
HS
409 total_tests++;
410
83544ae9
DW
411 /* honor alignment restrictions */
412 if (thread->type == DMA_MEMCPY)
413 align = dev->copy_align;
414 else if (thread->type == DMA_XOR)
415 align = dev->xor_align;
416 else if (thread->type == DMA_PQ)
417 align = dev->pq_align;
418
15b8a8ea 419 if (1 << align > params->buf_size) {
cfe4f275 420 pr_err("%u-byte buffer too small for %d-byte alignment\n",
15b8a8ea 421 params->buf_size, 1 << align);
cfe4f275
GL
422 break;
423 }
424
15b8a8ea 425 len = dmatest_random() % params->buf_size + 1;
83544ae9 426 len = (len >> align) << align;
cfe4f275
GL
427 if (!len)
428 len = 1 << align;
15b8a8ea
AS
429 src_off = dmatest_random() % (params->buf_size - len + 1);
430 dst_off = dmatest_random() % (params->buf_size - len + 1);
cfe4f275 431
83544ae9
DW
432 src_off = (src_off >> align) << align;
433 dst_off = (dst_off >> align) << align;
434
15b8a8ea
AS
435 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
436 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
4a776f0a 437
b54d5cb9
DW
438 for (i = 0; i < src_cnt; i++) {
439 u8 *buf = thread->srcs[i] + src_off;
440
441 dma_srcs[i] = dma_map_single(dev->dev, buf, len,
442 DMA_TO_DEVICE);
afde3be1
AS
443 ret = dma_mapping_error(dev->dev, dma_srcs[i]);
444 if (ret) {
445 unmap_src(dev->dev, dma_srcs, len, i);
446 pr_warn("%s: #%u: mapping error %d with "
447 "src_off=0x%x len=0x%x\n",
448 thread_name, total_tests - 1, ret,
449 src_off, len);
450 failed_tests++;
451 continue;
452 }
b54d5cb9 453 }
d86be86e 454 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
b54d5cb9
DW
455 for (i = 0; i < dst_cnt; i++) {
456 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
15b8a8ea 457 params->buf_size,
b54d5cb9 458 DMA_BIDIRECTIONAL);
afde3be1
AS
459 ret = dma_mapping_error(dev->dev, dma_dsts[i]);
460 if (ret) {
461 unmap_src(dev->dev, dma_srcs, len, src_cnt);
15b8a8ea
AS
462 unmap_dst(dev->dev, dma_dsts, params->buf_size,
463 i);
afde3be1
AS
464 pr_warn("%s: #%u: mapping error %d with "
465 "dst_off=0x%x len=0x%x\n",
466 thread_name, total_tests - 1, ret,
15b8a8ea 467 dst_off, params->buf_size);
afde3be1
AS
468 failed_tests++;
469 continue;
470 }
b54d5cb9
DW
471 }
472
473 if (thread->type == DMA_MEMCPY)
474 tx = dev->device_prep_dma_memcpy(chan,
475 dma_dsts[0] + dst_off,
476 dma_srcs[0], len,
477 flags);
478 else if (thread->type == DMA_XOR)
479 tx = dev->device_prep_dma_xor(chan,
480 dma_dsts[0] + dst_off,
67b9124f 481 dma_srcs, src_cnt,
b54d5cb9 482 len, flags);
58691d64
DW
483 else if (thread->type == DMA_PQ) {
484 dma_addr_t dma_pq[dst_cnt];
485
486 for (i = 0; i < dst_cnt; i++)
487 dma_pq[i] = dma_dsts[i] + dst_off;
488 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
94de648d 489 src_cnt, pq_coefs,
58691d64
DW
490 len, flags);
491 }
d86be86e 492
d86be86e 493 if (!tx) {
632fd283 494 unmap_src(dev->dev, dma_srcs, len, src_cnt);
15b8a8ea
AS
495 unmap_dst(dev->dev, dma_dsts, params->buf_size,
496 dst_cnt);
d86be86e
AN
497 pr_warning("%s: #%u: prep error with src_off=0x%x "
498 "dst_off=0x%x len=0x%x\n",
499 thread_name, total_tests - 1,
500 src_off, dst_off, len);
501 msleep(100);
502 failed_tests++;
503 continue;
504 }
e44e0aa3 505
adfa543e 506 done.done = false;
e44e0aa3 507 tx->callback = dmatest_callback;
adfa543e 508 tx->callback_param = &done;
d86be86e
AN
509 cookie = tx->tx_submit(tx);
510
4a776f0a
HS
511 if (dma_submit_error(cookie)) {
512 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
513 "dst_off=0x%x len=0x%x\n",
514 thread_name, total_tests - 1, cookie,
515 src_off, dst_off, len);
516 msleep(100);
517 failed_tests++;
518 continue;
519 }
b54d5cb9 520 dma_async_issue_pending(chan);
4a776f0a 521
77101ce5
AS
522 wait_event_freezable_timeout(done_wait,
523 done.done || kthread_should_stop(),
15b8a8ea 524 msecs_to_jiffies(params->timeout));
981ed70d 525
e44e0aa3 526 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
4a776f0a 527
adfa543e
TH
528 if (!done.done) {
529 /*
530 * We're leaving the timed out dma operation with
531 * dangling pointer to done_wait. To make this
532 * correct, we'll need to allocate wait_done for
533 * each test iteration and perform "who's gonna
534 * free it this time?" dancing. For now, just
535 * leave it dangling.
536 */
e44e0aa3
DW
537 pr_warning("%s: #%u: test timed out\n",
538 thread_name, total_tests - 1);
539 failed_tests++;
540 continue;
541 } else if (status != DMA_SUCCESS) {
542 pr_warning("%s: #%u: got completion callback,"
543 " but status is \'%s\'\n",
544 thread_name, total_tests - 1,
545 status == DMA_ERROR ? "error" : "in progress");
4a776f0a
HS
546 failed_tests++;
547 continue;
548 }
e44e0aa3 549
d86be86e 550 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
15b8a8ea 551 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
4a776f0a
HS
552
553 error_count = 0;
554
555 pr_debug("%s: verifying source buffer...\n", thread_name);
b54d5cb9 556 error_count += dmatest_verify(thread->srcs, 0, src_off,
4a776f0a 557 0, PATTERN_SRC, true);
b54d5cb9 558 error_count += dmatest_verify(thread->srcs, src_off,
4a776f0a
HS
559 src_off + len, src_off,
560 PATTERN_SRC | PATTERN_COPY, true);
b54d5cb9 561 error_count += dmatest_verify(thread->srcs, src_off + len,
15b8a8ea 562 params->buf_size, src_off + len,
4a776f0a
HS
563 PATTERN_SRC, true);
564
565 pr_debug("%s: verifying dest buffer...\n",
566 thread->task->comm);
b54d5cb9 567 error_count += dmatest_verify(thread->dsts, 0, dst_off,
4a776f0a 568 0, PATTERN_DST, false);
b54d5cb9 569 error_count += dmatest_verify(thread->dsts, dst_off,
4a776f0a
HS
570 dst_off + len, src_off,
571 PATTERN_SRC | PATTERN_COPY, false);
b54d5cb9 572 error_count += dmatest_verify(thread->dsts, dst_off + len,
15b8a8ea 573 params->buf_size, dst_off + len,
4a776f0a
HS
574 PATTERN_DST, false);
575
576 if (error_count) {
577 pr_warning("%s: #%u: %u errors with "
578 "src_off=0x%x dst_off=0x%x len=0x%x\n",
579 thread_name, total_tests - 1, error_count,
580 src_off, dst_off, len);
581 failed_tests++;
582 } else {
583 pr_debug("%s: #%u: No errors with "
584 "src_off=0x%x dst_off=0x%x len=0x%x\n",
585 thread_name, total_tests - 1,
586 src_off, dst_off, len);
587 }
588 }
589
590 ret = 0;
b54d5cb9
DW
591 for (i = 0; thread->dsts[i]; i++)
592 kfree(thread->dsts[i]);
4a776f0a 593err_dstbuf:
b54d5cb9
DW
594 kfree(thread->dsts);
595err_dsts:
596 for (i = 0; thread->srcs[i]; i++)
597 kfree(thread->srcs[i]);
4a776f0a 598err_srcbuf:
b54d5cb9
DW
599 kfree(thread->srcs);
600err_srcs:
945b5af3
AS
601 kfree(pq_coefs);
602err_thread_type:
4a776f0a
HS
603 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
604 thread_name, total_tests, failed_tests, ret);
0a2ff57d 605
9704efaa 606 /* terminate all transfers on specified channels */
5e034f7b
SH
607 if (ret)
608 dmaengine_terminate_all(chan);
609
3e5ccd86
AS
610 thread->done = true;
611
15b8a8ea 612 if (params->iterations > 0)
0a2ff57d 613 while (!kthread_should_stop()) {
b953df7c 614 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
0a2ff57d
NF
615 interruptible_sleep_on(&wait_dmatest_exit);
616 }
617
4a776f0a
HS
618 return ret;
619}
620
621static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
622{
623 struct dmatest_thread *thread;
624 struct dmatest_thread *_thread;
625 int ret;
626
627 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
628 ret = kthread_stop(thread->task);
629 pr_debug("dmatest: thread %s exited with status %d\n",
630 thread->task->comm, ret);
631 list_del(&thread->node);
632 kfree(thread);
633 }
9704efaa
VK
634
635 /* terminate all transfers on specified channels */
944ea4dd 636 dmaengine_terminate_all(dtc->chan);
9704efaa 637
4a776f0a
HS
638 kfree(dtc);
639}
640
e03e93a9
AS
641static int dmatest_add_threads(struct dmatest_info *info,
642 struct dmatest_chan *dtc, enum dma_transaction_type type)
4a776f0a 643{
15b8a8ea 644 struct dmatest_params *params = &info->params;
b54d5cb9
DW
645 struct dmatest_thread *thread;
646 struct dma_chan *chan = dtc->chan;
647 char *op;
648 unsigned int i;
4a776f0a 649
b54d5cb9
DW
650 if (type == DMA_MEMCPY)
651 op = "copy";
652 else if (type == DMA_XOR)
653 op = "xor";
58691d64
DW
654 else if (type == DMA_PQ)
655 op = "pq";
b54d5cb9
DW
656 else
657 return -EINVAL;
4a776f0a 658
15b8a8ea 659 for (i = 0; i < params->threads_per_chan; i++) {
4a776f0a
HS
660 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
661 if (!thread) {
b54d5cb9
DW
662 pr_warning("dmatest: No memory for %s-%s%u\n",
663 dma_chan_name(chan), op, i);
664
4a776f0a
HS
665 break;
666 }
e03e93a9 667 thread->info = info;
4a776f0a 668 thread->chan = dtc->chan;
b54d5cb9 669 thread->type = type;
4a776f0a 670 smp_wmb();
b54d5cb9
DW
671 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
672 dma_chan_name(chan), op, i);
4a776f0a 673 if (IS_ERR(thread->task)) {
b54d5cb9
DW
674 pr_warning("dmatest: Failed to run thread %s-%s%u\n",
675 dma_chan_name(chan), op, i);
4a776f0a
HS
676 kfree(thread);
677 break;
678 }
679
680 /* srcbuf and dstbuf are allocated by the thread itself */
681
682 list_add_tail(&thread->node, &dtc->threads);
683 }
684
b54d5cb9
DW
685 return i;
686}
687
e03e93a9
AS
688static int dmatest_add_channel(struct dmatest_info *info,
689 struct dma_chan *chan)
b54d5cb9
DW
690{
691 struct dmatest_chan *dtc;
692 struct dma_device *dma_dev = chan->device;
693 unsigned int thread_count = 0;
b9033e68 694 int cnt;
b54d5cb9
DW
695
696 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
697 if (!dtc) {
698 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
699 return -ENOMEM;
700 }
701
702 dtc->chan = chan;
703 INIT_LIST_HEAD(&dtc->threads);
704
705 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
e03e93a9 706 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
f1aef8b6 707 thread_count += cnt > 0 ? cnt : 0;
b54d5cb9
DW
708 }
709 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
e03e93a9 710 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
f1aef8b6 711 thread_count += cnt > 0 ? cnt : 0;
b54d5cb9 712 }
58691d64 713 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
e03e93a9 714 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
d07a74a5 715 thread_count += cnt > 0 ? cnt : 0;
58691d64 716 }
b54d5cb9
DW
717
718 pr_info("dmatest: Started %u threads using %s\n",
719 thread_count, dma_chan_name(chan));
4a776f0a 720
838cc704
AS
721 list_add_tail(&dtc->node, &info->channels);
722 info->nr_channels++;
4a776f0a 723
33df8ca0 724 return 0;
4a776f0a
HS
725}
726
7dd60251 727static bool filter(struct dma_chan *chan, void *param)
4a776f0a 728{
15b8a8ea 729 struct dmatest_params *params = param;
e03e93a9 730
15b8a8ea
AS
731 if (!dmatest_match_channel(params, chan) ||
732 !dmatest_match_device(params, chan->device))
7dd60251 733 return false;
33df8ca0 734 else
7dd60251 735 return true;
4a776f0a
HS
736}
737
851b7e16 738static int __run_threaded_test(struct dmatest_info *info)
4a776f0a 739{
33df8ca0
DW
740 dma_cap_mask_t mask;
741 struct dma_chan *chan;
15b8a8ea 742 struct dmatest_params *params = &info->params;
33df8ca0
DW
743 int err = 0;
744
745 dma_cap_zero(mask);
746 dma_cap_set(DMA_MEMCPY, mask);
747 for (;;) {
15b8a8ea 748 chan = dma_request_channel(mask, filter, params);
33df8ca0 749 if (chan) {
e03e93a9 750 err = dmatest_add_channel(info, chan);
c56c81ab 751 if (err) {
33df8ca0
DW
752 dma_release_channel(chan);
753 break; /* add_channel failed, punt */
754 }
755 } else
756 break; /* no more channels available */
15b8a8ea
AS
757 if (params->max_channels &&
758 info->nr_channels >= params->max_channels)
33df8ca0
DW
759 break; /* we have all we need */
760 }
33df8ca0 761 return err;
4a776f0a 762}
4a776f0a 763
851b7e16
AS
764#ifndef MODULE
765static int run_threaded_test(struct dmatest_info *info)
766{
767 int ret;
768
769 mutex_lock(&info->lock);
770 ret = __run_threaded_test(info);
771 mutex_unlock(&info->lock);
772 return ret;
773}
774#endif
775
776static void __stop_threaded_test(struct dmatest_info *info)
4a776f0a 777{
33df8ca0 778 struct dmatest_chan *dtc, *_dtc;
7cbd4877 779 struct dma_chan *chan;
33df8ca0 780
838cc704 781 list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
33df8ca0 782 list_del(&dtc->node);
7cbd4877 783 chan = dtc->chan;
33df8ca0 784 dmatest_cleanup_channel(dtc);
838cc704 785 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
7cbd4877 786 dma_release_channel(chan);
33df8ca0 787 }
838cc704
AS
788
789 info->nr_channels = 0;
4a776f0a 790}
e03e93a9 791
851b7e16
AS
792static void stop_threaded_test(struct dmatest_info *info)
793{
794 mutex_lock(&info->lock);
795 __stop_threaded_test(info);
796 mutex_unlock(&info->lock);
797}
798
799static int __restart_threaded_test(struct dmatest_info *info, bool run)
800{
801 struct dmatest_params *params = &info->params;
802 int ret;
803
804 /* Stop any running test first */
805 __stop_threaded_test(info);
806
807 if (run == false)
808 return 0;
809
810 /* Copy test parameters */
811 memcpy(params, &info->dbgfs_params, sizeof(*params));
812
813 /* Run test with new parameters */
814 ret = __run_threaded_test(info);
815 if (ret) {
816 __stop_threaded_test(info);
817 pr_err("dmatest: Can't run test\n");
818 }
819
820 return ret;
821}
822
823static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
824 const void __user *from, size_t count)
825{
826 char tmp[20];
827 ssize_t len;
828
829 len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
830 if (len >= 0) {
831 tmp[len] = '\0';
832 strlcpy(to, strim(tmp), available);
833 }
834
835 return len;
836}
837
838static ssize_t dtf_read_channel(struct file *file, char __user *buf,
839 size_t count, loff_t *ppos)
840{
841 struct dmatest_info *info = file->private_data;
842 return simple_read_from_buffer(buf, count, ppos,
843 info->dbgfs_params.channel,
844 strlen(info->dbgfs_params.channel));
845}
846
847static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
848 size_t size, loff_t *ppos)
849{
850 struct dmatest_info *info = file->private_data;
851 return dtf_write_string(info->dbgfs_params.channel,
852 sizeof(info->dbgfs_params.channel),
853 ppos, buf, size);
854}
855
856static const struct file_operations dtf_channel_fops = {
857 .read = dtf_read_channel,
858 .write = dtf_write_channel,
859 .open = simple_open,
860 .llseek = default_llseek,
861};
862
863static ssize_t dtf_read_device(struct file *file, char __user *buf,
864 size_t count, loff_t *ppos)
865{
866 struct dmatest_info *info = file->private_data;
867 return simple_read_from_buffer(buf, count, ppos,
868 info->dbgfs_params.device,
869 strlen(info->dbgfs_params.device));
870}
871
872static ssize_t dtf_write_device(struct file *file, const char __user *buf,
873 size_t size, loff_t *ppos)
874{
875 struct dmatest_info *info = file->private_data;
876 return dtf_write_string(info->dbgfs_params.device,
877 sizeof(info->dbgfs_params.device),
878 ppos, buf, size);
879}
880
881static const struct file_operations dtf_device_fops = {
882 .read = dtf_read_device,
883 .write = dtf_write_device,
884 .open = simple_open,
885 .llseek = default_llseek,
886};
887
888static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
889 size_t count, loff_t *ppos)
890{
891 struct dmatest_info *info = file->private_data;
892 char buf[3];
3e5ccd86
AS
893 struct dmatest_chan *dtc;
894 bool alive = false;
851b7e16
AS
895
896 mutex_lock(&info->lock);
3e5ccd86
AS
897 list_for_each_entry(dtc, &info->channels, node) {
898 struct dmatest_thread *thread;
899
900 list_for_each_entry(thread, &dtc->threads, node) {
901 if (!thread->done) {
902 alive = true;
903 break;
904 }
905 }
906 }
907
908 if (alive) {
851b7e16 909 buf[0] = 'Y';
3e5ccd86
AS
910 } else {
911 __stop_threaded_test(info);
851b7e16 912 buf[0] = 'N';
3e5ccd86
AS
913 }
914
851b7e16
AS
915 mutex_unlock(&info->lock);
916 buf[1] = '\n';
917 buf[2] = 0x00;
918 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
919}
920
921static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
922 size_t count, loff_t *ppos)
923{
924 struct dmatest_info *info = file->private_data;
925 char buf[16];
926 bool bv;
927 int ret = 0;
928
929 if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
930 return -EFAULT;
931
932 if (strtobool(buf, &bv) == 0) {
933 mutex_lock(&info->lock);
934 ret = __restart_threaded_test(info, bv);
935 mutex_unlock(&info->lock);
936 }
937
938 return ret ? ret : count;
939}
940
941static const struct file_operations dtf_run_fops = {
942 .read = dtf_read_run,
943 .write = dtf_write_run,
944 .open = simple_open,
945 .llseek = default_llseek,
946};
947
948static int dmatest_register_dbgfs(struct dmatest_info *info)
949{
950 struct dentry *d;
951 struct dmatest_params *params = &info->dbgfs_params;
952 int ret = -ENOMEM;
953
954 d = debugfs_create_dir("dmatest", NULL);
955 if (IS_ERR(d))
956 return PTR_ERR(d);
957 if (!d)
958 goto err_root;
959
960 info->root = d;
961
962 /* Copy initial values */
963 memcpy(params, &info->params, sizeof(*params));
964
965 /* Test parameters */
966
967 d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
968 (u32 *)&params->buf_size);
969 if (IS_ERR_OR_NULL(d))
970 goto err_node;
971
972 d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
973 info, &dtf_channel_fops);
974 if (IS_ERR_OR_NULL(d))
975 goto err_node;
976
977 d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
978 info, &dtf_device_fops);
979 if (IS_ERR_OR_NULL(d))
980 goto err_node;
981
982 d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
983 (u32 *)&params->threads_per_chan);
984 if (IS_ERR_OR_NULL(d))
985 goto err_node;
986
987 d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
988 (u32 *)&params->max_channels);
989 if (IS_ERR_OR_NULL(d))
990 goto err_node;
991
992 d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
993 (u32 *)&params->iterations);
994 if (IS_ERR_OR_NULL(d))
995 goto err_node;
996
997 d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
998 (u32 *)&params->xor_sources);
999 if (IS_ERR_OR_NULL(d))
1000 goto err_node;
1001
1002 d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
1003 (u32 *)&params->pq_sources);
1004 if (IS_ERR_OR_NULL(d))
1005 goto err_node;
1006
1007 d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
1008 (u32 *)&params->timeout);
1009 if (IS_ERR_OR_NULL(d))
1010 goto err_node;
1011
1012 /* Run or stop threaded test */
1013 d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
1014 info, &dtf_run_fops);
1015 if (IS_ERR_OR_NULL(d))
1016 goto err_node;
1017
1018 return 0;
1019
1020err_node:
1021 debugfs_remove_recursive(info->root);
1022err_root:
1023 pr_err("dmatest: Failed to initialize debugfs\n");
1024 return ret;
1025}
1026
e03e93a9
AS
1027static int __init dmatest_init(void)
1028{
1029 struct dmatest_info *info = &test_info;
15b8a8ea 1030 struct dmatest_params *params = &info->params;
851b7e16 1031 int ret;
e03e93a9
AS
1032
1033 memset(info, 0, sizeof(*info));
1034
851b7e16 1035 mutex_init(&info->lock);
838cc704
AS
1036 INIT_LIST_HEAD(&info->channels);
1037
1038 /* Set default parameters */
15b8a8ea
AS
1039 params->buf_size = test_buf_size;
1040 strlcpy(params->channel, test_channel, sizeof(params->channel));
1041 strlcpy(params->device, test_device, sizeof(params->device));
1042 params->threads_per_chan = threads_per_chan;
1043 params->max_channels = max_channels;
1044 params->iterations = iterations;
1045 params->xor_sources = xor_sources;
1046 params->pq_sources = pq_sources;
1047 params->timeout = timeout;
e03e93a9 1048
851b7e16
AS
1049 ret = dmatest_register_dbgfs(info);
1050 if (ret)
1051 return ret;
1052
1053#ifdef MODULE
1054 return 0;
1055#else
e03e93a9 1056 return run_threaded_test(info);
851b7e16 1057#endif
e03e93a9
AS
1058}
1059/* when compiled-in wait for drivers to load first */
1060late_initcall(dmatest_init);
1061
1062static void __exit dmatest_exit(void)
1063{
1064 struct dmatest_info *info = &test_info;
1065
851b7e16 1066 debugfs_remove_recursive(info->root);
e03e93a9
AS
1067 stop_threaded_test(info);
1068}
4a776f0a
HS
1069module_exit(dmatest_exit);
1070
e05503ef 1071MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4a776f0a 1072MODULE_LICENSE("GPL v2");