/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
                "Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
                "Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
                "Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
                "Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
                 "Pass -1 for infinite timeout");

/* Maximum number of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT         32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC             0x80
#define PATTERN_DST             0x00
#define PATTERN_COPY            0x40
#define PATTERN_OVERWRITE       0x20
#define PATTERN_COUNT_MASK      0x1f

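/*
 * Result bookkeeping: every test iteration produces a dmatest_thread_result,
 * collected per thread name in a dmatest_result and reported through the
 * debugfs "results" file.
 */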
enum dmatest_error_type {
        DMATEST_ET_OK,
        DMATEST_ET_MAP_SRC,
        DMATEST_ET_MAP_DST,
        DMATEST_ET_PREP,
        DMATEST_ET_SUBMIT,
        DMATEST_ET_TIMEOUT,
        DMATEST_ET_DMA_ERROR,
        DMATEST_ET_DMA_IN_PROGRESS,
        DMATEST_ET_VERIFY,
};

struct dmatest_thread_result {
        struct list_head node;
        unsigned int n;
        unsigned int src_off;
        unsigned int dst_off;
        unsigned int len;
        enum dmatest_error_type type;
        union {
                unsigned long data;
                dma_cookie_t cookie;
                enum dma_status status;
                int error;
        };
};

struct dmatest_result {
        struct list_head node;
        char *name;
        struct list_head results;
};

struct dmatest_info;

struct dmatest_thread {
        struct list_head node;
        struct dmatest_info *info;
        struct task_struct *task;
        struct dma_chan *chan;
        u8 **srcs;
        u8 **dsts;
        enum dma_transaction_type type;
        bool done;
};

struct dmatest_chan {
        struct list_head node;
        struct dma_chan *chan;
        struct list_head threads;
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:           size of the memcpy test buffer
 * @channel:            bus ID of the channel to test
 * @device:             bus ID of the DMA Engine to test
 * @threads_per_chan:   number of threads to start per channel
 * @max_channels:       maximum number of channels to use
 * @iterations:         iterations before stopping test
 * @xor_sources:        number of xor source buffers
 * @pq_sources:         number of p+q source buffers
 * @timeout:            transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
        unsigned int buf_size;
        char channel[20];
        char device[20];
        unsigned int threads_per_chan;
        unsigned int max_channels;
        unsigned int iterations;
        unsigned int xor_sources;
        unsigned int pq_sources;
        int timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:     test parameters
 * @lock:       access protection to the fields of this structure
 */
struct dmatest_info {
        /* Test parameters */
        struct dmatest_params params;

        /* Internal state */
        struct list_head channels;
        unsigned int nr_channels;
        struct mutex lock;

        /* debugfs related stuff */
        struct dentry *root;

        /* Test results */
        struct list_head results;
        struct mutex results_lock;
};

static struct dmatest_info test_info;

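/* An empty "channel" or "device" parameter string matches any candidate. */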
static bool dmatest_match_channel(struct dmatest_params *params,
                struct dma_chan *chan)
{
        if (params->channel[0] == '\0')
                return true;
        return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
                struct dma_device *device)
{
        if (params->device[0] == '\0')
                return true;
        return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
        unsigned long buf;

        get_random_bytes(&buf, sizeof(buf));
        return buf;
}

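/*
 * Fill the test buffers with the patterns described above; only the
 * [start, start + len) window is tagged as COPY (sources) or OVERWRITE
 * (destinations).
 */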
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
                unsigned int buf_size)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_SRC | PATTERN_COPY
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < buf_size; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                buf++;
        }
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
                unsigned int buf_size)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < buf_size; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
                unsigned int counter, bool is_srcbuf)
{
        u8 diff = actual ^ pattern;
        u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
        const char *thread_name = current->comm;

        if (is_srcbuf)
                pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else if ((pattern & PATTERN_COPY)
                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
                pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else if (diff & PATTERN_SRC)
                pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else
                pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
                unsigned int end, unsigned int counter, u8 pattern,
                bool is_srcbuf)
{
        unsigned int i;
        unsigned int error_count = 0;
        u8 actual;
        u8 expected;
        u8 *buf;
        unsigned int counter_orig = counter;

        for (; (buf = *bufs); bufs++) {
                counter = counter_orig;
                for (i = start; i < end; i++) {
                        actual = buf[i];
                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
                        if (actual != expected) {
                                if (error_count < MAX_ERROR_COUNT)
                                        dmatest_mismatch(actual, pattern, i,
                                                         counter, is_srcbuf);
                                error_count++;
                        }
                        counter++;
                }
        }

        if (error_count > MAX_ERROR_COUNT)
                pr_warn("%s: %u errors suppressed\n",
                        current->comm, error_count - MAX_ERROR_COUNT);

        return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
        bool done;
        wait_queue_head_t *wait;
};

static void dmatest_callback(void *arg)
{
        struct dmatest_done *done = arg;

        done->done = true;
        wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
                unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
                unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

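/* Largest odd value not exceeding min(x, y) */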
static unsigned int min_odd(unsigned int x, unsigned int y)
{
        unsigned int val = min(x, y);

        return val % 2 ? val : val - 1;
}

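/* Format one thread result as a human-readable line; uses a static buffer. */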
static char *thread_result_get(const char *name,
                struct dmatest_thread_result *tr)
{
        static const char * const messages[] = {
                [DMATEST_ET_OK]                 = "No errors",
                [DMATEST_ET_MAP_SRC]            = "src mapping error",
                [DMATEST_ET_MAP_DST]            = "dst mapping error",
                [DMATEST_ET_PREP]               = "prep error",
                [DMATEST_ET_SUBMIT]             = "submit error",
                [DMATEST_ET_TIMEOUT]            = "test timed out",
                [DMATEST_ET_DMA_ERROR]          = "got completion callback (DMA_ERROR)",
                [DMATEST_ET_DMA_IN_PROGRESS]    = "got completion callback (DMA_IN_PROGRESS)",
                [DMATEST_ET_VERIFY]             = "errors",
        };
        static char buf[512];

        snprintf(buf, sizeof(buf) - 1,
                 "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
                 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
                 tr->len, tr->data);

        return buf;
}

static int thread_result_add(struct dmatest_info *info,
                struct dmatest_result *r, enum dmatest_error_type type,
                unsigned int n, unsigned int src_off, unsigned int dst_off,
                unsigned int len, unsigned long data)
{
        struct dmatest_thread_result *tr;

        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
        if (!tr)
                return -ENOMEM;

        tr->type = type;
        tr->n = n;
        tr->src_off = src_off;
        tr->dst_off = dst_off;
        tr->len = len;
        tr->data = data;

        mutex_lock(&info->results_lock);
        list_add_tail(&tr->node, &r->results);
        mutex_unlock(&info->results_lock);

        if (tr->type == DMATEST_ET_OK)
                pr_debug("%s\n", thread_result_get(r->name, tr));
        else
                pr_warn("%s\n", thread_result_get(r->name, tr));

        return 0;
}

static void result_free(struct dmatest_info *info, const char *name)
{
        struct dmatest_result *r, *_r;

        mutex_lock(&info->results_lock);
        list_for_each_entry_safe(r, _r, &info->results, node) {
                struct dmatest_thread_result *tr, *_tr;

                if (name && strcmp(r->name, name))
                        continue;

                list_for_each_entry_safe(tr, _tr, &r->results, node) {
                        list_del(&tr->node);
                        kfree(tr);
                }

                kfree(r->name);
                list_del(&r->node);
                kfree(r);
        }

        mutex_unlock(&info->results_lock);
}

static struct dmatest_result *result_init(struct dmatest_info *info,
                const char *name)
{
        struct dmatest_result *r;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (r) {
                r->name = kstrdup(name, GFP_KERNEL);
                INIT_LIST_HEAD(&r->results);
                mutex_lock(&info->results_lock);
                list_add_tail(&r->node, &info->results);
                mutex_unlock(&info->results_lock);
        }
        return r;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread *thread = data;
        struct dmatest_done done = { .wait = &done_wait };
        struct dmatest_info *info;
        struct dmatest_params *params;
        struct dma_chan *chan;
        struct dma_device *dev;
        const char *thread_name;
        unsigned int src_off, dst_off, len;
        unsigned int error_count;
        unsigned int failed_tests = 0;
        unsigned int total_tests = 0;
        dma_cookie_t cookie;
        enum dma_status status;
        enum dma_ctrl_flags flags;
        u8 *pq_coefs = NULL;
        int ret;
        int src_cnt;
        int dst_cnt;
        int i;
        struct dmatest_result *result;

        thread_name = current->comm;
        set_freezable();

        ret = -ENOMEM;

        smp_rmb();
        info = thread->info;
        params = &info->params;
        chan = thread->chan;
        dev = chan->device;
        if (thread->type == DMA_MEMCPY)
                src_cnt = dst_cnt = 1;
        else if (thread->type == DMA_XOR) {
                /* force odd to ensure dst = src */
                src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
                dst_cnt = 1;
        } else if (thread->type == DMA_PQ) {
                /* force odd to ensure dst = src */
                src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
                dst_cnt = 2;

                pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
                if (!pq_coefs)
                        goto err_thread_type;

                for (i = 0; i < src_cnt; i++)
                        pq_coefs[i] = 1;
        } else
                goto err_thread_type;

        result = result_init(info, thread_name);
        if (!result)
                goto err_srcs;

        thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->srcs)
                goto err_srcs;
        for (i = 0; i < src_cnt; i++) {
                thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
                if (!thread->srcs[i])
                        goto err_srcbuf;
        }
        thread->srcs[i] = NULL;

        thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->dsts)
                goto err_dsts;
        for (i = 0; i < dst_cnt; i++) {
                thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
                if (!thread->dsts[i])
                        goto err_dstbuf;
        }
        thread->dsts[i] = NULL;

        set_user_nice(current, 10);

        /*
         * src and dst buffers are freed by ourselves below
         */
        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

        while (!kthread_should_stop()
               && !(params->iterations && total_tests >= params->iterations)) {
                struct dma_async_tx_descriptor *tx = NULL;
                dma_addr_t dma_srcs[src_cnt];
                dma_addr_t dma_dsts[dst_cnt];
                u8 align = 0;

                total_tests++;

                /* honor alignment restrictions */
                if (thread->type == DMA_MEMCPY)
                        align = dev->copy_align;
                else if (thread->type == DMA_XOR)
                        align = dev->xor_align;
                else if (thread->type == DMA_PQ)
                        align = dev->pq_align;

                if (1 << align > params->buf_size) {
                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
                               params->buf_size, 1 << align);
                        break;
                }

                len = dmatest_random() % params->buf_size + 1;
                len = (len >> align) << align;
                if (!len)
                        len = 1 << align;
                src_off = dmatest_random() % (params->buf_size - len + 1);
                dst_off = dmatest_random() % (params->buf_size - len + 1);

                src_off = (src_off >> align) << align;
                dst_off = (dst_off >> align) << align;

                dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
                dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

                for (i = 0; i < src_cnt; i++) {
                        u8 *buf = thread->srcs[i] + src_off;

                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
                                                     DMA_TO_DEVICE);
                        ret = dma_mapping_error(dev->dev, dma_srcs[i]);
                        if (ret) {
                                unmap_src(dev->dev, dma_srcs, len, i);
                                thread_result_add(info, result,
                                                  DMATEST_ET_MAP_SRC,
                                                  total_tests, src_off, dst_off,
                                                  len, ret);
                                failed_tests++;
                                continue;
                        }
                }
                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
                for (i = 0; i < dst_cnt; i++) {
                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
                                                     params->buf_size,
                                                     DMA_BIDIRECTIONAL);
                        ret = dma_mapping_error(dev->dev, dma_dsts[i]);
                        if (ret) {
                                unmap_src(dev->dev, dma_srcs, len, src_cnt);
                                unmap_dst(dev->dev, dma_dsts, params->buf_size,
                                          i);
                                thread_result_add(info, result,
                                                  DMATEST_ET_MAP_DST,
                                                  total_tests, src_off, dst_off,
                                                  len, ret);
                                failed_tests++;
                                continue;
                        }
                }

                if (thread->type == DMA_MEMCPY)
                        tx = dev->device_prep_dma_memcpy(chan,
                                                         dma_dsts[0] + dst_off,
                                                         dma_srcs[0], len,
                                                         flags);
                else if (thread->type == DMA_XOR)
                        tx = dev->device_prep_dma_xor(chan,
                                                      dma_dsts[0] + dst_off,
                                                      dma_srcs, src_cnt,
                                                      len, flags);
                else if (thread->type == DMA_PQ) {
                        dma_addr_t dma_pq[dst_cnt];

                        for (i = 0; i < dst_cnt; i++)
                                dma_pq[i] = dma_dsts[i] + dst_off;
                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
                                                     src_cnt, pq_coefs,
                                                     len, flags);
                }

                if (!tx) {
                        unmap_src(dev->dev, dma_srcs, len, src_cnt);
                        unmap_dst(dev->dev, dma_dsts, params->buf_size,
                                  dst_cnt);
                        thread_result_add(info, result, DMATEST_ET_PREP,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                        msleep(100);
                        failed_tests++;
                        continue;
                }

                done.done = false;
                tx->callback = dmatest_callback;
                tx->callback_param = &done;
                cookie = tx->tx_submit(tx);

                if (dma_submit_error(cookie)) {
                        thread_result_add(info, result, DMATEST_ET_SUBMIT,
                                          total_tests, src_off, dst_off,
                                          len, cookie);
                        msleep(100);
                        failed_tests++;
                        continue;
                }
                dma_async_issue_pending(chan);

                wait_event_freezable_timeout(done_wait, done.done,
                                             msecs_to_jiffies(params->timeout));

                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

                if (!done.done) {
                        /*
                         * We're leaving the timed out dma operation with
                         * dangling pointer to done_wait. To make this
                         * correct, we'll need to allocate wait_done for
                         * each test iteration and perform "who's gonna
                         * free it this time?" dancing. For now, just
                         * leave it dangling.
                         */
                        thread_result_add(info, result, DMATEST_ET_TIMEOUT,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                        failed_tests++;
                        continue;
                } else if (status != DMA_SUCCESS) {
                        enum dmatest_error_type type = (status == DMA_ERROR) ?
                                DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
                        thread_result_add(info, result, type,
                                          total_tests, src_off, dst_off,
                                          len, status);
                        failed_tests++;
                        continue;
                }

                /* Unmap by myself */
                unmap_src(dev->dev, dma_srcs, len, src_cnt);
                unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

                error_count = 0;

                pr_debug("%s: verifying source buffer...\n", thread_name);
                error_count += dmatest_verify(thread->srcs, 0, src_off,
                                0, PATTERN_SRC, true);
                error_count += dmatest_verify(thread->srcs, src_off,
                                src_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, true);
                error_count += dmatest_verify(thread->srcs, src_off + len,
                                params->buf_size, src_off + len,
                                PATTERN_SRC, true);

                pr_debug("%s: verifying dest buffer...\n",
                         thread->task->comm);
                error_count += dmatest_verify(thread->dsts, 0, dst_off,
                                0, PATTERN_DST, false);
                error_count += dmatest_verify(thread->dsts, dst_off,
                                dst_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, false);
                error_count += dmatest_verify(thread->dsts, dst_off + len,
                                params->buf_size, dst_off + len,
                                PATTERN_DST, false);

                if (error_count) {
                        thread_result_add(info, result, DMATEST_ET_VERIFY,
                                          total_tests, src_off, dst_off,
                                          len, error_count);
                        failed_tests++;
                } else {
                        thread_result_add(info, result, DMATEST_ET_OK,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                }
        }

        ret = 0;
        for (i = 0; thread->dsts[i]; i++)
                kfree(thread->dsts[i]);
err_dstbuf:
        kfree(thread->dsts);
err_dsts:
        for (i = 0; thread->srcs[i]; i++)
                kfree(thread->srcs[i]);
err_srcbuf:
        kfree(thread->srcs);
err_srcs:
        kfree(pq_coefs);
err_thread_type:
        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
                  thread_name, total_tests, failed_tests, ret);

        /* terminate all transfers on specified channels */
        if (ret)
                dmaengine_terminate_all(chan);

        thread->done = true;

        if (params->iterations > 0)
                while (!kthread_should_stop()) {
                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                        interruptible_sleep_on(&wait_dmatest_exit);
                }

        return ret;
}

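/* Stop every test thread on a channel and abort any transfers still in flight. */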
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
        struct dmatest_thread *thread;
        struct dmatest_thread *_thread;
        int ret;

        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
                ret = kthread_stop(thread->task);
                pr_debug("dmatest: thread %s exited with status %d\n",
                         thread->task->comm, ret);
                list_del(&thread->node);
                kfree(thread);
        }

        /* terminate all transfers on specified channels */
        dmaengine_terminate_all(dtc->chan);

        kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
                struct dmatest_chan *dtc, enum dma_transaction_type type)
{
        struct dmatest_params *params = &info->params;
        struct dmatest_thread *thread;
        struct dma_chan *chan = dtc->chan;
        char *op;
        unsigned int i;

        if (type == DMA_MEMCPY)
                op = "copy";
        else if (type == DMA_XOR)
                op = "xor";
        else if (type == DMA_PQ)
                op = "pq";
        else
                return -EINVAL;

        for (i = 0; i < params->threads_per_chan; i++) {
                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
                if (!thread) {
                        pr_warning("dmatest: No memory for %s-%s%u\n",
                                   dma_chan_name(chan), op, i);

                        break;
                }
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
                smp_wmb();
                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
                if (IS_ERR(thread->task)) {
                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
                                   dma_chan_name(chan), op, i);
                        kfree(thread);
                        break;
                }

                /* srcbuf and dstbuf are allocated by the thread itself */

                list_add_tail(&thread->node, &dtc->threads);
        }

        return i;
}

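/* Register one group of test threads for each capability (copy/xor/pq) the device advertises. */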
static int dmatest_add_channel(struct dmatest_info *info,
                struct dma_chan *chan)
{
        struct dmatest_chan *dtc;
        struct dma_device *dma_dev = chan->device;
        unsigned int thread_count = 0;
        int cnt;

        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
                return -ENOMEM;
        }

        dtc->chan = chan;
        INIT_LIST_HEAD(&dtc->threads);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_XOR);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_PQ);
                thread_count += cnt > 0 ? cnt : 0;
        }

        pr_info("dmatest: Started %u threads using %s\n",
                thread_count, dma_chan_name(chan));

        list_add_tail(&dtc->node, &info->channels);
        info->nr_channels++;

        return 0;
}

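/* dma_request_channel() filter: accept only channels matching the configured test parameters. */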
7dd60251 858static bool filter(struct dma_chan *chan, void *param)
4a776f0a 859{
15b8a8ea 860 struct dmatest_params *params = param;
e03e93a9 861
15b8a8ea
AS
862 if (!dmatest_match_channel(params, chan) ||
863 !dmatest_match_device(params, chan->device))
7dd60251 864 return false;
33df8ca0 865 else
7dd60251 866 return true;
4a776f0a
HS
867}
868
static int __run_threaded_test(struct dmatest_info *info)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dmatest_params *params = &info->params;
        int err = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        for (;;) {
                chan = dma_request_channel(mask, filter, params);
                if (chan) {
                        err = dmatest_add_channel(info, chan);
                        if (err) {
                                dma_release_channel(chan);
                                break; /* add_channel failed, punt */
                        }
                } else
                        break; /* no more channels available */
                if (params->max_channels &&
                    info->nr_channels >= params->max_channels)
                        break; /* we have all we need */
        }
        return err;
}

#ifndef MODULE
static int run_threaded_test(struct dmatest_info *info)
{
        int ret;

        mutex_lock(&info->lock);
        ret = __run_threaded_test(info);
        mutex_unlock(&info->lock);
        return ret;
}
#endif

static void __stop_threaded_test(struct dmatest_info *info)
{
        struct dmatest_chan *dtc, *_dtc;
        struct dma_chan *chan;

        list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
                list_del(&dtc->node);
                chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
                dma_release_channel(chan);
        }

        info->nr_channels = 0;
}

static void stop_threaded_test(struct dmatest_info *info)
{
        mutex_lock(&info->lock);
        __stop_threaded_test(info);
        mutex_unlock(&info->lock);
}

static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
        struct dmatest_params *params = &info->params;

        /* Stop any running test first */
        __stop_threaded_test(info);

        if (run == false)
                return 0;

        /* Clear results from previous run */
        result_free(info, NULL);

        /* Copy test parameters */
        params->buf_size = test_buf_size;
        strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
        strlcpy(params->device, strim(test_device), sizeof(params->device));
        params->threads_per_chan = threads_per_chan;
        params->max_channels = max_channels;
        params->iterations = iterations;
        params->xor_sources = xor_sources;
        params->pq_sources = pq_sources;
        params->timeout = timeout;

        /* Run test with new parameters */
        return __run_threaded_test(info);
}

static bool __is_threaded_test_run(struct dmatest_info *info)
{
        struct dmatest_chan *dtc;

        list_for_each_entry(dtc, &info->channels, node) {
                struct dmatest_thread *thread;

                list_for_each_entry(thread, &dtc->threads, node) {
                        if (!thread->done)
                                return true;
                }
        }

        return false;
}

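/*
 * debugfs "run" file: reading reports whether a test is still running
 * ("Y"/"N"); writing a boolean stops the current test and, for a true
 * value, restarts it with the current module parameters.
 */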
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
                size_t count, loff_t *ppos)
{
        struct dmatest_info *info = file->private_data;
        char buf[3];

        mutex_lock(&info->lock);

        if (__is_threaded_test_run(info)) {
                buf[0] = 'Y';
        } else {
                __stop_threaded_test(info);
                buf[0] = 'N';
        }

        mutex_unlock(&info->lock);
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
                size_t count, loff_t *ppos)
{
        struct dmatest_info *info = file->private_data;
        char buf[16];
        bool bv;
        int ret = 0;

        if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
                return -EFAULT;

        if (strtobool(buf, &bv) == 0) {
                mutex_lock(&info->lock);

                if (__is_threaded_test_run(info))
                        ret = -EBUSY;
                else
                        ret = __restart_threaded_test(info, bv);

                mutex_unlock(&info->lock);
        }

        return ret ? ret : count;
}

static const struct file_operations dtf_run_fops = {
        .read   = dtf_read_run,
        .write  = dtf_write_run,
        .open   = simple_open,
        .llseek = default_llseek,
};

static int dtf_results_show(struct seq_file *sf, void *data)
{
        struct dmatest_info *info = sf->private;
        struct dmatest_result *result;
        struct dmatest_thread_result *tr;

        mutex_lock(&info->results_lock);
        list_for_each_entry(result, &info->results, node) {
                list_for_each_entry(tr, &result->results, node)
                        seq_printf(sf, "%s\n",
                                   thread_result_get(result->name, tr));
        }

        mutex_unlock(&info->results_lock);
        return 0;
}

static int dtf_results_open(struct inode *inode, struct file *file)
{
        return single_open(file, dtf_results_show, inode->i_private);
}

static const struct file_operations dtf_results_fops = {
        .open           = dtf_results_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

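/* Create the debugfs directory with its "run" and "results" control files. */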
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
        struct dentry *d;

        d = debugfs_create_dir("dmatest", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);
        if (!d)
                goto err_root;

        info->root = d;

        /* Run or stop threaded test */
        debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
                            &dtf_run_fops);

        /* Results of test in progress */
        debugfs_create_file("results", S_IRUGO, info->root, info,
                            &dtf_results_fops);

        return 0;

err_root:
        pr_err("dmatest: Failed to initialize debugfs\n");
        return -ENOMEM;
}

static int __init dmatest_init(void)
{
        struct dmatest_info *info = &test_info;
        int ret;

        memset(info, 0, sizeof(*info));

        mutex_init(&info->lock);
        INIT_LIST_HEAD(&info->channels);

        mutex_init(&info->results_lock);
        INIT_LIST_HEAD(&info->results);

        ret = dmatest_register_dbgfs(info);
        if (ret)
                return ret;

#ifdef MODULE
        return 0;
#else
        return run_threaded_test(info);
#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
        struct dmatest_info *info = &test_info;

        debugfs_remove_recursive(info->root);
        stop_threaded_test(info);
        result_free(info, NULL);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");