/*
 * linux/drivers/mmc/card/mmc_test.c
 *
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER	2
#define BUFFER_SIZE	(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: allocation order (log2 of the number of pages allocated)
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: doubly-linked list
 * @count: number of sector groups checked
 * @sectors: number of sectors per group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: doubly-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: doubly-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

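	/*
	 * Standard-capacity cards are byte addressed, so the sector address
	 * must be converted; block-addressed cards take it unchanged.
	 */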
	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}

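/*
 * A card counts as busy if it does not signal READY_FOR_DATA or is still
 * in the programming state (R1 state 7) after a write.
 */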
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			printk(KERN_INFO "%s: Warning: Host did not "
				"wait for busy state to end.\n",
				mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory, do not exceed 1/16th of total lowmem pages. Also
 * do not exceed a maximum number of segments, and try not to make segments
 * much bigger than the maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
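		/* Fall back to smaller allocations until one succeeds. */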
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist. Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous. Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
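	/*
	 * Walk the allocations from the end, one page at a time, skipping
	 * any page that would be physically contiguous with the one mapped
	 * just before it.
	 */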
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

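	/*
	 * Halve both values together until the divisor fits in 32 bits:
	 * the ratio, and hence the resulting rate, is preserved, and
	 * do_div() below takes only a 32-bit divisor.
	 */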
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
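	/*
	 * Block-addressed (high-capacity) MMC reports its size in EXT_CSD;
	 * otherwise the size is derived from the CSD.
	 */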
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first few sectors of the card with known data so that bad
 * reads/writes can be detected.
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

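	/*
	 * Claim a multi-block data transfer but issue only a single-block
	 * command, or, for a single block, a command with no data at all,
	 * so that the transfer is guaranteed to end early.
	 */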
	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

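		/*
		 * Read back at least one byte beyond the data just written
		 * so that the 0xDF fill around it can also be verified.
		 */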
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so they can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers. The size of the area is
 * the preferred erase size, which is a good size for optimal transfer speed.
 * Note that it is typically 4MiB for modern cards. The test area is set to
 * the middle of the card because cards may have different characteristics at
 * the front (for FAT file system optimization). Optionally, the area is
 * erased (if the card supports it), which may improve write performance.
 * Optionally, the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
		t->max_sz = TEST_AREA_MAX_SIZE;
	else
		t->max_sz = (unsigned long)test->card->pref_erase << 9;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

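	/*
	 * Clamp the maximum transfer size to the host's block-count,
	 * request-size and scatterlist limits.
	 */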
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

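	/* Centre the test area on the card and align it to its own size. */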
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance. Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list. This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				write, max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Fill in the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

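/*
 * Write handler for the debugfs 'test' file: writing a test number runs
 * that single test, while writing 0 runs every test in mmc_test_cases[].
 */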
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	char lbuf[12];
	long testcase;

	if (count >= sizeof(lbuf))
		return -EINVAL;

	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	lbuf[count] = '\0';

	if (strict_strtol(lbuf, 10, &testcase))
		return -EINVAL;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card so that only
	 * data from the most recent run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void mmc_test_free_file_test(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int mmc_test_register_file_test(struct mmc_card *card)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;
	int ret = 0;

	mutex_lock(&mmc_test_lock);

	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_file_test(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stale data if a card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");