1 /*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
4 * Copyright 2007-2008 Pierre Ossman
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
17
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
21
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25
26 #define RESULT_OK 0
27 #define RESULT_FAIL 1
28 #define RESULT_UNSUP_HOST 2
29 #define RESULT_UNSUP_CARD 3
30
31 #define BUFFER_ORDER 2
32 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
33
34 /*
35 * Limit the test area size to the maximum MMC HC erase group size. Note that
36 * the maximum SD allocation unit size is just 4MiB.
37 */
38 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
39
40 /**
41 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
42 * @page: first page in the allocation
43 * @order: order of the number of pages allocated
44 */
45 struct mmc_test_pages {
46 struct page *page;
47 unsigned int order;
48 };
49
50 /**
51 * struct mmc_test_mem - allocated memory.
52 * @arr: array of allocations
53 * @cnt: number of allocations
54 */
55 struct mmc_test_mem {
56 struct mmc_test_pages *arr;
57 unsigned int cnt;
58 };
59
60 /**
61 * struct mmc_test_area - information for performance tests.
62 * @max_sz: test area size (in bytes)
63 * @dev_addr: address on card at which to do performance tests
64 * @max_tfr: maximum transfer size allowed by driver (in bytes)
65 * @max_segs: maximum segments allowed by driver in scatterlist @sg
66 * @max_seg_sz: maximum segment size allowed by driver
67 * @blocks: number of (512 byte) blocks currently mapped by @sg
68 * @sg_len: length of currently mapped scatterlist @sg
69 * @mem: allocated memory
70 * @sg: scatterlist
71 */
72 struct mmc_test_area {
73 unsigned long max_sz;
74 unsigned int dev_addr;
75 unsigned int max_tfr;
76 unsigned int max_segs;
77 unsigned int max_seg_sz;
78 unsigned int blocks;
79 unsigned int sg_len;
80 struct mmc_test_mem *mem;
81 struct scatterlist *sg;
82 };
83
84 /**
85 * struct mmc_test_transfer_result - transfer results for performance tests.
86 * @link: double-linked list
87 * @count: number of groups of sectors transferred
88 * @sectors: number of sectors in one group
89 * @ts: time values of transfer
90 * @rate: calculated transfer rate
91 * @iops: I/O operations per second (times 100)
92 */
93 struct mmc_test_transfer_result {
94 struct list_head link;
95 unsigned int count;
96 unsigned int sectors;
97 struct timespec ts;
98 unsigned int rate;
99 unsigned int iops;
100 };
101
102 /**
103 * struct mmc_test_general_result - results for tests.
104 * @link: double-linked list
105 * @card: card under test
106 * @testcase: number of test case
107 * @result: result of test run
108 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
109 */
110 struct mmc_test_general_result {
111 struct list_head link;
112 struct mmc_card *card;
113 int testcase;
114 int result;
115 struct list_head tr_lst;
116 };
117
118 /**
119 * struct mmc_test_dbgfs_file - debugfs related file.
120 * @link: double-linked list
121 * @card: card under test
122 * @file: file created under debugfs
123 */
124 struct mmc_test_dbgfs_file {
125 struct list_head link;
126 struct mmc_card *card;
127 struct dentry *file;
128 };
129
130 /**
131 * struct mmc_test_card - test information.
132 * @card: card under test
133 * @scratch: transfer buffer
134 * @buffer: transfer buffer
135 * @highmem: buffer for highmem tests
136 * @area: information for performance tests
137 * @gr: pointer to results of current testcase
138 */
139 struct mmc_test_card {
140 struct mmc_card *card;
141
142 u8 scratch[BUFFER_SIZE];
143 u8 *buffer;
144 #ifdef CONFIG_HIGHMEM
145 struct page *highmem;
146 #endif
147 struct mmc_test_area area;
148 struct mmc_test_general_result *gr;
149 };
150
151 /*******************************************************************/
152 /* General helper functions */
153 /*******************************************************************/
154
155 /*
156 * Configure correct block size in card
157 */
158 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
159 {
160 return mmc_set_blocklen(test->card, size);
161 }
162
163 /*
164 * Fill in the mmc_request structure given a set of transfer parameters.
165 */
166 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
167 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
168 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
169 {
170 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
171
172 if (blocks > 1) {
173 mrq->cmd->opcode = write ?
174 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
175 } else {
176 mrq->cmd->opcode = write ?
177 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
178 }
179
180 mrq->cmd->arg = dev_addr;
181 if (!mmc_card_blockaddr(test->card))
182 mrq->cmd->arg <<= 9;
183
184 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
185
186 if (blocks == 1)
187 mrq->stop = NULL;
188 else {
189 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
190 mrq->stop->arg = 0;
191 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
192 }
193
194 mrq->data->blksz = blksz;
195 mrq->data->blocks = blocks;
196 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
197 mrq->data->sg = sg;
198 mrq->data->sg_len = sg_len;
199
200 mmc_set_data_timeout(mrq->data, test->card);
201 }
202
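/*
 * Check from the CMD13 response whether the card is still busy, i.e. not
 * ready for data or still in the programming state.
 */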
203 static int mmc_test_busy(struct mmc_command *cmd)
204 {
205 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
206 (R1_CURRENT_STATE(cmd->resp[0]) == 7);
207 }
208
209 /*
210 * Wait for the card to finish the busy state
211 */
212 static int mmc_test_wait_busy(struct mmc_test_card *test)
213 {
214 int ret, busy;
215 struct mmc_command cmd;
216
217 busy = 0;
218 do {
219 memset(&cmd, 0, sizeof(struct mmc_command));
220
221 cmd.opcode = MMC_SEND_STATUS;
222 cmd.arg = test->card->rca << 16;
223 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
224
225 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
226 if (ret)
227 break;
228
229 if (!busy && mmc_test_busy(&cmd)) {
230 busy = 1;
231 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
232 printk(KERN_INFO "%s: Warning: Host did not "
233 "wait for busy state to end.\n",
234 mmc_hostname(test->card->host));
235 }
236 } while (mmc_test_busy(&cmd));
237
238 return ret;
239 }
240
241 /*
242 * Transfer a single sector of kernel addressable data
243 */
244 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
245 u8 *buffer, unsigned addr, unsigned blksz, int write)
246 {
247 int ret;
248
249 struct mmc_request mrq;
250 struct mmc_command cmd;
251 struct mmc_command stop;
252 struct mmc_data data;
253
254 struct scatterlist sg;
255
256 memset(&mrq, 0, sizeof(struct mmc_request));
257 memset(&cmd, 0, sizeof(struct mmc_command));
258 memset(&data, 0, sizeof(struct mmc_data));
259 memset(&stop, 0, sizeof(struct mmc_command));
260
261 mrq.cmd = &cmd;
262 mrq.data = &data;
263 mrq.stop = &stop;
264
265 sg_init_one(&sg, buffer, blksz);
266
267 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
268
269 mmc_wait_for_req(test->card->host, &mrq);
270
271 if (cmd.error)
272 return cmd.error;
273 if (data.error)
274 return data.error;
275
276 ret = mmc_test_wait_busy(test);
277 if (ret)
278 return ret;
279
280 return 0;
281 }
282
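/*
 * Free memory previously allocated by mmc_test_alloc_mem().
 */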
283 static void mmc_test_free_mem(struct mmc_test_mem *mem)
284 {
285 if (!mem)
286 return;
287 while (mem->cnt--)
288 __free_pages(mem->arr[mem->cnt].page,
289 mem->arr[mem->cnt].order);
290 kfree(mem->arr);
291 kfree(mem);
292 }
293
294 /*
295 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
296 * there isn't much memory, do not exceed 1/16th of total lowmem pages. Also do
297 * not exceed a maximum number of segments and try not to make segments much
298 * bigger than maximum segment size.
299 */
300 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
301 unsigned long max_sz,
302 unsigned int max_segs,
303 unsigned int max_seg_sz)
304 {
305 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
306 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
307 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
308 unsigned long page_cnt = 0;
309 unsigned long limit = nr_free_buffer_pages() >> 4;
310 struct mmc_test_mem *mem;
311
312 if (max_page_cnt > limit)
313 max_page_cnt = limit;
314 if (min_page_cnt > max_page_cnt)
315 min_page_cnt = max_page_cnt;
316
317 if (max_seg_page_cnt > max_page_cnt)
318 max_seg_page_cnt = max_page_cnt;
319
320 if (max_segs > max_page_cnt)
321 max_segs = max_page_cnt;
322
323 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
324 if (!mem)
325 return NULL;
326
327 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
328 GFP_KERNEL);
329 if (!mem->arr)
330 goto out_free;
331
332 while (max_page_cnt) {
333 struct page *page;
334 unsigned int order;
335 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
336 __GFP_NORETRY;
337
338 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
339 while (1) {
340 page = alloc_pages(flags, order);
341 if (page || !order)
342 break;
343 order -= 1;
344 }
345 if (!page) {
346 if (page_cnt < min_page_cnt)
347 goto out_free;
348 break;
349 }
350 mem->arr[mem->cnt].page = page;
351 mem->arr[mem->cnt].order = order;
352 mem->cnt += 1;
353 if (max_page_cnt <= (1UL << order))
354 break;
355 max_page_cnt -= 1UL << order;
356 page_cnt += 1UL << order;
357 if (mem->cnt >= max_segs) {
358 if (page_cnt < min_page_cnt)
359 goto out_free;
360 break;
361 }
362 }
363
364 return mem;
365
366 out_free:
367 mmc_test_free_mem(mem);
368 return NULL;
369 }
370
371 /*
372 * Map memory into a scatterlist. Optionally allow the same memory to be
373 * mapped more than once.
374 */
375 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
376 struct scatterlist *sglist, int repeat,
377 unsigned int max_segs, unsigned int max_seg_sz,
378 unsigned int *sg_len)
379 {
380 struct scatterlist *sg = NULL;
381 unsigned int i;
382
383 sg_init_table(sglist, max_segs);
384
385 *sg_len = 0;
386 do {
387 for (i = 0; i < mem->cnt; i++) {
388 unsigned long len = PAGE_SIZE << mem->arr[i].order;
389
390 if (len > sz)
391 len = sz;
392 if (len > max_seg_sz)
393 len = max_seg_sz;
394 if (sg)
395 sg = sg_next(sg);
396 else
397 sg = sglist;
398 if (!sg)
399 return -EINVAL;
400 sg_set_page(sg, mem->arr[i].page, len, 0);
401 sz -= len;
402 *sg_len += 1;
403 if (!sz)
404 break;
405 }
406 } while (sz && repeat);
407
408 if (sz)
409 return -EINVAL;
410
411 if (sg)
412 sg_mark_end(sg);
413
414 return 0;
415 }
416
417 /*
418 * Map memory into a scatterlist so that no pages are contiguous. Allow the
419 * same memory to be mapped more than once.
420 */
421 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
422 unsigned long sz,
423 struct scatterlist *sglist,
424 unsigned int max_segs,
425 unsigned int max_seg_sz,
426 unsigned int *sg_len)
427 {
428 struct scatterlist *sg = NULL;
429 unsigned int i = mem->cnt, cnt;
430 unsigned long len;
431 void *base, *addr, *last_addr = NULL;
432
433 sg_init_table(sglist, max_segs);
434
435 *sg_len = 0;
436 while (sz) {
437 base = page_address(mem->arr[--i].page);
438 cnt = 1 << mem->arr[i].order;
439 while (sz && cnt) {
440 addr = base + PAGE_SIZE * --cnt;
441 if (last_addr && last_addr + PAGE_SIZE == addr)
442 continue;
443 last_addr = addr;
444 len = PAGE_SIZE;
445 if (len > max_seg_sz)
446 len = max_seg_sz;
447 if (len > sz)
448 len = sz;
449 if (sg)
450 sg = sg_next(sg);
451 else
452 sg = sglist;
453 if (!sg)
454 return -EINVAL;
455 sg_set_page(sg, virt_to_page(addr), len, 0);
456 sz -= len;
457 *sg_len += 1;
458 }
459 if (i == 0)
460 i = mem->cnt;
461 }
462
463 if (sg)
464 sg_mark_end(sg);
465
466 return 0;
467 }
468
469 /*
470 * Calculate transfer rate in bytes per second.
471 */
472 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
473 {
474 uint64_t ns;
475
476 ns = ts->tv_sec;
477 ns *= 1000000000;
478 ns += ts->tv_nsec;
479
480 bytes *= 1000000000;
481
482 while (ns > UINT_MAX) {
483 bytes >>= 1;
484 ns >>= 1;
485 }
486
487 if (!ns)
488 return 0;
489
490 do_div(bytes, (uint32_t)ns);
491
492 return bytes;
493 }
494
495 /*
496 * Save transfer results for future usage
497 */
498 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
499 unsigned int count, unsigned int sectors, struct timespec ts,
500 unsigned int rate, unsigned int iops)
501 {
502 struct mmc_test_transfer_result *tr;
503
504 if (!test->gr)
505 return;
506
507 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
508 if (!tr)
509 return;
510
511 tr->count = count;
512 tr->sectors = sectors;
513 tr->ts = ts;
514 tr->rate = rate;
515 tr->iops = iops;
516
517 list_add_tail(&tr->link, &test->gr->tr_lst);
518 }
519
520 /*
521 * Print the transfer rate.
522 */
523 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
524 struct timespec *ts1, struct timespec *ts2)
525 {
526 unsigned int rate, iops, sectors = bytes >> 9;
527 struct timespec ts;
528
529 ts = timespec_sub(*ts2, *ts1);
530
531 rate = mmc_test_rate(bytes, &ts);
532 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
533
534 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
535 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
536 mmc_hostname(test->card->host), sectors, sectors >> 1,
537 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
538 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
539 iops / 100, iops % 100);
540
541 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
542 }
543
544 /*
545 * Print the average transfer rate.
546 */
547 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
548 unsigned int count, struct timespec *ts1,
549 struct timespec *ts2)
550 {
551 unsigned int rate, iops, sectors = bytes >> 9;
552 uint64_t tot = bytes * count;
553 struct timespec ts;
554
555 ts = timespec_sub(*ts2, *ts1);
556
557 rate = mmc_test_rate(tot, &ts);
558 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
559
560 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
561 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
562 "%u.%02u IOPS)\n",
563 mmc_hostname(test->card->host), count, sectors, count,
564 sectors >> 1, (sectors & 1 ? ".5" : ""),
565 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
566 rate / 1000, rate / 1024, iops / 100, iops % 100);
567
568 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
569 }
570
571 /*
572 * Return the card size in sectors.
573 */
574 static unsigned int mmc_test_capacity(struct mmc_card *card)
575 {
576 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
577 return card->ext_csd.sectors;
578 else
579 return card->csd.capacity << (card->csd.read_blkbits - 9);
580 }
581
582 /*******************************************************************/
583 /* Test preparation and cleanup */
584 /*******************************************************************/
585
586 /*
587 * Fill the first couple of sectors of the card with known data
588 * so that bad reads/writes can be detected
589 */
590 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
591 {
592 int ret, i;
593
594 ret = mmc_test_set_blksize(test, 512);
595 if (ret)
596 return ret;
597
598 if (write)
599 memset(test->buffer, 0xDF, 512);
600 else {
601 for (i = 0;i < 512;i++)
602 test->buffer[i] = i;
603 }
604
605 for (i = 0;i < BUFFER_SIZE / 512;i++) {
606 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
607 if (ret)
608 return ret;
609 }
610
611 return 0;
612 }
613
614 static int mmc_test_prepare_write(struct mmc_test_card *test)
615 {
616 return __mmc_test_prepare(test, 1);
617 }
618
619 static int mmc_test_prepare_read(struct mmc_test_card *test)
620 {
621 return __mmc_test_prepare(test, 0);
622 }
623
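/*
 * Zero out the test sectors again after a test has run.
 */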
624 static int mmc_test_cleanup(struct mmc_test_card *test)
625 {
626 int ret, i;
627
628 ret = mmc_test_set_blksize(test, 512);
629 if (ret)
630 return ret;
631
632 memset(test->buffer, 0, 512);
633
634 for (i = 0;i < BUFFER_SIZE / 512;i++) {
635 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
636 if (ret)
637 return ret;
638 }
639
640 return 0;
641 }
642
643 /*******************************************************************/
644 /* Test execution helpers */
645 /*******************************************************************/
646
647 /*
648 * Modifies the mmc_request to perform the "short transfer" tests
649 */
650 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
651 struct mmc_request *mrq, int write)
652 {
653 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
654
655 if (mrq->data->blocks > 1) {
656 mrq->cmd->opcode = write ?
657 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
658 mrq->stop = NULL;
659 } else {
660 mrq->cmd->opcode = MMC_SEND_STATUS;
661 mrq->cmd->arg = test->card->rca << 16;
662 }
663 }
664
665 /*
666 * Checks that a normal transfer didn't have any errors
667 */
668 static int mmc_test_check_result(struct mmc_test_card *test,
669 struct mmc_request *mrq)
670 {
671 int ret;
672
673 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
674
675 ret = 0;
676
677 if (!ret && mrq->cmd->error)
678 ret = mrq->cmd->error;
679 if (!ret && mrq->data->error)
680 ret = mrq->data->error;
681 if (!ret && mrq->stop && mrq->stop->error)
682 ret = mrq->stop->error;
683 if (!ret && mrq->data->bytes_xfered !=
684 mrq->data->blocks * mrq->data->blksz)
685 ret = RESULT_FAIL;
686
687 if (ret == -EINVAL)
688 ret = RESULT_UNSUP_HOST;
689
690 return ret;
691 }
692
693 /*
694 * Checks that a "short transfer" behaved as expected
695 */
696 static int mmc_test_check_broken_result(struct mmc_test_card *test,
697 struct mmc_request *mrq)
698 {
699 int ret;
700
701 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
702
703 ret = 0;
704
705 if (!ret && mrq->cmd->error)
706 ret = mrq->cmd->error;
707 if (!ret && mrq->data->error == 0)
708 ret = RESULT_FAIL;
709 if (!ret && mrq->data->error != -ETIMEDOUT)
710 ret = mrq->data->error;
711 if (!ret && mrq->stop && mrq->stop->error)
712 ret = mrq->stop->error;
713 if (mrq->data->blocks > 1) {
714 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
715 ret = RESULT_FAIL;
716 } else {
717 if (!ret && mrq->data->bytes_xfered > 0)
718 ret = RESULT_FAIL;
719 }
720
721 if (ret == -EINVAL)
722 ret = RESULT_UNSUP_HOST;
723
724 return ret;
725 }
726
727 /*
728 * Tests a basic transfer with certain parameters
729 */
730 static int mmc_test_simple_transfer(struct mmc_test_card *test,
731 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
732 unsigned blocks, unsigned blksz, int write)
733 {
734 struct mmc_request mrq;
735 struct mmc_command cmd;
736 struct mmc_command stop;
737 struct mmc_data data;
738
739 memset(&mrq, 0, sizeof(struct mmc_request));
740 memset(&cmd, 0, sizeof(struct mmc_command));
741 memset(&data, 0, sizeof(struct mmc_data));
742 memset(&stop, 0, sizeof(struct mmc_command));
743
744 mrq.cmd = &cmd;
745 mrq.data = &data;
746 mrq.stop = &stop;
747
748 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
749 blocks, blksz, write);
750
751 mmc_wait_for_req(test->card->host, &mrq);
752
753 mmc_test_wait_busy(test);
754
755 return mmc_test_check_result(test, &mrq);
756 }
757
758 /*
759 * Tests a transfer where the card will fail completely or partly
760 */
761 static int mmc_test_broken_transfer(struct mmc_test_card *test,
762 unsigned blocks, unsigned blksz, int write)
763 {
764 struct mmc_request mrq;
765 struct mmc_command cmd;
766 struct mmc_command stop;
767 struct mmc_data data;
768
769 struct scatterlist sg;
770
771 memset(&mrq, 0, sizeof(struct mmc_request));
772 memset(&cmd, 0, sizeof(struct mmc_command));
773 memset(&data, 0, sizeof(struct mmc_data));
774 memset(&stop, 0, sizeof(struct mmc_command));
775
776 mrq.cmd = &cmd;
777 mrq.data = &data;
778 mrq.stop = &stop;
779
780 sg_init_one(&sg, test->buffer, blocks * blksz);
781
782 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
783 mmc_test_prepare_broken_mrq(test, &mrq, write);
784
785 mmc_wait_for_req(test->card->host, &mrq);
786
787 mmc_test_wait_busy(test);
788
789 return mmc_test_check_broken_result(test, &mrq);
790 }
791
792 /*
793 * Does a complete transfer test where data is also validated
794 *
795 * Note: mmc_test_prepare() must have been done before this call
796 */
797 static int mmc_test_transfer(struct mmc_test_card *test,
798 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
799 unsigned blocks, unsigned blksz, int write)
800 {
801 int ret, i;
802 unsigned long flags;
803
804 if (write) {
805 for (i = 0;i < blocks * blksz;i++)
806 test->scratch[i] = i;
807 } else {
808 memset(test->scratch, 0, BUFFER_SIZE);
809 }
810 local_irq_save(flags);
811 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
812 local_irq_restore(flags);
813
814 ret = mmc_test_set_blksize(test, blksz);
815 if (ret)
816 return ret;
817
818 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
819 blocks, blksz, write);
820 if (ret)
821 return ret;
822
823 if (write) {
824 int sectors;
825
826 ret = mmc_test_set_blksize(test, 512);
827 if (ret)
828 return ret;
829
830 sectors = (blocks * blksz + 511) / 512;
831 if ((sectors * 512) == (blocks * blksz))
832 sectors++;
833
834 if ((sectors * 512) > BUFFER_SIZE)
835 return -EINVAL;
836
837 memset(test->buffer, 0, sectors * 512);
838
839 for (i = 0;i < sectors;i++) {
840 ret = mmc_test_buffer_transfer(test,
841 test->buffer + i * 512,
842 dev_addr + i, 512, 0);
843 if (ret)
844 return ret;
845 }
846
847 for (i = 0;i < blocks * blksz;i++) {
848 if (test->buffer[i] != (u8)i)
849 return RESULT_FAIL;
850 }
851
852 for (;i < sectors * 512;i++) {
853 if (test->buffer[i] != 0xDF)
854 return RESULT_FAIL;
855 }
856 } else {
857 local_irq_save(flags);
858 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
859 local_irq_restore(flags);
860 for (i = 0;i < blocks * blksz;i++) {
861 if (test->scratch[i] != (u8)i)
862 return RESULT_FAIL;
863 }
864 }
865
866 return 0;
867 }
868
869 /*******************************************************************/
870 /* Tests */
871 /*******************************************************************/
872
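/*
 * A single test case: optional prepare and cleanup callbacks around the
 * mandatory run callback.
 */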
873 struct mmc_test_case {
874 const char *name;
875
876 int (*prepare)(struct mmc_test_card *);
877 int (*run)(struct mmc_test_card *);
878 int (*cleanup)(struct mmc_test_card *);
879 };
880
881 static int mmc_test_basic_write(struct mmc_test_card *test)
882 {
883 int ret;
884 struct scatterlist sg;
885
886 ret = mmc_test_set_blksize(test, 512);
887 if (ret)
888 return ret;
889
890 sg_init_one(&sg, test->buffer, 512);
891
892 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
893 if (ret)
894 return ret;
895
896 return 0;
897 }
898
899 static int mmc_test_basic_read(struct mmc_test_card *test)
900 {
901 int ret;
902 struct scatterlist sg;
903
904 ret = mmc_test_set_blksize(test, 512);
905 if (ret)
906 return ret;
907
908 sg_init_one(&sg, test->buffer, 512);
909
910 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
911 if (ret)
912 return ret;
913
914 return 0;
915 }
916
917 static int mmc_test_verify_write(struct mmc_test_card *test)
918 {
919 int ret;
920 struct scatterlist sg;
921
922 sg_init_one(&sg, test->buffer, 512);
923
924 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
925 if (ret)
926 return ret;
927
928 return 0;
929 }
930
931 static int mmc_test_verify_read(struct mmc_test_card *test)
932 {
933 int ret;
934 struct scatterlist sg;
935
936 sg_init_one(&sg, test->buffer, 512);
937
938 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
939 if (ret)
940 return ret;
941
942 return 0;
943 }
944
945 static int mmc_test_multi_write(struct mmc_test_card *test)
946 {
947 int ret;
948 unsigned int size;
949 struct scatterlist sg;
950
951 if (test->card->host->max_blk_count == 1)
952 return RESULT_UNSUP_HOST;
953
954 size = PAGE_SIZE * 2;
955 size = min(size, test->card->host->max_req_size);
956 size = min(size, test->card->host->max_seg_size);
957 size = min(size, test->card->host->max_blk_count * 512);
958
959 if (size < 1024)
960 return RESULT_UNSUP_HOST;
961
962 sg_init_one(&sg, test->buffer, size);
963
964 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
965 if (ret)
966 return ret;
967
968 return 0;
969 }
970
971 static int mmc_test_multi_read(struct mmc_test_card *test)
972 {
973 int ret;
974 unsigned int size;
975 struct scatterlist sg;
976
977 if (test->card->host->max_blk_count == 1)
978 return RESULT_UNSUP_HOST;
979
980 size = PAGE_SIZE * 2;
981 size = min(size, test->card->host->max_req_size);
982 size = min(size, test->card->host->max_seg_size);
983 size = min(size, test->card->host->max_blk_count * 512);
984
985 if (size < 1024)
986 return RESULT_UNSUP_HOST;
987
988 sg_init_one(&sg, test->buffer, size);
989
990 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
991 if (ret)
992 return ret;
993
994 return 0;
995 }
996
997 static int mmc_test_pow2_write(struct mmc_test_card *test)
998 {
999 int ret, i;
1000 struct scatterlist sg;
1001
1002 if (!test->card->csd.write_partial)
1003 return RESULT_UNSUP_CARD;
1004
1005 for (i = 1; i < 512;i <<= 1) {
1006 sg_init_one(&sg, test->buffer, i);
1007 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1008 if (ret)
1009 return ret;
1010 }
1011
1012 return 0;
1013 }
1014
1015 static int mmc_test_pow2_read(struct mmc_test_card *test)
1016 {
1017 int ret, i;
1018 struct scatterlist sg;
1019
1020 if (!test->card->csd.read_partial)
1021 return RESULT_UNSUP_CARD;
1022
1023 for (i = 1; i < 512;i <<= 1) {
1024 sg_init_one(&sg, test->buffer, i);
1025 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1026 if (ret)
1027 return ret;
1028 }
1029
1030 return 0;
1031 }
1032
1033 static int mmc_test_weird_write(struct mmc_test_card *test)
1034 {
1035 int ret, i;
1036 struct scatterlist sg;
1037
1038 if (!test->card->csd.write_partial)
1039 return RESULT_UNSUP_CARD;
1040
1041 for (i = 3; i < 512;i += 7) {
1042 sg_init_one(&sg, test->buffer, i);
1043 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1044 if (ret)
1045 return ret;
1046 }
1047
1048 return 0;
1049 }
1050
1051 static int mmc_test_weird_read(struct mmc_test_card *test)
1052 {
1053 int ret, i;
1054 struct scatterlist sg;
1055
1056 if (!test->card->csd.read_partial)
1057 return RESULT_UNSUP_CARD;
1058
1059 for (i = 3; i < 512;i += 7) {
1060 sg_init_one(&sg, test->buffer, i);
1061 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1062 if (ret)
1063 return ret;
1064 }
1065
1066 return 0;
1067 }
1068
1069 static int mmc_test_align_write(struct mmc_test_card *test)
1070 {
1071 int ret, i;
1072 struct scatterlist sg;
1073
1074 for (i = 1;i < 4;i++) {
1075 sg_init_one(&sg, test->buffer + i, 512);
1076 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1077 if (ret)
1078 return ret;
1079 }
1080
1081 return 0;
1082 }
1083
1084 static int mmc_test_align_read(struct mmc_test_card *test)
1085 {
1086 int ret, i;
1087 struct scatterlist sg;
1088
1089 for (i = 1;i < 4;i++) {
1090 sg_init_one(&sg, test->buffer + i, 512);
1091 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1092 if (ret)
1093 return ret;
1094 }
1095
1096 return 0;
1097 }
1098
1099 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1100 {
1101 int ret, i;
1102 unsigned int size;
1103 struct scatterlist sg;
1104
1105 if (test->card->host->max_blk_count == 1)
1106 return RESULT_UNSUP_HOST;
1107
1108 size = PAGE_SIZE * 2;
1109 size = min(size, test->card->host->max_req_size);
1110 size = min(size, test->card->host->max_seg_size);
1111 size = min(size, test->card->host->max_blk_count * 512);
1112
1113 if (size < 1024)
1114 return RESULT_UNSUP_HOST;
1115
1116 for (i = 1;i < 4;i++) {
1117 sg_init_one(&sg, test->buffer + i, size);
1118 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1119 if (ret)
1120 return ret;
1121 }
1122
1123 return 0;
1124 }
1125
1126 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1127 {
1128 int ret, i;
1129 unsigned int size;
1130 struct scatterlist sg;
1131
1132 if (test->card->host->max_blk_count == 1)
1133 return RESULT_UNSUP_HOST;
1134
1135 size = PAGE_SIZE * 2;
1136 size = min(size, test->card->host->max_req_size);
1137 size = min(size, test->card->host->max_seg_size);
1138 size = min(size, test->card->host->max_blk_count * 512);
1139
1140 if (size < 1024)
1141 return RESULT_UNSUP_HOST;
1142
1143 for (i = 1;i < 4;i++) {
1144 sg_init_one(&sg, test->buffer + i, size);
1145 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1146 if (ret)
1147 return ret;
1148 }
1149
1150 return 0;
1151 }
1152
1153 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1154 {
1155 int ret;
1156
1157 ret = mmc_test_set_blksize(test, 512);
1158 if (ret)
1159 return ret;
1160
1161 ret = mmc_test_broken_transfer(test, 1, 512, 1);
1162 if (ret)
1163 return ret;
1164
1165 return 0;
1166 }
1167
1168 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1169 {
1170 int ret;
1171
1172 ret = mmc_test_set_blksize(test, 512);
1173 if (ret)
1174 return ret;
1175
1176 ret = mmc_test_broken_transfer(test, 1, 512, 0);
1177 if (ret)
1178 return ret;
1179
1180 return 0;
1181 }
1182
1183 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1184 {
1185 int ret;
1186
1187 if (test->card->host->max_blk_count == 1)
1188 return RESULT_UNSUP_HOST;
1189
1190 ret = mmc_test_set_blksize(test, 512);
1191 if (ret)
1192 return ret;
1193
1194 ret = mmc_test_broken_transfer(test, 2, 512, 1);
1195 if (ret)
1196 return ret;
1197
1198 return 0;
1199 }
1200
1201 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1202 {
1203 int ret;
1204
1205 if (test->card->host->max_blk_count == 1)
1206 return RESULT_UNSUP_HOST;
1207
1208 ret = mmc_test_set_blksize(test, 512);
1209 if (ret)
1210 return ret;
1211
1212 ret = mmc_test_broken_transfer(test, 2, 512, 0);
1213 if (ret)
1214 return ret;
1215
1216 return 0;
1217 }
1218
1219 #ifdef CONFIG_HIGHMEM
1220
1221 static int mmc_test_write_high(struct mmc_test_card *test)
1222 {
1223 int ret;
1224 struct scatterlist sg;
1225
1226 sg_init_table(&sg, 1);
1227 sg_set_page(&sg, test->highmem, 512, 0);
1228
1229 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1230 if (ret)
1231 return ret;
1232
1233 return 0;
1234 }
1235
1236 static int mmc_test_read_high(struct mmc_test_card *test)
1237 {
1238 int ret;
1239 struct scatterlist sg;
1240
1241 sg_init_table(&sg, 1);
1242 sg_set_page(&sg, test->highmem, 512, 0);
1243
1244 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1245 if (ret)
1246 return ret;
1247
1248 return 0;
1249 }
1250
1251 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1252 {
1253 int ret;
1254 unsigned int size;
1255 struct scatterlist sg;
1256
1257 if (test->card->host->max_blk_count == 1)
1258 return RESULT_UNSUP_HOST;
1259
1260 size = PAGE_SIZE * 2;
1261 size = min(size, test->card->host->max_req_size);
1262 size = min(size, test->card->host->max_seg_size);
1263 size = min(size, test->card->host->max_blk_count * 512);
1264
1265 if (size < 1024)
1266 return RESULT_UNSUP_HOST;
1267
1268 sg_init_table(&sg, 1);
1269 sg_set_page(&sg, test->highmem, size, 0);
1270
1271 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1272 if (ret)
1273 return ret;
1274
1275 return 0;
1276 }
1277
1278 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1279 {
1280 int ret;
1281 unsigned int size;
1282 struct scatterlist sg;
1283
1284 if (test->card->host->max_blk_count == 1)
1285 return RESULT_UNSUP_HOST;
1286
1287 size = PAGE_SIZE * 2;
1288 size = min(size, test->card->host->max_req_size);
1289 size = min(size, test->card->host->max_seg_size);
1290 size = min(size, test->card->host->max_blk_count * 512);
1291
1292 if (size < 1024)
1293 return RESULT_UNSUP_HOST;
1294
1295 sg_init_table(&sg, 1);
1296 sg_set_page(&sg, test->highmem, size, 0);
1297
1298 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1299 if (ret)
1300 return ret;
1301
1302 return 0;
1303 }
1304
1305 #else
1306
1307 static int mmc_test_no_highmem(struct mmc_test_card *test)
1308 {
1309 printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
1310 mmc_hostname(test->card->host));
1311 return 0;
1312 }
1313
1314 #endif /* CONFIG_HIGHMEM */
1315
1316 /*
1317 * Map sz bytes so that it can be transferred.
1318 */
1319 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1320 int max_scatter)
1321 {
1322 struct mmc_test_area *t = &test->area;
1323 int err;
1324
1325 t->blocks = sz >> 9;
1326
1327 if (max_scatter) {
1328 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1329 t->max_segs, t->max_seg_sz,
1330 &t->sg_len);
1331 } else {
1332 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1333 t->max_seg_sz, &t->sg_len);
1334 }
1335 if (err)
1336 printk(KERN_INFO "%s: Failed to map sg list\n",
1337 mmc_hostname(test->card->host));
1338 return err;
1339 }
1340
1341 /*
1342 * Transfer bytes mapped by mmc_test_area_map().
1343 */
1344 static int mmc_test_area_transfer(struct mmc_test_card *test,
1345 unsigned int dev_addr, int write)
1346 {
1347 struct mmc_test_area *t = &test->area;
1348
1349 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1350 t->blocks, 512, write);
1351 }
1352
1353 /*
1354 * Map and transfer bytes.
1355 */
1356 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1357 unsigned int dev_addr, int write, int max_scatter,
1358 int timed)
1359 {
1360 struct timespec ts1, ts2;
1361 int ret;
1362
1363 /*
1364 * In the case of a maximally scattered transfer, the maximum transfer
1365 * size is further limited by using PAGE_SIZE segments.
1366 */
1367 if (max_scatter) {
1368 struct mmc_test_area *t = &test->area;
1369 unsigned long max_tfr;
1370
1371 if (t->max_seg_sz >= PAGE_SIZE)
1372 max_tfr = t->max_segs * PAGE_SIZE;
1373 else
1374 max_tfr = t->max_segs * t->max_seg_sz;
1375 if (sz > max_tfr)
1376 sz = max_tfr;
1377 }
1378
1379 ret = mmc_test_area_map(test, sz, max_scatter);
1380 if (ret)
1381 return ret;
1382
1383 if (timed)
1384 getnstimeofday(&ts1);
1385
1386 ret = mmc_test_area_transfer(test, dev_addr, write);
1387 if (ret)
1388 return ret;
1389
1390 if (timed)
1391 getnstimeofday(&ts2);
1392
1393 if (timed)
1394 mmc_test_print_rate(test, sz, &ts1, &ts2);
1395
1396 return 0;
1397 }
1398
1399 /*
1400 * Write the test area entirely.
1401 */
1402 static int mmc_test_area_fill(struct mmc_test_card *test)
1403 {
1404 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1405 1, 0, 0);
1406 }
1407
1408 /*
1409 * Erase the test area entirely.
1410 */
1411 static int mmc_test_area_erase(struct mmc_test_card *test)
1412 {
1413 struct mmc_test_area *t = &test->area;
1414
1415 if (!mmc_can_erase(test->card))
1416 return 0;
1417
1418 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1419 MMC_ERASE_ARG);
1420 }
1421
1422 /*
1423 * Cleanup struct mmc_test_area.
1424 */
1425 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1426 {
1427 struct mmc_test_area *t = &test->area;
1428
1429 kfree(t->sg);
1430 mmc_test_free_mem(t->mem);
1431
1432 return 0;
1433 }
1434
1435 /*
1436 * Initialize an area for testing large transfers. The test area is set to the
1437 * middle of the card because cards may have different characteristics at the
1438 * front (for FAT file system optimization). Optionally, the area is erased
1439 * (if the card supports it) which may improve write performance. Optionally,
1440 * the area is filled with data for subsequent read tests.
1441 */
1442 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1443 {
1444 struct mmc_test_area *t = &test->area;
1445 unsigned long min_sz = 64 * 1024, sz;
1446 int ret;
1447
1448 ret = mmc_test_set_blksize(test, 512);
1449 if (ret)
1450 return ret;
1451
1452 /* Make the test area a whole number of preferred erase units, at least 4MiB in size */
1453 sz = (unsigned long)test->card->pref_erase << 9;
1454 t->max_sz = sz;
1455 while (t->max_sz < 4 * 1024 * 1024)
1456 t->max_sz += sz;
1457 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1458 t->max_sz -= sz;
1459
1460 t->max_segs = test->card->host->max_segs;
1461 t->max_seg_sz = test->card->host->max_seg_size;
1462
1463 t->max_tfr = t->max_sz;
1464 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1465 t->max_tfr = test->card->host->max_blk_count << 9;
1466 if (t->max_tfr > test->card->host->max_req_size)
1467 t->max_tfr = test->card->host->max_req_size;
1468 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1469 t->max_tfr = t->max_segs * t->max_seg_sz;
1470
1471 /*
1472 * Try to allocate enough memory for a max. sized transfer. Less is OK
1473 * because the same memory can be mapped into the scatterlist more than
1474 * once. Also, take into account the limits imposed on scatterlist
1475 * segments by the host driver.
1476 */
1477 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1478 t->max_seg_sz);
1479 if (!t->mem)
1480 return -ENOMEM;
1481
1482 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1483 if (!t->sg) {
1484 ret = -ENOMEM;
1485 goto out_free;
1486 }
1487
1488 t->dev_addr = mmc_test_capacity(test->card) / 2;
1489 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1490
1491 if (erase) {
1492 ret = mmc_test_area_erase(test);
1493 if (ret)
1494 goto out_free;
1495 }
1496
1497 if (fill) {
1498 ret = mmc_test_area_fill(test);
1499 if (ret)
1500 goto out_free;
1501 }
1502
1503 return 0;
1504
1505 out_free:
1506 mmc_test_area_cleanup(test);
1507 return ret;
1508 }
1509
1510 /*
1511 * Prepare for large transfers. Do not erase the test area.
1512 */
1513 static int mmc_test_area_prepare(struct mmc_test_card *test)
1514 {
1515 return mmc_test_area_init(test, 0, 0);
1516 }
1517
1518 /*
1519 * Prepare for large transfers. Do erase the test area.
1520 */
1521 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1522 {
1523 return mmc_test_area_init(test, 1, 0);
1524 }
1525
1526 /*
1527 * Prepare for large transfers. Erase and fill the test area.
1528 */
1529 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1530 {
1531 return mmc_test_area_init(test, 1, 1);
1532 }
1533
1534 /*
1535 * Test best-case performance. Best-case performance is expected from
1536 * a single large transfer.
1537 *
1538 * An additional option (max_scatter) allows the measurement of the same
1539 * transfer but with no contiguous pages in the scatter list. This tests
1540 * the efficiency of DMA to handle scattered pages.
1541 */
1542 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1543 int max_scatter)
1544 {
1545 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1546 write, max_scatter, 1);
1547 }
1548
1549 /*
1550 * Best-case read performance.
1551 */
1552 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1553 {
1554 return mmc_test_best_performance(test, 0, 0);
1555 }
1556
1557 /*
1558 * Best-case write performance.
1559 */
1560 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1561 {
1562 return mmc_test_best_performance(test, 1, 0);
1563 }
1564
1565 /*
1566 * Best-case read performance into scattered pages.
1567 */
1568 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1569 {
1570 return mmc_test_best_performance(test, 0, 1);
1571 }
1572
1573 /*
1574 * Best-case write performance from scattered pages.
1575 */
1576 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1577 {
1578 return mmc_test_best_performance(test, 1, 1);
1579 }
1580
1581 /*
1582 * Single read performance by transfer size.
1583 */
1584 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1585 {
1586 unsigned long sz;
1587 unsigned int dev_addr;
1588 int ret;
1589
1590 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1591 dev_addr = test->area.dev_addr + (sz >> 9);
1592 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1593 if (ret)
1594 return ret;
1595 }
1596 sz = test->area.max_tfr;
1597 dev_addr = test->area.dev_addr;
1598 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1599 }
1600
1601 /*
1602 * Single write performance by transfer size.
1603 */
1604 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1605 {
1606 unsigned long sz;
1607 unsigned int dev_addr;
1608 int ret;
1609
1610 ret = mmc_test_area_erase(test);
1611 if (ret)
1612 return ret;
1613 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1614 dev_addr = test->area.dev_addr + (sz >> 9);
1615 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1616 if (ret)
1617 return ret;
1618 }
1619 ret = mmc_test_area_erase(test);
1620 if (ret)
1621 return ret;
1622 sz = test->area.max_tfr;
1623 dev_addr = test->area.dev_addr;
1624 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1625 }
1626
1627 /*
1628 * Single trim performance by transfer size.
1629 */
1630 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1631 {
1632 unsigned long sz;
1633 unsigned int dev_addr;
1634 struct timespec ts1, ts2;
1635 int ret;
1636
1637 if (!mmc_can_trim(test->card))
1638 return RESULT_UNSUP_CARD;
1639
1640 if (!mmc_can_erase(test->card))
1641 return RESULT_UNSUP_HOST;
1642
1643 for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
1644 dev_addr = test->area.dev_addr + (sz >> 9);
1645 getnstimeofday(&ts1);
1646 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1647 if (ret)
1648 return ret;
1649 getnstimeofday(&ts2);
1650 mmc_test_print_rate(test, sz, &ts1, &ts2);
1651 }
1652 dev_addr = test->area.dev_addr;
1653 getnstimeofday(&ts1);
1654 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1655 if (ret)
1656 return ret;
1657 getnstimeofday(&ts2);
1658 mmc_test_print_rate(test, sz, &ts1, &ts2);
1659 return 0;
1660 }
1661
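/*
 * Read the whole test area in consecutive chunks of sz bytes and report
 * the average rate.
 */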
1662 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1663 {
1664 unsigned int dev_addr, i, cnt;
1665 struct timespec ts1, ts2;
1666 int ret;
1667
1668 cnt = test->area.max_sz / sz;
1669 dev_addr = test->area.dev_addr;
1670 getnstimeofday(&ts1);
1671 for (i = 0; i < cnt; i++) {
1672 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1673 if (ret)
1674 return ret;
1675 dev_addr += (sz >> 9);
1676 }
1677 getnstimeofday(&ts2);
1678 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1679 return 0;
1680 }
1681
1682 /*
1683 * Consecutive read performance by transfer size.
1684 */
1685 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1686 {
1687 unsigned long sz;
1688 int ret;
1689
1690 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1691 ret = mmc_test_seq_read_perf(test, sz);
1692 if (ret)
1693 return ret;
1694 }
1695 sz = test->area.max_tfr;
1696 return mmc_test_seq_read_perf(test, sz);
1697 }
1698
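/*
 * Erase the test area, then write it in consecutive chunks of sz bytes
 * and report the average rate.
 */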
1699 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1700 {
1701 unsigned int dev_addr, i, cnt;
1702 struct timespec ts1, ts2;
1703 int ret;
1704
1705 ret = mmc_test_area_erase(test);
1706 if (ret)
1707 return ret;
1708 cnt = test->area.max_sz / sz;
1709 dev_addr = test->area.dev_addr;
1710 getnstimeofday(&ts1);
1711 for (i = 0; i < cnt; i++) {
1712 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1713 if (ret)
1714 return ret;
1715 dev_addr += (sz >> 9);
1716 }
1717 getnstimeofday(&ts2);
1718 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1719 return 0;
1720 }
1721
1722 /*
1723 * Consecutive write performance by transfer size.
1724 */
1725 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1726 {
1727 unsigned long sz;
1728 int ret;
1729
1730 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1731 ret = mmc_test_seq_write_perf(test, sz);
1732 if (ret)
1733 return ret;
1734 }
1735 sz = test->area.max_tfr;
1736 return mmc_test_seq_write_perf(test, sz);
1737 }
1738
1739 /*
1740 * Consecutive trim performance by transfer size.
1741 */
1742 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1743 {
1744 unsigned long sz;
1745 unsigned int dev_addr, i, cnt;
1746 struct timespec ts1, ts2;
1747 int ret;
1748
1749 if (!mmc_can_trim(test->card))
1750 return RESULT_UNSUP_CARD;
1751
1752 if (!mmc_can_erase(test->card))
1753 return RESULT_UNSUP_HOST;
1754
1755 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
1756 ret = mmc_test_area_erase(test);
1757 if (ret)
1758 return ret;
1759 ret = mmc_test_area_fill(test);
1760 if (ret)
1761 return ret;
1762 cnt = test->area.max_sz / sz;
1763 dev_addr = test->area.dev_addr;
1764 getnstimeofday(&ts1);
1765 for (i = 0; i < cnt; i++) {
1766 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1767 MMC_TRIM_ARG);
1768 if (ret)
1769 return ret;
1770 dev_addr += (sz >> 9);
1771 }
1772 getnstimeofday(&ts2);
1773 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1774 }
1775 return 0;
1776 }
1777
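/*
 * Simple linear congruential generator (using the classic example rand()
 * constants) used to pick pseudo-random addresses for the random I/O
 * tests. mmc_test_rnd_num() returns a value in the range [0, rnd_cnt).
 */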
1778 static unsigned int rnd_next = 1;
1779
1780 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1781 {
1782 uint64_t r;
1783
1784 rnd_next = rnd_next * 1103515245 + 12345;
1785 r = (rnd_next >> 16) & 0x7fff;
1786 return (r * rnd_cnt) >> 15;
1787 }
1788
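/*
 * Perform random I/O of sz bytes at a time for roughly 10 seconds and,
 * if requested, print the average rate.
 */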
1789 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1790 unsigned long sz)
1791 {
1792 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1793 unsigned int ssz;
1794 struct timespec ts1, ts2, ts;
1795 int ret;
1796
1797 ssz = sz >> 9;
1798
1799 rnd_addr = mmc_test_capacity(test->card) / 4;
1800 range1 = rnd_addr / test->card->pref_erase;
1801 range2 = range1 / ssz;
1802
1803 getnstimeofday(&ts1);
1804 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1805 getnstimeofday(&ts2);
1806 ts = timespec_sub(ts2, ts1);
1807 if (ts.tv_sec >= 10)
1808 break;
1809 ea = mmc_test_rnd_num(range1);
1810 if (ea == last_ea)
1811 ea -= 1;
1812 last_ea = ea;
1813 dev_addr = rnd_addr + test->card->pref_erase * ea +
1814 ssz * mmc_test_rnd_num(range2);
1815 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1816 if (ret)
1817 return ret;
1818 }
1819 if (print)
1820 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1821 return 0;
1822 }
1823
1824 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1825 {
1826 unsigned int next;
1827 unsigned long sz;
1828 int ret;
1829
1830 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1831 /*
1832 * When writing, try to get more consistent results by running
1833 * the test twice with exactly the same I/O but outputting the
1834 * results only for the 2nd run.
1835 */
1836 if (write) {
1837 next = rnd_next;
1838 ret = mmc_test_rnd_perf(test, write, 0, sz);
1839 if (ret)
1840 return ret;
1841 rnd_next = next;
1842 }
1843 ret = mmc_test_rnd_perf(test, write, 1, sz);
1844 if (ret)
1845 return ret;
1846 }
1847 sz = test->area.max_tfr;
1848 if (write) {
1849 next = rnd_next;
1850 ret = mmc_test_rnd_perf(test, write, 0, sz);
1851 if (ret)
1852 return ret;
1853 rnd_next = next;
1854 }
1855 return mmc_test_rnd_perf(test, write, 1, sz);
1856 }
1857
1858 /*
1859 * Random read performance by transfer size.
1860 */
1861 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1862 {
1863 return mmc_test_random_perf(test, 0);
1864 }
1865
1866 /*
1867 * Random write performance by transfer size.
1868 */
1869 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1870 {
1871 return mmc_test_random_perf(test, 1);
1872 }
1873
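/*
 * Transfer tot_sz bytes sequentially in maximum-sized requests, optionally
 * with maximally scattered pages, and report the average rate.
 */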
1874 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1875 unsigned int tot_sz, int max_scatter)
1876 {
1877 unsigned int dev_addr, i, cnt, sz, ssz;
1878 struct timespec ts1, ts2;
1879 int ret;
1880
1881 sz = test->area.max_tfr;
1882 /*
1883 * In the case of a maximally scattered transfer, the maximum transfer
1884 * size is further limited by using PAGE_SIZE segments.
1885 */
1886 if (max_scatter) {
1887 struct mmc_test_area *t = &test->area;
1888 unsigned long max_tfr;
1889
1890 if (t->max_seg_sz >= PAGE_SIZE)
1891 max_tfr = t->max_segs * PAGE_SIZE;
1892 else
1893 max_tfr = t->max_segs * t->max_seg_sz;
1894 if (sz > max_tfr)
1895 sz = max_tfr;
1896 }
1897
1898 ssz = sz >> 9;
1899 dev_addr = mmc_test_capacity(test->card) / 4;
1900 if (tot_sz > dev_addr << 9)
1901 tot_sz = dev_addr << 9;
1902 cnt = tot_sz / sz;
1903 dev_addr &= 0xffff0000; /* Round down to a 32MiB (0x10000-sector) boundary */
1904
1905 getnstimeofday(&ts1);
1906 for (i = 0; i < cnt; i++) {
1907 ret = mmc_test_area_io(test, sz, dev_addr, write,
1908 max_scatter, 0);
1909 if (ret)
1910 return ret;
1911 dev_addr += ssz;
1912 }
1913 getnstimeofday(&ts2);
1914
1915 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1916
1917 return 0;
1918 }
1919
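/*
 * Large sequential transfers: 10 runs of 10MiB, 5 runs of 100MiB and
 * 3 runs of 1000MiB, all with maximally scattered pages.
 */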
1920 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1921 {
1922 int ret, i;
1923
1924 for (i = 0; i < 10; i++) {
1925 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1926 if (ret)
1927 return ret;
1928 }
1929 for (i = 0; i < 5; i++) {
1930 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1931 if (ret)
1932 return ret;
1933 }
1934 for (i = 0; i < 3; i++) {
1935 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
1936 if (ret)
1937 return ret;
1938 }
1939
1940 return ret;
1941 }
1942
1943 /*
1944 * Large sequential read performance.
1945 */
1946 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
1947 {
1948 return mmc_test_large_seq_perf(test, 0);
1949 }
1950
1951 /*
1952 * Large sequential write performance.
1953 */
1954 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1955 {
1956 return mmc_test_large_seq_perf(test, 1);
1957 }
1958
1959 static const struct mmc_test_case mmc_test_cases[] = {
1960 {
1961 .name = "Basic write (no data verification)",
1962 .run = mmc_test_basic_write,
1963 },
1964
1965 {
1966 .name = "Basic read (no data verification)",
1967 .run = mmc_test_basic_read,
1968 },
1969
1970 {
1971 .name = "Basic write (with data verification)",
1972 .prepare = mmc_test_prepare_write,
1973 .run = mmc_test_verify_write,
1974 .cleanup = mmc_test_cleanup,
1975 },
1976
1977 {
1978 .name = "Basic read (with data verification)",
1979 .prepare = mmc_test_prepare_read,
1980 .run = mmc_test_verify_read,
1981 .cleanup = mmc_test_cleanup,
1982 },
1983
1984 {
1985 .name = "Multi-block write",
1986 .prepare = mmc_test_prepare_write,
1987 .run = mmc_test_multi_write,
1988 .cleanup = mmc_test_cleanup,
1989 },
1990
1991 {
1992 .name = "Multi-block read",
1993 .prepare = mmc_test_prepare_read,
1994 .run = mmc_test_multi_read,
1995 .cleanup = mmc_test_cleanup,
1996 },
1997
1998 {
1999 .name = "Power of two block writes",
2000 .prepare = mmc_test_prepare_write,
2001 .run = mmc_test_pow2_write,
2002 .cleanup = mmc_test_cleanup,
2003 },
2004
2005 {
2006 .name = "Power of two block reads",
2007 .prepare = mmc_test_prepare_read,
2008 .run = mmc_test_pow2_read,
2009 .cleanup = mmc_test_cleanup,
2010 },
2011
2012 {
2013 .name = "Weird sized block writes",
2014 .prepare = mmc_test_prepare_write,
2015 .run = mmc_test_weird_write,
2016 .cleanup = mmc_test_cleanup,
2017 },
2018
2019 {
2020 .name = "Weird sized block reads",
2021 .prepare = mmc_test_prepare_read,
2022 .run = mmc_test_weird_read,
2023 .cleanup = mmc_test_cleanup,
2024 },
2025
2026 {
2027 .name = "Badly aligned write",
2028 .prepare = mmc_test_prepare_write,
2029 .run = mmc_test_align_write,
2030 .cleanup = mmc_test_cleanup,
2031 },
2032
2033 {
2034 .name = "Badly aligned read",
2035 .prepare = mmc_test_prepare_read,
2036 .run = mmc_test_align_read,
2037 .cleanup = mmc_test_cleanup,
2038 },
2039
2040 {
2041 .name = "Badly aligned multi-block write",
2042 .prepare = mmc_test_prepare_write,
2043 .run = mmc_test_align_multi_write,
2044 .cleanup = mmc_test_cleanup,
2045 },
2046
2047 {
2048 .name = "Badly aligned multi-block read",
2049 .prepare = mmc_test_prepare_read,
2050 .run = mmc_test_align_multi_read,
2051 .cleanup = mmc_test_cleanup,
2052 },
2053
2054 {
2055 .name = "Correct xfer_size at write (start failure)",
2056 .run = mmc_test_xfersize_write,
2057 },
2058
2059 {
2060 .name = "Correct xfer_size at read (start failure)",
2061 .run = mmc_test_xfersize_read,
2062 },
2063
2064 {
2065 .name = "Correct xfer_size at write (midway failure)",
2066 .run = mmc_test_multi_xfersize_write,
2067 },
2068
2069 {
2070 .name = "Correct xfer_size at read (midway failure)",
2071 .run = mmc_test_multi_xfersize_read,
2072 },
2073
2074 #ifdef CONFIG_HIGHMEM
2075
2076 {
2077 .name = "Highmem write",
2078 .prepare = mmc_test_prepare_write,
2079 .run = mmc_test_write_high,
2080 .cleanup = mmc_test_cleanup,
2081 },
2082
2083 {
2084 .name = "Highmem read",
2085 .prepare = mmc_test_prepare_read,
2086 .run = mmc_test_read_high,
2087 .cleanup = mmc_test_cleanup,
2088 },
2089
2090 {
2091 .name = "Multi-block highmem write",
2092 .prepare = mmc_test_prepare_write,
2093 .run = mmc_test_multi_write_high,
2094 .cleanup = mmc_test_cleanup,
2095 },
2096
2097 {
2098 .name = "Multi-block highmem read",
2099 .prepare = mmc_test_prepare_read,
2100 .run = mmc_test_multi_read_high,
2101 .cleanup = mmc_test_cleanup,
2102 },
2103
2104 #else
2105
2106 {
2107 .name = "Highmem write",
2108 .run = mmc_test_no_highmem,
2109 },
2110
2111 {
2112 .name = "Highmem read",
2113 .run = mmc_test_no_highmem,
2114 },
2115
2116 {
2117 .name = "Multi-block highmem write",
2118 .run = mmc_test_no_highmem,
2119 },
2120
2121 {
2122 .name = "Multi-block highmem read",
2123 .run = mmc_test_no_highmem,
2124 },
2125
2126 #endif /* CONFIG_HIGHMEM */
2127
2128 {
2129 .name = "Best-case read performance",
2130 .prepare = mmc_test_area_prepare_fill,
2131 .run = mmc_test_best_read_performance,
2132 .cleanup = mmc_test_area_cleanup,
2133 },
2134
2135 {
2136 .name = "Best-case write performance",
2137 .prepare = mmc_test_area_prepare_erase,
2138 .run = mmc_test_best_write_performance,
2139 .cleanup = mmc_test_area_cleanup,
2140 },
2141
2142 {
2143 .name = "Best-case read performance into scattered pages",
2144 .prepare = mmc_test_area_prepare_fill,
2145 .run = mmc_test_best_read_perf_max_scatter,
2146 .cleanup = mmc_test_area_cleanup,
2147 },
2148
2149 {
2150 .name = "Best-case write performance from scattered pages",
2151 .prepare = mmc_test_area_prepare_erase,
2152 .run = mmc_test_best_write_perf_max_scatter,
2153 .cleanup = mmc_test_area_cleanup,
2154 },
2155
2156 {
2157 .name = "Single read performance by transfer size",
2158 .prepare = mmc_test_area_prepare_fill,
2159 .run = mmc_test_profile_read_perf,
2160 .cleanup = mmc_test_area_cleanup,
2161 },
2162
2163 {
2164 .name = "Single write performance by transfer size",
2165 .prepare = mmc_test_area_prepare,
2166 .run = mmc_test_profile_write_perf,
2167 .cleanup = mmc_test_area_cleanup,
2168 },
2169
2170 {
2171 .name = "Single trim performance by transfer size",
2172 .prepare = mmc_test_area_prepare_fill,
2173 .run = mmc_test_profile_trim_perf,
2174 .cleanup = mmc_test_area_cleanup,
2175 },
2176
2177 {
2178 .name = "Consecutive read performance by transfer size",
2179 .prepare = mmc_test_area_prepare_fill,
2180 .run = mmc_test_profile_seq_read_perf,
2181 .cleanup = mmc_test_area_cleanup,
2182 },
2183
2184 {
2185 .name = "Consecutive write performance by transfer size",
2186 .prepare = mmc_test_area_prepare,
2187 .run = mmc_test_profile_seq_write_perf,
2188 .cleanup = mmc_test_area_cleanup,
2189 },
2190
2191 {
2192 .name = "Consecutive trim performance by transfer size",
2193 .prepare = mmc_test_area_prepare,
2194 .run = mmc_test_profile_seq_trim_perf,
2195 .cleanup = mmc_test_area_cleanup,
2196 },
2197
2198 {
2199 .name = "Random read performance by transfer size",
2200 .prepare = mmc_test_area_prepare,
2201 .run = mmc_test_random_read_perf,
2202 .cleanup = mmc_test_area_cleanup,
2203 },
2204
2205 {
2206 .name = "Random write performance by transfer size",
2207 .prepare = mmc_test_area_prepare,
2208 .run = mmc_test_random_write_perf,
2209 .cleanup = mmc_test_area_cleanup,
2210 },
2211
2212 {
2213 .name = "Large sequential read into scattered pages",
2214 .prepare = mmc_test_area_prepare,
2215 .run = mmc_test_large_seq_read_perf,
2216 .cleanup = mmc_test_area_cleanup,
2217 },
2218
2219 {
2220 .name = "Large sequential write from scattered pages",
2221 .prepare = mmc_test_area_prepare,
2222 .run = mmc_test_large_seq_write_perf,
2223 .cleanup = mmc_test_area_cleanup,
2224 },
2225
2226 };
2227
2228 static DEFINE_MUTEX(mmc_test_lock);
2229
2230 static LIST_HEAD(mmc_test_result);
2231
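/*
 * Run the selected test case (or all of them if testcase is zero) against
 * the card, storing the results on the global result list.
 */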
2232 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2233 {
2234 int i, ret;
2235
2236 printk(KERN_INFO "%s: Starting tests of card %s...\n",
2237 mmc_hostname(test->card->host), mmc_card_id(test->card));
2238
2239 mmc_claim_host(test->card->host);
2240
2241 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2242 struct mmc_test_general_result *gr;
2243
2244 if (testcase && ((i + 1) != testcase))
2245 continue;
2246
2247 printk(KERN_INFO "%s: Test case %d. %s...\n",
2248 mmc_hostname(test->card->host), i + 1,
2249 mmc_test_cases[i].name);
2250
2251 if (mmc_test_cases[i].prepare) {
2252 ret = mmc_test_cases[i].prepare(test);
2253 if (ret) {
2254 printk(KERN_INFO "%s: Result: Prepare "
2255 "stage failed! (%d)\n",
2256 mmc_hostname(test->card->host),
2257 ret);
2258 continue;
2259 }
2260 }
2261
2262 gr = kzalloc(sizeof(struct mmc_test_general_result),
2263 GFP_KERNEL);
2264 if (gr) {
2265 INIT_LIST_HEAD(&gr->tr_lst);
2266
2267 /* Assign the data we already know */
2268 gr->card = test->card;
2269 gr->testcase = i;
2270
2271 /* Append the container to the global result list */
2272 list_add_tail(&gr->link, &mmc_test_result);
2273
2274 /*
2275 * Save the pointer to the created container in our private
2276 * structure.
2277 */
2278 test->gr = gr;
2279 }
2280
2281 ret = mmc_test_cases[i].run(test);
2282 switch (ret) {
2283 case RESULT_OK:
2284 printk(KERN_INFO "%s: Result: OK\n",
2285 mmc_hostname(test->card->host));
2286 break;
2287 case RESULT_FAIL:
2288 printk(KERN_INFO "%s: Result: FAILED\n",
2289 mmc_hostname(test->card->host));
2290 break;
2291 case RESULT_UNSUP_HOST:
2292 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2293 "(by host)\n",
2294 mmc_hostname(test->card->host));
2295 break;
2296 case RESULT_UNSUP_CARD:
2297 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2298 "(by card)\n",
2299 mmc_hostname(test->card->host));
2300 break;
2301 default:
2302 printk(KERN_INFO "%s: Result: ERROR (%d)\n",
2303 mmc_hostname(test->card->host), ret);
2304 }
2305
2306 /* Save the result */
2307 if (gr)
2308 gr->result = ret;
2309
2310 if (mmc_test_cases[i].cleanup) {
2311 ret = mmc_test_cases[i].cleanup(test);
2312 if (ret) {
2313 printk(KERN_INFO "%s: Warning: Cleanup "
2314 "stage failed! (%d)\n",
2315 mmc_hostname(test->card->host),
2316 ret);
2317 }
2318 }
2319 }
2320
2321 mmc_release_host(test->card->host);
2322
2323 printk(KERN_INFO "%s: Tests completed.\n",
2324 mmc_hostname(test->card->host));
2325 }
2326
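/*
 * Free all stored results for the given card; if @card is NULL, results
 * for every card are released.
 */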
2327 static void mmc_test_free_result(struct mmc_card *card)
2328 {
2329 struct mmc_test_general_result *gr, *grs;
2330
2331 mutex_lock(&mmc_test_lock);
2332
2333 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2334 struct mmc_test_transfer_result *tr, *trs;
2335
2336 if (card && gr->card != card)
2337 continue;
2338
2339 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2340 list_del(&tr->link);
2341 kfree(tr);
2342 }
2343
2344 list_del(&gr->link);
2345 kfree(gr);
2346 }
2347
2348 mutex_unlock(&mmc_test_lock);
2349 }
2350
2351 static LIST_HEAD(mmc_test_file_test);
2352
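/*
 * debugfs read handler: print one "Test <n>: <result>" line per recorded
 * test case for this card, followed by any transfer measurements in the
 * form "<count> <sectors> <seconds>.<nanoseconds> <rate> <iops>".
 */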
2353 static int mtf_test_show(struct seq_file *sf, void *data)
2354 {
2355 struct mmc_card *card = (struct mmc_card *)sf->private;
2356 struct mmc_test_general_result *gr;
2357
2358 mutex_lock(&mmc_test_lock);
2359
2360 list_for_each_entry(gr, &mmc_test_result, link) {
2361 struct mmc_test_transfer_result *tr;
2362
2363 if (gr->card != card)
2364 continue;
2365
2366 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2367
2368 list_for_each_entry(tr, &gr->tr_lst, link) {
2369 seq_printf(sf, "%u %u %lu.%09lu %u %u.%02u\n",
2370 tr->count, tr->sectors,
2371 (unsigned long)tr->ts.tv_sec,
2372 (unsigned long)tr->ts.tv_nsec,
2373 tr->rate, tr->iops / 100, tr->iops % 100);
2374 }
2375 }
2376
2377 mutex_unlock(&mmc_test_lock);
2378
2379 return 0;
2380 }
2381
2382 static int mtf_test_open(struct inode *inode, struct file *file)
2383 {
2384 return single_open(file, mtf_test_show, inode->i_private);
2385 }
2386
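/*
 * debugfs write handler: parse the test case number written by user space,
 * allocate the test buffers, and run the requested test(s) under
 * mmc_test_lock. Previous results for the card are discarded first.
 *
 * Example usage (the path is illustrative; it depends on where debugfs is
 * mounted and on the card's debugfs directory name):
 *
 *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */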
2387 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2388 size_t count, loff_t *pos)
2389 {
2390 struct seq_file *sf = (struct seq_file *)file->private_data;
2391 struct mmc_card *card = (struct mmc_card *)sf->private;
2392 struct mmc_test_card *test;
2393 char lbuf[12];
2394 long testcase;
2395
2396 if (count >= sizeof(lbuf))
2397 return -EINVAL;
2398
2399 if (copy_from_user(lbuf, buf, count))
2400 return -EFAULT;
2401 lbuf[count] = '\0';
2402
2403 if (strict_strtol(lbuf, 10, &testcase))
2404 return -EINVAL;
2405
2406 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2407 if (!test)
2408 return -ENOMEM;
2409
2410 /*
2411 * Remove all results associated with the given card so that only
2412 * data from the most recent run is kept.
2413 */
2414 mmc_test_free_result(card);
2415
2416 test->card = card;
2417
2418 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2419 #ifdef CONFIG_HIGHMEM
2420 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2421 #endif
2422
2423 #ifdef CONFIG_HIGHMEM
2424 if (test->buffer && test->highmem) {
2425 #else
2426 if (test->buffer) {
2427 #endif
2428 mutex_lock(&mmc_test_lock);
2429 mmc_test_run(test, testcase);
2430 mutex_unlock(&mmc_test_lock);
2431 }
2432
2433 #ifdef CONFIG_HIGHMEM
2434 if (test->highmem) __free_pages(test->highmem, BUFFER_ORDER);
2435 #endif
2436 kfree(test->buffer);
2437 kfree(test);
2438
2439 return count;
2440 }
2441
2442 static const struct file_operations mmc_test_fops_test = {
2443 .open = mtf_test_open,
2444 .read = seq_read,
2445 .write = mtf_test_write,
2446 .llseek = seq_lseek,
2447 .release = single_release,
2448 };
2449
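/*
 * Remove the debugfs files registered for the given card (or for all cards
 * when @card is NULL) and free the bookkeeping entries.
 */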
2450 static void mmc_test_free_file_test(struct mmc_card *card)
2451 {
2452 struct mmc_test_dbgfs_file *df, *dfs;
2453
2454 mutex_lock(&mmc_test_lock);
2455
2456 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2457 if (card && df->card != card)
2458 continue;
2459 debugfs_remove(df->file);
2460 list_del(&df->link);
2461 kfree(df);
2462 }
2463
2464 mutex_unlock(&mmc_test_lock);
2465 }
2466
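/*
 * Create the "test" debugfs file under the card's debugfs directory and
 * remember it on mmc_test_file_test so it can be removed later.
 */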
2467 static int mmc_test_register_file_test(struct mmc_card *card)
2468 {
2469 struct dentry *file = NULL;
2470 struct mmc_test_dbgfs_file *df;
2471 int ret = 0;
2472
2473 mutex_lock(&mmc_test_lock);
2474
2475 if (card->debugfs_root)
2476 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2477 card->debugfs_root, card, &mmc_test_fops_test);
2478
2479 if (IS_ERR_OR_NULL(file)) {
2480 dev_err(&card->dev,
2481 "Can't create file. Perhaps debugfs is disabled.\n");
2482 ret = -ENODEV;
2483 goto err;
2484 }
2485
2486 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2487 if (!df) {
2488 debugfs_remove(file);
2489 dev_err(&card->dev,
2490 "Can't allocate memory for internal usage.\n");
2491 ret = -ENOMEM;
2492 goto err;
2493 }
2494
2495 df->card = card;
2496 df->file = file;
2497
2498 list_add(&df->link, &mmc_test_file_test);
2499
2500 err:
2501 mutex_unlock(&mmc_test_lock);
2502
2503 return ret;
2504 }
2505
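/*
 * Bind only to MMC and SD cards; other card types are rejected with -ENODEV.
 */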
2506 static int mmc_test_probe(struct mmc_card *card)
2507 {
2508 int ret;
2509
2510 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2511 return -ENODEV;
2512
2513 ret = mmc_test_register_file_test(card);
2514 if (ret)
2515 return ret;
2516
2517 dev_info(&card->dev, "Card claimed for testing.\n");
2518
2519 return 0;
2520 }
2521
2522 static void mmc_test_remove(struct mmc_card *card)
2523 {
2524 mmc_test_free_result(card);
2525 mmc_test_free_file_test(card);
2526 }
2527
2528 static struct mmc_driver mmc_driver = {
2529 .drv = {
2530 .name = "mmc_test",
2531 },
2532 .probe = mmc_test_probe,
2533 .remove = mmc_test_remove,
2534 };
2535
2536 static int __init mmc_test_init(void)
2537 {
2538 return mmc_register_driver(&mmc_driver);
2539 }
2540
2541 static void __exit mmc_test_exit(void)
2542 {
2543 /* Clear stale data if the card is still plugged in */
2544 mmc_test_free_result(NULL);
2545 mmc_test_free_file_test(NULL);
2546
2547 mmc_unregister_driver(&mmc_driver);
2548 }
2549
2550 module_init(mmc_test_init);
2551 module_exit(mmc_test_exit);
2552
2553 MODULE_LICENSE("GPL");
2554 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2555 MODULE_AUTHOR("Pierre Ossman");