/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/mm.h>		/* si_meminfo() used by mmc_test_alloc_mem() */
#include <linux/scatterlist.h>
22 #define RESULT_UNSUP_HOST 2
23 #define RESULT_UNSUP_CARD 3
25 #define BUFFER_ORDER 2
26 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
29 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
30 * @page: first page in the allocation
31 * @order: order of the number of pages allocated
33 struct mmc_test_pages
{
39 * struct mmc_test_mem - allocated memory.
40 * @arr: array of allocations
41 * @cnt: number of allocations
44 struct mmc_test_pages
*arr
;
49 * struct mmc_test_area - information for performance tests.
50 * @dev_addr: address on card at which to do performance tests
51 * @max_sz: test area size (in bytes)
52 * @max_segs: maximum segments in scatterlist @sg
53 * @blocks: number of (512 byte) blocks currently mapped by @sg
54 * @sg_len: length of currently mapped scatterlist @sg
55 * @mem: allocated memory
58 struct mmc_test_area
{
59 unsigned int dev_addr
;
61 unsigned int max_segs
;
64 struct mmc_test_mem
*mem
;
65 struct scatterlist
*sg
;
69 * struct mmc_test_card - test information.
70 * @card: card under test
71 * @scratch: transfer buffer
72 * @buffer: transfer buffer
73 * @highmem: buffer for highmem tests
74 * @area: information for performance tests
76 struct mmc_test_card
{
77 struct mmc_card
*card
;
79 u8 scratch
[BUFFER_SIZE
];
84 struct mmc_test_area area
;
87 /*******************************************************************/
88 /* General helper functions */
89 /*******************************************************************/
92 * Configure correct block size in card
94 static int mmc_test_set_blksize(struct mmc_test_card
*test
, unsigned size
)
96 struct mmc_command cmd
;
99 cmd
.opcode
= MMC_SET_BLOCKLEN
;
101 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_AC
;
102 ret
= mmc_wait_for_cmd(test
->card
->host
, &cmd
, 0);
110 * Fill in the mmc_request structure given a set of transfer parameters.
112 static void mmc_test_prepare_mrq(struct mmc_test_card
*test
,
113 struct mmc_request
*mrq
, struct scatterlist
*sg
, unsigned sg_len
,
114 unsigned dev_addr
, unsigned blocks
, unsigned blksz
, int write
)
116 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
|| !mrq
->stop
);
119 mrq
->cmd
->opcode
= write
?
120 MMC_WRITE_MULTIPLE_BLOCK
: MMC_READ_MULTIPLE_BLOCK
;
122 mrq
->cmd
->opcode
= write
?
123 MMC_WRITE_BLOCK
: MMC_READ_SINGLE_BLOCK
;
126 mrq
->cmd
->arg
= dev_addr
;
127 if (!mmc_card_blockaddr(test
->card
))
130 mrq
->cmd
->flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
135 mrq
->stop
->opcode
= MMC_STOP_TRANSMISSION
;
137 mrq
->stop
->flags
= MMC_RSP_R1B
| MMC_CMD_AC
;
140 mrq
->data
->blksz
= blksz
;
141 mrq
->data
->blocks
= blocks
;
142 mrq
->data
->flags
= write
? MMC_DATA_WRITE
: MMC_DATA_READ
;
144 mrq
->data
->sg_len
= sg_len
;
146 mmc_set_data_timeout(mrq
->data
, test
->card
);
149 static int mmc_test_busy(struct mmc_command
*cmd
)
151 return !(cmd
->resp
[0] & R1_READY_FOR_DATA
) ||
152 (R1_CURRENT_STATE(cmd
->resp
[0]) == 7);
156 * Wait for the card to finish the busy state
158 static int mmc_test_wait_busy(struct mmc_test_card
*test
)
161 struct mmc_command cmd
;
165 memset(&cmd
, 0, sizeof(struct mmc_command
));
167 cmd
.opcode
= MMC_SEND_STATUS
;
168 cmd
.arg
= test
->card
->rca
<< 16;
169 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_AC
;
171 ret
= mmc_wait_for_cmd(test
->card
->host
, &cmd
, 0);
175 if (!busy
&& mmc_test_busy(&cmd
)) {
177 printk(KERN_INFO
"%s: Warning: Host did not "
178 "wait for busy state to end.\n",
179 mmc_hostname(test
->card
->host
));
181 } while (mmc_test_busy(&cmd
));
187 * Transfer a single sector of kernel addressable data
189 static int mmc_test_buffer_transfer(struct mmc_test_card
*test
,
190 u8
*buffer
, unsigned addr
, unsigned blksz
, int write
)
194 struct mmc_request mrq
;
195 struct mmc_command cmd
;
196 struct mmc_command stop
;
197 struct mmc_data data
;
199 struct scatterlist sg
;
201 memset(&mrq
, 0, sizeof(struct mmc_request
));
202 memset(&cmd
, 0, sizeof(struct mmc_command
));
203 memset(&data
, 0, sizeof(struct mmc_data
));
204 memset(&stop
, 0, sizeof(struct mmc_command
));
210 sg_init_one(&sg
, buffer
, blksz
);
212 mmc_test_prepare_mrq(test
, &mrq
, &sg
, 1, addr
, 1, blksz
, write
);
214 mmc_wait_for_req(test
->card
->host
, &mrq
);
221 ret
= mmc_test_wait_busy(test
);
228 static void mmc_test_free_mem(struct mmc_test_mem
*mem
)
233 __free_pages(mem
->arr
[mem
->cnt
].page
,
234 mem
->arr
[mem
->cnt
].order
);
240 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case
241 * there isn't much memory do not exceed 1/16th total RAM.
243 static struct mmc_test_mem
*mmc_test_alloc_mem(unsigned int min_sz
,
246 unsigned int max_page_cnt
= DIV_ROUND_UP(max_sz
, PAGE_SIZE
);
247 unsigned int min_page_cnt
= DIV_ROUND_UP(min_sz
, PAGE_SIZE
);
248 unsigned int page_cnt
= 0;
249 struct mmc_test_mem
*mem
;
253 if (max_page_cnt
> si
.totalram
>> 4)
254 max_page_cnt
= si
.totalram
>> 4;
255 if (max_page_cnt
< min_page_cnt
)
256 max_page_cnt
= min_page_cnt
;
258 mem
= kzalloc(sizeof(struct mmc_test_mem
), GFP_KERNEL
);
262 mem
->arr
= kzalloc(sizeof(struct mmc_test_pages
) * max_page_cnt
,
267 while (max_page_cnt
) {
270 gfp_t flags
= GFP_KERNEL
| GFP_DMA
| __GFP_NOWARN
|
273 order
= get_order(page_cnt
<< PAGE_SHIFT
);
275 page
= alloc_pages(flags
, order
);
281 if (page_cnt
< min_page_cnt
)
285 mem
->arr
[mem
->cnt
].page
= page
;
286 mem
->arr
[mem
->cnt
].order
= order
;
288 max_page_cnt
-= 1 << order
;
289 page_cnt
+= 1 << order
;
295 mmc_test_free_mem(mem
);
300 * Map memory into a scatterlist. Optionally allow the same memory to be
301 * mapped more than once.
303 static int mmc_test_map_sg(struct mmc_test_mem
*mem
, unsigned int sz
,
304 struct scatterlist
*sglist
, int repeat
,
305 unsigned int max_segs
, unsigned int *sg_len
)
307 struct scatterlist
*sg
= NULL
;
310 sg_init_table(sglist
, max_segs
);
314 for (i
= 0; i
< mem
->cnt
; i
++) {
315 unsigned int len
= PAGE_SIZE
<< mem
->arr
[i
].order
;
325 sg_set_page(sg
, mem
->arr
[i
].page
, len
, 0);
331 } while (sz
&& repeat
);
343 * Map memory into a scatterlist so that no pages are contiguous. Allow the
344 * same memory to be mapped more than once.
346 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem
*mem
,
348 struct scatterlist
*sglist
,
349 unsigned int max_segs
,
350 unsigned int *sg_len
)
352 struct scatterlist
*sg
= NULL
;
353 unsigned int i
= mem
->cnt
, cnt
, len
;
354 void *base
, *addr
, *last_addr
= NULL
;
356 sg_init_table(sglist
, max_segs
);
360 base
= page_address(mem
->arr
[--i
].page
);
361 cnt
= 1 << mem
->arr
[i
].order
;
363 addr
= base
+ PAGE_SIZE
* --cnt
;
364 if (last_addr
&& last_addr
+ PAGE_SIZE
== addr
)
376 sg_set_page(sg
, virt_to_page(addr
), len
, 0);
389 * Calculate transfer rate in bytes per second.
391 static unsigned int mmc_test_rate(uint64_t bytes
, struct timespec
*ts
)
401 while (ns
> UINT_MAX
) {
409 do_div(bytes
, (uint32_t)ns
);
415 * Print the transfer rate.
417 static void mmc_test_print_rate(struct mmc_test_card
*test
, uint64_t bytes
,
418 struct timespec
*ts1
, struct timespec
*ts2
)
420 unsigned int rate
, sectors
= bytes
>> 9;
423 ts
= timespec_sub(*ts2
, *ts1
);
425 rate
= mmc_test_rate(bytes
, &ts
);
427 printk(KERN_INFO
"%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
428 "seconds (%u kB/s, %u KiB/s)\n",
429 mmc_hostname(test
->card
->host
), sectors
, sectors
>> 1,
430 (sectors
== 1 ? ".5" : ""), (unsigned long)ts
.tv_sec
,
431 (unsigned long)ts
.tv_nsec
, rate
/ 1000, rate
/ 1024);
435 * Print the average transfer rate.
437 static void mmc_test_print_avg_rate(struct mmc_test_card
*test
, uint64_t bytes
,
438 unsigned int count
, struct timespec
*ts1
,
439 struct timespec
*ts2
)
441 unsigned int rate
, sectors
= bytes
>> 9;
442 uint64_t tot
= bytes
* count
;
445 ts
= timespec_sub(*ts2
, *ts1
);
447 rate
= mmc_test_rate(tot
, &ts
);
449 printk(KERN_INFO
"%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
450 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
451 mmc_hostname(test
->card
->host
), count
, sectors
, count
,
452 sectors
>> 1, (sectors
== 1 ? ".5" : ""),
453 (unsigned long)ts
.tv_sec
, (unsigned long)ts
.tv_nsec
,
454 rate
/ 1000, rate
/ 1024);
458 * Return the card size in sectors.
460 static unsigned int mmc_test_capacity(struct mmc_card
*card
)
462 if (!mmc_card_sd(card
) && mmc_card_blockaddr(card
))
463 return card
->ext_csd
.sectors
;
465 return card
->csd
.capacity
<< (card
->csd
.read_blkbits
- 9);
468 /*******************************************************************/
469 /* Test preparation and cleanup */
470 /*******************************************************************/
473 * Fill the first couple of sectors of the card with known data
474 * so that bad reads/writes can be detected
476 static int __mmc_test_prepare(struct mmc_test_card
*test
, int write
)
480 ret
= mmc_test_set_blksize(test
, 512);
485 memset(test
->buffer
, 0xDF, 512);
487 for (i
= 0;i
< 512;i
++)
491 for (i
= 0;i
< BUFFER_SIZE
/ 512;i
++) {
492 ret
= mmc_test_buffer_transfer(test
, test
->buffer
, i
, 512, 1);
500 static int mmc_test_prepare_write(struct mmc_test_card
*test
)
502 return __mmc_test_prepare(test
, 1);
505 static int mmc_test_prepare_read(struct mmc_test_card
*test
)
507 return __mmc_test_prepare(test
, 0);
510 static int mmc_test_cleanup(struct mmc_test_card
*test
)
514 ret
= mmc_test_set_blksize(test
, 512);
518 memset(test
->buffer
, 0, 512);
520 for (i
= 0;i
< BUFFER_SIZE
/ 512;i
++) {
521 ret
= mmc_test_buffer_transfer(test
, test
->buffer
, i
, 512, 1);
529 /*******************************************************************/
530 /* Test execution helpers */
531 /*******************************************************************/
534 * Modifies the mmc_request to perform the "short transfer" tests
536 static void mmc_test_prepare_broken_mrq(struct mmc_test_card
*test
,
537 struct mmc_request
*mrq
, int write
)
539 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
541 if (mrq
->data
->blocks
> 1) {
542 mrq
->cmd
->opcode
= write
?
543 MMC_WRITE_BLOCK
: MMC_READ_SINGLE_BLOCK
;
546 mrq
->cmd
->opcode
= MMC_SEND_STATUS
;
547 mrq
->cmd
->arg
= test
->card
->rca
<< 16;
552 * Checks that a normal transfer didn't have any errors
554 static int mmc_test_check_result(struct mmc_test_card
*test
,
555 struct mmc_request
*mrq
)
559 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
563 if (!ret
&& mrq
->cmd
->error
)
564 ret
= mrq
->cmd
->error
;
565 if (!ret
&& mrq
->data
->error
)
566 ret
= mrq
->data
->error
;
567 if (!ret
&& mrq
->stop
&& mrq
->stop
->error
)
568 ret
= mrq
->stop
->error
;
569 if (!ret
&& mrq
->data
->bytes_xfered
!=
570 mrq
->data
->blocks
* mrq
->data
->blksz
)
574 ret
= RESULT_UNSUP_HOST
;
580 * Checks that a "short transfer" behaved as expected
582 static int mmc_test_check_broken_result(struct mmc_test_card
*test
,
583 struct mmc_request
*mrq
)
587 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
591 if (!ret
&& mrq
->cmd
->error
)
592 ret
= mrq
->cmd
->error
;
593 if (!ret
&& mrq
->data
->error
== 0)
595 if (!ret
&& mrq
->data
->error
!= -ETIMEDOUT
)
596 ret
= mrq
->data
->error
;
597 if (!ret
&& mrq
->stop
&& mrq
->stop
->error
)
598 ret
= mrq
->stop
->error
;
599 if (mrq
->data
->blocks
> 1) {
600 if (!ret
&& mrq
->data
->bytes_xfered
> mrq
->data
->blksz
)
603 if (!ret
&& mrq
->data
->bytes_xfered
> 0)
608 ret
= RESULT_UNSUP_HOST
;
614 * Tests a basic transfer with certain parameters
616 static int mmc_test_simple_transfer(struct mmc_test_card
*test
,
617 struct scatterlist
*sg
, unsigned sg_len
, unsigned dev_addr
,
618 unsigned blocks
, unsigned blksz
, int write
)
620 struct mmc_request mrq
;
621 struct mmc_command cmd
;
622 struct mmc_command stop
;
623 struct mmc_data data
;
625 memset(&mrq
, 0, sizeof(struct mmc_request
));
626 memset(&cmd
, 0, sizeof(struct mmc_command
));
627 memset(&data
, 0, sizeof(struct mmc_data
));
628 memset(&stop
, 0, sizeof(struct mmc_command
));
634 mmc_test_prepare_mrq(test
, &mrq
, sg
, sg_len
, dev_addr
,
635 blocks
, blksz
, write
);
637 mmc_wait_for_req(test
->card
->host
, &mrq
);
639 mmc_test_wait_busy(test
);
641 return mmc_test_check_result(test
, &mrq
);
645 * Tests a transfer where the card will fail completely or partly
647 static int mmc_test_broken_transfer(struct mmc_test_card
*test
,
648 unsigned blocks
, unsigned blksz
, int write
)
650 struct mmc_request mrq
;
651 struct mmc_command cmd
;
652 struct mmc_command stop
;
653 struct mmc_data data
;
655 struct scatterlist sg
;
657 memset(&mrq
, 0, sizeof(struct mmc_request
));
658 memset(&cmd
, 0, sizeof(struct mmc_command
));
659 memset(&data
, 0, sizeof(struct mmc_data
));
660 memset(&stop
, 0, sizeof(struct mmc_command
));
666 sg_init_one(&sg
, test
->buffer
, blocks
* blksz
);
668 mmc_test_prepare_mrq(test
, &mrq
, &sg
, 1, 0, blocks
, blksz
, write
);
669 mmc_test_prepare_broken_mrq(test
, &mrq
, write
);
671 mmc_wait_for_req(test
->card
->host
, &mrq
);
673 mmc_test_wait_busy(test
);
675 return mmc_test_check_broken_result(test
, &mrq
);
679 * Does a complete transfer test where data is also validated
681 * Note: mmc_test_prepare() must have been done before this call
683 static int mmc_test_transfer(struct mmc_test_card
*test
,
684 struct scatterlist
*sg
, unsigned sg_len
, unsigned dev_addr
,
685 unsigned blocks
, unsigned blksz
, int write
)
691 for (i
= 0;i
< blocks
* blksz
;i
++)
692 test
->scratch
[i
] = i
;
694 memset(test
->scratch
, 0, BUFFER_SIZE
);
696 local_irq_save(flags
);
697 sg_copy_from_buffer(sg
, sg_len
, test
->scratch
, BUFFER_SIZE
);
698 local_irq_restore(flags
);
700 ret
= mmc_test_set_blksize(test
, blksz
);
704 ret
= mmc_test_simple_transfer(test
, sg
, sg_len
, dev_addr
,
705 blocks
, blksz
, write
);
712 ret
= mmc_test_set_blksize(test
, 512);
716 sectors
= (blocks
* blksz
+ 511) / 512;
717 if ((sectors
* 512) == (blocks
* blksz
))
720 if ((sectors
* 512) > BUFFER_SIZE
)
723 memset(test
->buffer
, 0, sectors
* 512);
725 for (i
= 0;i
< sectors
;i
++) {
726 ret
= mmc_test_buffer_transfer(test
,
727 test
->buffer
+ i
* 512,
728 dev_addr
+ i
, 512, 0);
733 for (i
= 0;i
< blocks
* blksz
;i
++) {
734 if (test
->buffer
[i
] != (u8
)i
)
738 for (;i
< sectors
* 512;i
++) {
739 if (test
->buffer
[i
] != 0xDF)
743 local_irq_save(flags
);
744 sg_copy_to_buffer(sg
, sg_len
, test
->scratch
, BUFFER_SIZE
);
745 local_irq_restore(flags
);
746 for (i
= 0;i
< blocks
* blksz
;i
++) {
747 if (test
->scratch
[i
] != (u8
)i
)
755 /*******************************************************************/
757 /*******************************************************************/
759 struct mmc_test_case
{
762 int (*prepare
)(struct mmc_test_card
*);
763 int (*run
)(struct mmc_test_card
*);
764 int (*cleanup
)(struct mmc_test_card
*);
767 static int mmc_test_basic_write(struct mmc_test_card
*test
)
770 struct scatterlist sg
;
772 ret
= mmc_test_set_blksize(test
, 512);
776 sg_init_one(&sg
, test
->buffer
, 512);
778 ret
= mmc_test_simple_transfer(test
, &sg
, 1, 0, 1, 512, 1);
785 static int mmc_test_basic_read(struct mmc_test_card
*test
)
788 struct scatterlist sg
;
790 ret
= mmc_test_set_blksize(test
, 512);
794 sg_init_one(&sg
, test
->buffer
, 512);
796 ret
= mmc_test_simple_transfer(test
, &sg
, 1, 0, 1, 512, 0);
803 static int mmc_test_verify_write(struct mmc_test_card
*test
)
806 struct scatterlist sg
;
808 sg_init_one(&sg
, test
->buffer
, 512);
810 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
817 static int mmc_test_verify_read(struct mmc_test_card
*test
)
820 struct scatterlist sg
;
822 sg_init_one(&sg
, test
->buffer
, 512);
824 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
831 static int mmc_test_multi_write(struct mmc_test_card
*test
)
835 struct scatterlist sg
;
837 if (test
->card
->host
->max_blk_count
== 1)
838 return RESULT_UNSUP_HOST
;
840 size
= PAGE_SIZE
* 2;
841 size
= min(size
, test
->card
->host
->max_req_size
);
842 size
= min(size
, test
->card
->host
->max_seg_size
);
843 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
846 return RESULT_UNSUP_HOST
;
848 sg_init_one(&sg
, test
->buffer
, size
);
850 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
857 static int mmc_test_multi_read(struct mmc_test_card
*test
)
861 struct scatterlist sg
;
863 if (test
->card
->host
->max_blk_count
== 1)
864 return RESULT_UNSUP_HOST
;
866 size
= PAGE_SIZE
* 2;
867 size
= min(size
, test
->card
->host
->max_req_size
);
868 size
= min(size
, test
->card
->host
->max_seg_size
);
869 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
872 return RESULT_UNSUP_HOST
;
874 sg_init_one(&sg
, test
->buffer
, size
);
876 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
883 static int mmc_test_pow2_write(struct mmc_test_card
*test
)
886 struct scatterlist sg
;
888 if (!test
->card
->csd
.write_partial
)
889 return RESULT_UNSUP_CARD
;
891 for (i
= 1; i
< 512;i
<<= 1) {
892 sg_init_one(&sg
, test
->buffer
, i
);
893 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 1);
901 static int mmc_test_pow2_read(struct mmc_test_card
*test
)
904 struct scatterlist sg
;
906 if (!test
->card
->csd
.read_partial
)
907 return RESULT_UNSUP_CARD
;
909 for (i
= 1; i
< 512;i
<<= 1) {
910 sg_init_one(&sg
, test
->buffer
, i
);
911 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 0);
919 static int mmc_test_weird_write(struct mmc_test_card
*test
)
922 struct scatterlist sg
;
924 if (!test
->card
->csd
.write_partial
)
925 return RESULT_UNSUP_CARD
;
927 for (i
= 3; i
< 512;i
+= 7) {
928 sg_init_one(&sg
, test
->buffer
, i
);
929 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 1);
937 static int mmc_test_weird_read(struct mmc_test_card
*test
)
940 struct scatterlist sg
;
942 if (!test
->card
->csd
.read_partial
)
943 return RESULT_UNSUP_CARD
;
945 for (i
= 3; i
< 512;i
+= 7) {
946 sg_init_one(&sg
, test
->buffer
, i
);
947 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 0);
955 static int mmc_test_align_write(struct mmc_test_card
*test
)
958 struct scatterlist sg
;
960 for (i
= 1;i
< 4;i
++) {
961 sg_init_one(&sg
, test
->buffer
+ i
, 512);
962 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
970 static int mmc_test_align_read(struct mmc_test_card
*test
)
973 struct scatterlist sg
;
975 for (i
= 1;i
< 4;i
++) {
976 sg_init_one(&sg
, test
->buffer
+ i
, 512);
977 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
985 static int mmc_test_align_multi_write(struct mmc_test_card
*test
)
989 struct scatterlist sg
;
991 if (test
->card
->host
->max_blk_count
== 1)
992 return RESULT_UNSUP_HOST
;
994 size
= PAGE_SIZE
* 2;
995 size
= min(size
, test
->card
->host
->max_req_size
);
996 size
= min(size
, test
->card
->host
->max_seg_size
);
997 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1000 return RESULT_UNSUP_HOST
;
1002 for (i
= 1;i
< 4;i
++) {
1003 sg_init_one(&sg
, test
->buffer
+ i
, size
);
1004 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
1012 static int mmc_test_align_multi_read(struct mmc_test_card
*test
)
1016 struct scatterlist sg
;
1018 if (test
->card
->host
->max_blk_count
== 1)
1019 return RESULT_UNSUP_HOST
;
1021 size
= PAGE_SIZE
* 2;
1022 size
= min(size
, test
->card
->host
->max_req_size
);
1023 size
= min(size
, test
->card
->host
->max_seg_size
);
1024 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1027 return RESULT_UNSUP_HOST
;
1029 for (i
= 1;i
< 4;i
++) {
1030 sg_init_one(&sg
, test
->buffer
+ i
, size
);
1031 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
1039 static int mmc_test_xfersize_write(struct mmc_test_card
*test
)
1043 ret
= mmc_test_set_blksize(test
, 512);
1047 ret
= mmc_test_broken_transfer(test
, 1, 512, 1);
1054 static int mmc_test_xfersize_read(struct mmc_test_card
*test
)
1058 ret
= mmc_test_set_blksize(test
, 512);
1062 ret
= mmc_test_broken_transfer(test
, 1, 512, 0);
1069 static int mmc_test_multi_xfersize_write(struct mmc_test_card
*test
)
1073 if (test
->card
->host
->max_blk_count
== 1)
1074 return RESULT_UNSUP_HOST
;
1076 ret
= mmc_test_set_blksize(test
, 512);
1080 ret
= mmc_test_broken_transfer(test
, 2, 512, 1);
1087 static int mmc_test_multi_xfersize_read(struct mmc_test_card
*test
)
1091 if (test
->card
->host
->max_blk_count
== 1)
1092 return RESULT_UNSUP_HOST
;
1094 ret
= mmc_test_set_blksize(test
, 512);
1098 ret
= mmc_test_broken_transfer(test
, 2, 512, 0);
1105 #ifdef CONFIG_HIGHMEM
1107 static int mmc_test_write_high(struct mmc_test_card
*test
)
1110 struct scatterlist sg
;
1112 sg_init_table(&sg
, 1);
1113 sg_set_page(&sg
, test
->highmem
, 512, 0);
1115 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
1122 static int mmc_test_read_high(struct mmc_test_card
*test
)
1125 struct scatterlist sg
;
1127 sg_init_table(&sg
, 1);
1128 sg_set_page(&sg
, test
->highmem
, 512, 0);
1130 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
1137 static int mmc_test_multi_write_high(struct mmc_test_card
*test
)
1141 struct scatterlist sg
;
1143 if (test
->card
->host
->max_blk_count
== 1)
1144 return RESULT_UNSUP_HOST
;
1146 size
= PAGE_SIZE
* 2;
1147 size
= min(size
, test
->card
->host
->max_req_size
);
1148 size
= min(size
, test
->card
->host
->max_seg_size
);
1149 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1152 return RESULT_UNSUP_HOST
;
1154 sg_init_table(&sg
, 1);
1155 sg_set_page(&sg
, test
->highmem
, size
, 0);
1157 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
1164 static int mmc_test_multi_read_high(struct mmc_test_card
*test
)
1168 struct scatterlist sg
;
1170 if (test
->card
->host
->max_blk_count
== 1)
1171 return RESULT_UNSUP_HOST
;
1173 size
= PAGE_SIZE
* 2;
1174 size
= min(size
, test
->card
->host
->max_req_size
);
1175 size
= min(size
, test
->card
->host
->max_seg_size
);
1176 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1179 return RESULT_UNSUP_HOST
;
1181 sg_init_table(&sg
, 1);
1182 sg_set_page(&sg
, test
->highmem
, size
, 0);
1184 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
1193 static int mmc_test_no_highmem(struct mmc_test_card
*test
)
1195 printk(KERN_INFO
"%s: Highmem not configured - test skipped\n",
1196 mmc_hostname(test
->card
->host
));
1200 #endif /* CONFIG_HIGHMEM */
1203 * Map sz bytes so that it can be transferred.
1205 static int mmc_test_area_map(struct mmc_test_card
*test
, unsigned int sz
,
1208 struct mmc_test_area
*t
= &test
->area
;
1210 t
->blocks
= sz
>> 9;
1213 return mmc_test_map_sg_max_scatter(t
->mem
, sz
, t
->sg
,
1214 t
->max_segs
, &t
->sg_len
);
1216 return mmc_test_map_sg(t
->mem
, sz
, t
->sg
, 1, t
->max_segs
,
1222 * Transfer bytes mapped by mmc_test_area_map().
1224 static int mmc_test_area_transfer(struct mmc_test_card
*test
,
1225 unsigned int dev_addr
, int write
)
1227 struct mmc_test_area
*t
= &test
->area
;
1229 return mmc_test_simple_transfer(test
, t
->sg
, t
->sg_len
, dev_addr
,
1230 t
->blocks
, 512, write
);
1234 * Map and transfer bytes.
1236 static int mmc_test_area_io(struct mmc_test_card
*test
, unsigned int sz
,
1237 unsigned int dev_addr
, int write
, int max_scatter
,
1240 struct timespec ts1
, ts2
;
1243 ret
= mmc_test_area_map(test
, sz
, max_scatter
);
1248 getnstimeofday(&ts1
);
1250 ret
= mmc_test_area_transfer(test
, dev_addr
, write
);
1255 getnstimeofday(&ts2
);
1258 mmc_test_print_rate(test
, sz
, &ts1
, &ts2
);
1264 * Write the test area entirely.
1266 static int mmc_test_area_fill(struct mmc_test_card
*test
)
1268 return mmc_test_area_io(test
, test
->area
.max_sz
, test
->area
.dev_addr
,
1273 * Erase the test area entirely.
1275 static int mmc_test_area_erase(struct mmc_test_card
*test
)
1277 struct mmc_test_area
*t
= &test
->area
;
1279 if (!mmc_can_erase(test
->card
))
1282 return mmc_erase(test
->card
, t
->dev_addr
, test
->area
.max_sz
>> 9,
1287 * Cleanup struct mmc_test_area.
1289 static int mmc_test_area_cleanup(struct mmc_test_card
*test
)
1291 struct mmc_test_area
*t
= &test
->area
;
1294 mmc_test_free_mem(t
->mem
);
1300 * Initialize an area for testing large transfers. The size of the area is the
1301 * preferred erase size which is a good size for optimal transfer speed. Note
1302 * that is typically 4MiB for modern cards. The test area is set to the middle
1303 * of the card because cards may have different charateristics at the front
1304 * (for FAT file system optimization). Optionally, the area is erased (if the
1305 * card supports it) which may improve write performance. Optionally, the area
1306 * is filled with data for subsequent read tests.
1308 static int mmc_test_area_init(struct mmc_test_card
*test
, int erase
, int fill
)
1310 struct mmc_test_area
*t
= &test
->area
;
1311 unsigned int min_sz
= 64 * 1024;
1314 ret
= mmc_test_set_blksize(test
, 512);
1319 * Try to allocate enough memory for the whole area. Less is OK
1320 * because the same memory can be mapped into the scatterlist more than
1323 t
->max_sz
= test
->card
->pref_erase
<< 9;
1324 t
->mem
= mmc_test_alloc_mem(min_sz
, t
->max_sz
);
1328 t
->max_segs
= DIV_ROUND_UP(t
->max_sz
, PAGE_SIZE
);
1329 t
->sg
= kmalloc(sizeof(struct scatterlist
) * t
->max_segs
, GFP_KERNEL
);
1335 t
->dev_addr
= mmc_test_capacity(test
->card
) / 2;
1336 t
->dev_addr
-= t
->dev_addr
% (t
->max_sz
>> 9);
1339 ret
= mmc_test_area_erase(test
);
1345 ret
= mmc_test_area_fill(test
);
1353 mmc_test_area_cleanup(test
);
1358 * Prepare for large transfers. Do not erase the test area.
1360 static int mmc_test_area_prepare(struct mmc_test_card
*test
)
1362 return mmc_test_area_init(test
, 0, 0);
1366 * Prepare for large transfers. Do erase the test area.
1368 static int mmc_test_area_prepare_erase(struct mmc_test_card
*test
)
1370 return mmc_test_area_init(test
, 1, 0);
1374 * Prepare for large transfers. Erase and fill the test area.
1376 static int mmc_test_area_prepare_fill(struct mmc_test_card
*test
)
1378 return mmc_test_area_init(test
, 1, 1);
1382 * Test best-case performance. Best-case performance is expected from
1383 * a single large transfer.
1385 * An additional option (max_scatter) allows the measurement of the same
1386 * transfer but with no contiguous pages in the scatter list. This tests
1387 * the efficiency of DMA to handle scattered pages.
1389 static int mmc_test_best_performance(struct mmc_test_card
*test
, int write
,
1392 return mmc_test_area_io(test
, test
->area
.max_sz
, test
->area
.dev_addr
,
1393 write
, max_scatter
, 1);
1397 * Best-case read performance.
1399 static int mmc_test_best_read_performance(struct mmc_test_card
*test
)
1401 return mmc_test_best_performance(test
, 0, 0);
1405 * Best-case write performance.
1407 static int mmc_test_best_write_performance(struct mmc_test_card
*test
)
1409 return mmc_test_best_performance(test
, 1, 0);
1413 * Best-case read performance into scattered pages.
1415 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card
*test
)
1417 return mmc_test_best_performance(test
, 0, 1);
1421 * Best-case write performance from scattered pages.
1423 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card
*test
)
1425 return mmc_test_best_performance(test
, 1, 1);
1429 * Single read performance by transfer size.
1431 static int mmc_test_profile_read_perf(struct mmc_test_card
*test
)
1433 unsigned int sz
, dev_addr
;
1436 for (sz
= 512; sz
< test
->area
.max_sz
; sz
<<= 1) {
1437 dev_addr
= test
->area
.dev_addr
+ (sz
>> 9);
1438 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 0, 0, 1);
1442 dev_addr
= test
->area
.dev_addr
;
1443 return mmc_test_area_io(test
, sz
, dev_addr
, 0, 0, 1);
1447 * Single write performance by transfer size.
1449 static int mmc_test_profile_write_perf(struct mmc_test_card
*test
)
1451 unsigned int sz
, dev_addr
;
1454 ret
= mmc_test_area_erase(test
);
1457 for (sz
= 512; sz
< test
->area
.max_sz
; sz
<<= 1) {
1458 dev_addr
= test
->area
.dev_addr
+ (sz
>> 9);
1459 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 1, 0, 1);
1463 ret
= mmc_test_area_erase(test
);
1466 dev_addr
= test
->area
.dev_addr
;
1467 return mmc_test_area_io(test
, sz
, dev_addr
, 1, 0, 1);
1471 * Single trim performance by transfer size.
1473 static int mmc_test_profile_trim_perf(struct mmc_test_card
*test
)
1475 unsigned int sz
, dev_addr
;
1476 struct timespec ts1
, ts2
;
1479 if (!mmc_can_trim(test
->card
))
1480 return RESULT_UNSUP_CARD
;
1482 if (!mmc_can_erase(test
->card
))
1483 return RESULT_UNSUP_HOST
;
1485 for (sz
= 512; sz
< test
->area
.max_sz
; sz
<<= 1) {
1486 dev_addr
= test
->area
.dev_addr
+ (sz
>> 9);
1487 getnstimeofday(&ts1
);
1488 ret
= mmc_erase(test
->card
, dev_addr
, sz
>> 9, MMC_TRIM_ARG
);
1491 getnstimeofday(&ts2
);
1492 mmc_test_print_rate(test
, sz
, &ts1
, &ts2
);
1494 dev_addr
= test
->area
.dev_addr
;
1495 getnstimeofday(&ts1
);
1496 ret
= mmc_erase(test
->card
, dev_addr
, sz
>> 9, MMC_TRIM_ARG
);
1499 getnstimeofday(&ts2
);
1500 mmc_test_print_rate(test
, sz
, &ts1
, &ts2
);
1505 * Consecutive read performance by transfer size.
1507 static int mmc_test_profile_seq_read_perf(struct mmc_test_card
*test
)
1509 unsigned int sz
, dev_addr
, i
, cnt
;
1510 struct timespec ts1
, ts2
;
1513 for (sz
= 512; sz
<= test
->area
.max_sz
; sz
<<= 1) {
1514 cnt
= test
->area
.max_sz
/ sz
;
1515 dev_addr
= test
->area
.dev_addr
;
1516 getnstimeofday(&ts1
);
1517 for (i
= 0; i
< cnt
; i
++) {
1518 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 0, 0, 0);
1521 dev_addr
+= (sz
>> 9);
1523 getnstimeofday(&ts2
);
1524 mmc_test_print_avg_rate(test
, sz
, cnt
, &ts1
, &ts2
);
1530 * Consecutive write performance by transfer size.
1532 static int mmc_test_profile_seq_write_perf(struct mmc_test_card
*test
)
1534 unsigned int sz
, dev_addr
, i
, cnt
;
1535 struct timespec ts1
, ts2
;
1538 for (sz
= 512; sz
<= test
->area
.max_sz
; sz
<<= 1) {
1539 ret
= mmc_test_area_erase(test
);
1542 cnt
= test
->area
.max_sz
/ sz
;
1543 dev_addr
= test
->area
.dev_addr
;
1544 getnstimeofday(&ts1
);
1545 for (i
= 0; i
< cnt
; i
++) {
1546 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 1, 0, 0);
1549 dev_addr
+= (sz
>> 9);
1551 getnstimeofday(&ts2
);
1552 mmc_test_print_avg_rate(test
, sz
, cnt
, &ts1
, &ts2
);
1558 * Consecutive trim performance by transfer size.
1560 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card
*test
)
1562 unsigned int sz
, dev_addr
, i
, cnt
;
1563 struct timespec ts1
, ts2
;
1566 if (!mmc_can_trim(test
->card
))
1567 return RESULT_UNSUP_CARD
;
1569 if (!mmc_can_erase(test
->card
))
1570 return RESULT_UNSUP_HOST
;
1572 for (sz
= 512; sz
<= test
->area
.max_sz
; sz
<<= 1) {
1573 ret
= mmc_test_area_erase(test
);
1576 ret
= mmc_test_area_fill(test
);
1579 cnt
= test
->area
.max_sz
/ sz
;
1580 dev_addr
= test
->area
.dev_addr
;
1581 getnstimeofday(&ts1
);
1582 for (i
= 0; i
< cnt
; i
++) {
1583 ret
= mmc_erase(test
->card
, dev_addr
, sz
>> 9,
1587 dev_addr
+= (sz
>> 9);
1589 getnstimeofday(&ts2
);
1590 mmc_test_print_avg_rate(test
, sz
, cnt
, &ts1
, &ts2
);
/*
 * Table of all test cases.  A test case may supply an optional @prepare
 * and @cleanup hook around its mandatory @run hook; mmc_test_run()
 * iterates this table and a test is selected by its 1-based index here.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/*
	 * Without CONFIG_HIGHMEM the highmem slots are kept (so test case
	 * numbering stays stable) but just report "unsupported".
	 */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
/* Serializes test runs; also taken (empty) by the sysfs show handler. */
static DEFINE_MUTEX(mmc_test_lock);
/*
 * Run one test case (1-based @testcase) or, when @testcase is 0, every
 * entry in mmc_test_cases[].  Claims the host for the whole run; each
 * case's optional prepare/cleanup hooks bracket its run hook, and all
 * results are reported via printk.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		/* Non-zero testcase selects exactly one entry by 1-based index. */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				/* Skip run/cleanup if prepare failed. */
				continue;
			}
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Cleanup always runs after run(), even on failure. */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
/*
 * sysfs read handler for the "test" attribute.  Produces no output;
 * the empty lock/unlock pair only makes the read block until any
 * in-progress test run (which holds mmc_test_lock) has finished.
 */
static ssize_t mmc_test_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	mutex_lock(&mmc_test_lock);
	mutex_unlock(&mmc_test_lock);

	return 0;
}
/*
 * sysfs write handler for the "test" attribute.  The written string is
 * parsed as a decimal test-case number (0 = run all tests).  Allocates
 * the per-run test state and buffers, runs the tests under
 * mmc_test_lock, then frees everything.
 */
static ssize_t mmc_test_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_card *card;
	struct mmc_test_card *test;
	int testcase;

	card = container_of(dev, struct mmc_card, dev);

	testcase = simple_strtol(buf, NULL, 10);

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	/* Separate highmem pages so the highmem test cases have real highmem. */
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

	/* Only run if every needed allocation succeeded. */
#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
/* /sys/.../test: write a case number to run tests, read blocks until done. */
static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
1960 static int mmc_test_probe(struct mmc_card
*card
)
1964 if ((card
->type
!= MMC_TYPE_MMC
) && (card
->type
!= MMC_TYPE_SD
))
1967 ret
= device_create_file(&card
->dev
, &dev_attr_test
);
1971 dev_info(&card
->dev
, "Card claimed for testing.\n");
/* Unbind: tear down the sysfs attribute created by mmc_test_probe(). */
static void mmc_test_remove(struct mmc_card *card)
{
	device_remove_file(&card->dev, &dev_attr_test);
}
/* MMC bus driver glue: probe/remove hooks registered at module init. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
/* Module entry point: register the test driver on the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
/* Module exit point: unregister the test driver from the MMC bus. */
static void __exit mmc_test_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
}
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");