block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
drivers/mmc/card/queue.c
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

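/*
 * MMC_QUEUE_BOUNCESZ above is the default bounce buffer size (64 KiB)
 * used for hosts that can only handle a single segment; see
 * mmc_init_queue() below, where it is further capped by the host's
 * max_req_size, max_seg_size and max_blk_count limits.
 */
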
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

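/*
 * The queue thread: fetches requests off the block queue and hands them
 * to the card driver via mq->issue_fn(). thread_sem is held while the
 * thread is busy and released around schedule(), so that
 * mmc_queue_suspend() can take it and thereby wait for the thread to
 * finish the request it is currently processing.
 */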
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/* The queue is being torn down; fail all requests. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

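	/*
	 * When the host can only do single-segment transfers, requests
	 * are bounced through a contiguous buffer so the block layer
	 * can still merge multi-segment requests. The buffer size is
	 * capped by every relevant host limit below.
	 */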
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

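	/*
	 * No bounce buffer (either not needed or allocation failed):
	 * expose the host's real limits to the block layer instead.
	 */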
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

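/*
 * Tear down a queue created by mmc_init_queue(): stop the worker
 * thread, drain pending requests (mmc_request() fails them once
 * queuedata is NULL) and free the scatterlists and bounce buffer.
 */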
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * When bouncing, the request's segments are collapsed into a
	 * single scatterlist entry covering the bounce buffer.
	 */
	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}
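
/*
 * A typical issue_fn pairs these helpers per request (this mirrors the
 * MMC block driver's usage):
 *
 *	sg_len = mmc_queue_map_sg(mq);	- build the scatterlist
 *	mmc_queue_bounce_pre(mq);	- copy write data into the bounce buffer
 *	(hand the request to the host controller and wait for completion)
 *	mmc_queue_bounce_post(mq);	- copy read data back out
 */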