/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

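/*
 * Default size of the bounce buffer used for hosts limited to a single
 * segment; clamped below to the host's request size, segment size and
 * block-count limits.
 */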
#define MMC_QUEUE_BOUNCESZ      65536

#define MMC_QUEUE_SUSPENDED     (1 << 0)

/*
 * Prepare an MMC request.  This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && mmc_card_removed(mq->card))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

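/*
 * Worker thread that pulls requests off the block queue and hands them
 * to the issue function.  Two mmc_queue_req slots are swapped after each
 * issue so the next request can be prepared while the previous one is
 * still in flight.
 */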
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

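        /*
         * Allow access to memory reserves so that writeback through
         * this thread can always make forward progress.
         */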
        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
                struct mmc_queue_req *tmp;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
                        mq->issue_fn(mq, req);

                        /*
                         * Current request becomes previous request
                         * and vice versa.
                         */
                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        tmp = mq->mqrq_prev;
                        mq->mqrq_prev = mq->mqrq_cur;
                        mq->mqrq_cur = tmp;
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  Called by the block layer when new
 * requests are queued.  If the queue is being torn down (queuedata is
 * NULL), fail all requests; otherwise wake the worker thread if it is
 * idle.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

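/*
 * Allocate and initialise a scatterlist with sg_len entries.  On failure
 * *err is set to -ENOMEM and NULL is returned.
 */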
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
        if (!sg) {
                *err = -ENOMEM;
        } else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

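/*
 * Advertise the card's erase/discard capabilities to the block layer.
 */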
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        q->limits.max_discard_sectors = max_discard;
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
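        /*
         * Hosts that can only handle a single segment per request get a
         * contiguous bounce buffer, so larger transfers can still be
         * issued as one request and copied in and out of the buffer.
         */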
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warning("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        }
                        mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_prev->bounce_buf) {
                                pr_warning("%s: unable to allocate bounce prev buffer\n",
                                        mmc_card_name(card));
                                kfree(mqrq_cur->bounce_buf);
                                mqrq_cur->bounce_buf = NULL;
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif
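        /*
         * No bounce buffering: expose the host's real DMA and segment
         * limits directly to the block layer.
         */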
        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /*
         * Empty the queue: with queuedata cleared below, restarting the
         * queue makes mmc_request_fn() fail any remaining requests.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        int i;

        if (!mqrq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

        BUG_ON(!mqrq->bounce_sg);

        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

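        /*
         * Sum the segment lengths so the single sg entry below spans
         * the whole transfer in the bounce buffer.
         */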
        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}