/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ      65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests, discards and secure erases.
         */
        if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
            req_op(req) != REQ_OP_SECURE_ERASE) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

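/*
 * Per-card worker thread: fetches requests from the block layer and
 * hands them to mq->issue_fn. Two request slots (mqrq_cur/mqrq_prev)
 * allow a new request to be prepared while the previous one is still
 * in flight; thread_sem is dropped while idle so the queue can be
 * suspended.
 */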
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

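        /*
         * This thread may be needed to make forward progress on I/O
         * during memory reclaim, so allow it to dip into the memory
         * reserves (PF_MEMALLOC).
         */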
        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        bool req_is_special = mmc_req_is_special(req);

                        set_current_state(TASK_RUNNING);
                        mq->issue_fn(mq, req);
                        cond_resched();
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * The current request becomes the previous one,
                         * and vice versa. A special request has already
                         * been completed at this point, so do not carry
                         * it over as the previous request.
                         */
                        if (req_is_special)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        swap(mq->mqrq_prev, mq->mqrq_cur);
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

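        /*
         * The queue is being torn down (mmc_cleanup_queue() has cleared
         * queuedata): fail any requests still arriving.
         */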
        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * A new request arrived while the MMC thread may be
                 * blocked waiting for the previous request to complete,
                 * with no current request fetched.
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

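/*
 * Allocate and initialise a scatterlist of sg_len entries. On failure
 * *err is set to -ENOMEM and NULL is returned.
 */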
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

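/*
 * Derive the block layer discard settings from the card's erase
 * capabilities: maximum discard size, granularity, whether discarded
 * blocks read back as zeroes, and secure erase support.
 */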
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
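        /*
         * Hosts that can only handle a single segment get a contiguous
         * bounce buffer (up to MMC_QUEUE_BOUNCESZ, 64 KiB) so scattered
         * requests can still be issued as one transfer. Both request
         * slots need a buffer, or bouncing is disabled entirely.
         */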
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warn("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        } else {
                                mqrq_prev->bounce_buf =
                                                kmalloc(bouncesz, GFP_KERNEL);
                                if (!mqrq_prev->bounce_buf) {
                                        pr_warn("%s: unable to allocate bounce prev buffer\n",
                                                mmc_card_name(card));
                                        kfree(mqrq_cur->bounce_buf);
                                        mqrq_cur->bounce_buf = NULL;
                                }
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}

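/*
 * Queue lifecycle sketch (hypothetical caller, modelled on the mmc_blk
 * driver; the issue function and subname below are illustrative):
 *
 *      ret = mmc_init_queue(&mq, card, &lock, "part0");
 *      if (ret)
 *              return ret;
 *      mq.issue_fn = my_issue_fn;
 *      ...
 *      mmc_cleanup_queue(&mq);
 */
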
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

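/*
 * Allocate the packed command state (eMMC packed commands) for both
 * request slots. It is released again with mmc_packed_clean().
 */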
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}

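/* Release the packed command state allocated by mmc_packed_init(). */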
void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

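/*
 * Map the requests of a packed command into a single scatterlist. For
 * packed writes the packed command header is prepended as additional
 * sg entries. Returns the total number of sg entries used.
 */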
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        sg_unmark_end(__sg++);
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                sg_unmark_end(__sg++);
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

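        /*
         * Bouncing: map the request into the bounce sg list, then hand
         * the host a single sg entry covering the contiguous bounce
         * buffer.
         */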
        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}