linux/drivers/mmc/card/queue.c at v2.6.37
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

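/*
 * The interplay between mmc_queue_thread() and mmc_request() above relies
 * on the worker marking itself TASK_INTERRUPTIBLE while still holding the
 * queue lock, before it looks for a request.  A wake_up_process() issued
 * by mmc_request() after the lock is dropped therefore either finds the
 * thread still runnable (and is a no-op) or sets it back to TASK_RUNNING,
 * so the subsequent schedule() returns promptly instead of sleeping and no
 * wake-up is lost between the check for work and the sleep.
 */
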
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
		host->index);

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

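/*
 * Usage sketch, illustrative only: roughly how a caller such as the MMC
 * block driver sets a queue up.  The my_dev and my_* names are
 * hypothetical, error handling is elided, and the issue_fn/data members
 * are assumed to be the ones declared in queue.h; see
 * drivers/mmc/card/block.c for the authoritative sequence.  On teardown
 * the driver is expected to call mmc_cleanup_queue() below.
 */
struct my_dev {
	spinlock_t		lock;
	struct mmc_queue	queue;
};

/* Runs in the mmcqd worker thread, one request at a time. */
static int my_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct my_dev *dev = mq->data;	/* back-pointer set in my_setup_queue() */

	/*
	 * ... translate 'req' into MMC command/data transfers for 'dev' and
	 * complete it; the return value is not inspected by
	 * mmc_queue_thread() ...
	 */
	return 1;
}

static int my_setup_queue(struct my_dev *dev, struct mmc_card *card)
{
	int ret;

	spin_lock_init(&dev->lock);

	ret = mmc_init_queue(&dev->queue, card, &dev->lock);
	if (ret)
		return ret;

	dev->queue.issue_fn = my_issue_rq;	/* called by mmc_queue_thread() */
	dev->queue.data = dev;

	return 0;
}
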
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}
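
/*
 * Usage sketch, illustrative only: the order in which an issue_fn is
 * expected to call the three helpers above for a single data transfer.
 * my_transfer() is hypothetical and assumes 'mrq->data' already points at
 * an initialised struct mmc_data; the real sequence lives in the block
 * driver's issue path.
 */
static void my_transfer(struct mmc_queue *mq, struct mmc_request *mrq)
{
	/*
	 * Build the scatterlist.  With a bounce buffer in use this collapses
	 * the whole request into the single preallocated mq->sg entry.
	 */
	mrq->data->sg = mq->sg;
	mrq->data->sg_len = mmc_queue_map_sg(mq);

	/* For writes, copy the payload into the bounce buffer first. */
	mmc_queue_bounce_pre(mq);

	/* Synchronous transfer; the host driver works on mq->sg. */
	mmc_wait_for_req(mq->card->host, mrq);

	/* For reads, copy the received data back out of the bounce buffer. */
	mmc_queue_bounce_post(mq);
}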