/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

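/*
 * The queue worker thread.  The two struct mmc_queue_req slots
 * (mqrq_cur and mqrq_prev) are swapped at the end of every loop
 * iteration, which lets issue_fn start preparing the newly fetched
 * request while the previously issued one is still in flight.  For the
 * same reason issue_fn is also called with a NULL request whenever only
 * mqrq_prev->req is pending, so that the earlier request can be
 * completed.
 */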
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

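	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer: at most MMC_QUEUE_BOUNCESZ bytes,
	 * further capped by the host's max_req_size, max_seg_size and
	 * max_blk_count * 512.  The block layer limits set below follow
	 * from that final size, i.e. bouncesz / 512 sectors and segments
	 * per request.
	 */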
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;


		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

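/*
 * Suspend and resume synchronise with mmc_queue_thread through
 * thread_sem: the thread holds the semaphore while it is issuing
 * requests, so the down() in mmc_queue_suspend() only returns once any
 * in-flight request has completed, and the up() in mmc_queue_resume()
 * lets the thread run again.
 */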
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
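/*
 * Expected use of the helpers above by the block driver (a sketch of
 * the call sequence, not a definitive description): when turning a
 * fetched block request into an MMC data transfer, the caller maps it
 * with mmc_queue_map_sg() and then calls mmc_queue_bounce_pre() before
 * handing the request to the host controller; once the transfer has
 * finished, mmc_queue_bounce_post() copies read data from the bounce
 * buffer back to the request's original pages.  With bouncing in use,
 * mmc_queue_map_sg() collapses the request into the single scatterlist
 * entry mqrq->sg[0] covering the bounce buffer.
 */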