drivers/mmc/card/queue.c (Linux v3.9-rc2)
/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ      65536


#define MMC_REQ_SPECIAL_MASK    (REQ_DISCARD | REQ_FLUSH)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && mmc_card_removed(mq->card))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
                struct mmc_queue_req *tmp;
                unsigned int cmd_flags = 0;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
                        cmd_flags = req ? req->cmd_flags : 0;
                        mq->issue_fn(mq, req);
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * Current request becomes previous request
                         * and vice versa.
                         * In case of special requests, current request
                         * has been finished. Do not assign it to previous
                         * request.
                         */
                        if (cmd_flags & MMC_REQ_SPECIAL_MASK)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        tmp = mq->mqrq_prev;
                        mq->mqrq_prev = mq->mqrq_cur;
                        mq->mqrq_cur = tmp;
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * New MMC request arrived when MMC thread may be
                 * blocked on the previous request to be complete
                 * with no current request fetched
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        q->limits.max_discard_sectors = max_discard;
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warning("%s: unable to "
                                        "allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        }
                        mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_prev->bounce_buf) {
                                pr_warning("%s: unable to "
                                        "allocate bounce prev buffer\n",
                                        mmc_card_name(card));
                                kfree(mqrq_cur->bounce_buf);
                                mqrq_cur->bounce_buf = NULL;
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;


                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");
subname : ""); 296 297 if (IS_ERR(mq->thread)) { 298 ret = PTR_ERR(mq->thread); 299 goto free_bounce_sg; 300 } 301 302 return 0; 303 free_bounce_sg: 304 kfree(mqrq_cur->bounce_sg); 305 mqrq_cur->bounce_sg = NULL; 306 kfree(mqrq_prev->bounce_sg); 307 mqrq_prev->bounce_sg = NULL; 308 309 cleanup_queue: 310 kfree(mqrq_cur->sg); 311 mqrq_cur->sg = NULL; 312 kfree(mqrq_cur->bounce_buf); 313 mqrq_cur->bounce_buf = NULL; 314 315 kfree(mqrq_prev->sg); 316 mqrq_prev->sg = NULL; 317 kfree(mqrq_prev->bounce_buf); 318 mqrq_prev->bounce_buf = NULL; 319 320 blk_cleanup_queue(mq->queue); 321 return ret; 322} 323 324void mmc_cleanup_queue(struct mmc_queue *mq) 325{ 326 struct request_queue *q = mq->queue; 327 unsigned long flags; 328 struct mmc_queue_req *mqrq_cur = mq->mqrq_cur; 329 struct mmc_queue_req *mqrq_prev = mq->mqrq_prev; 330 331 /* Make sure the queue isn't suspended, as that will deadlock */ 332 mmc_queue_resume(mq); 333 334 /* Then terminate our worker thread */ 335 kthread_stop(mq->thread); 336 337 /* Empty the queue */ 338 spin_lock_irqsave(q->queue_lock, flags); 339 q->queuedata = NULL; 340 blk_start_queue(q); 341 spin_unlock_irqrestore(q->queue_lock, flags); 342 343 kfree(mqrq_cur->bounce_sg); 344 mqrq_cur->bounce_sg = NULL; 345 346 kfree(mqrq_cur->sg); 347 mqrq_cur->sg = NULL; 348 349 kfree(mqrq_cur->bounce_buf); 350 mqrq_cur->bounce_buf = NULL; 351 352 kfree(mqrq_prev->bounce_sg); 353 mqrq_prev->bounce_sg = NULL; 354 355 kfree(mqrq_prev->sg); 356 mqrq_prev->sg = NULL; 357 358 kfree(mqrq_prev->bounce_buf); 359 mqrq_prev->bounce_buf = NULL; 360 361 mq->card = NULL; 362} 363EXPORT_SYMBOL(mmc_cleanup_queue); 364 365int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) 366{ 367 struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; 368 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 369 int ret = 0; 370 371 372 mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); 373 if (!mqrq_cur->packed) { 374 pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n", 375 mmc_card_name(card)); 376 ret = -ENOMEM; 377 goto out; 378 } 379 380 mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); 381 if (!mqrq_prev->packed) { 382 pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n", 383 mmc_card_name(card)); 384 kfree(mqrq_cur->packed); 385 mqrq_cur->packed = NULL; 386 ret = -ENOMEM; 387 goto out; 388 } 389 390 INIT_LIST_HEAD(&mqrq_cur->packed->list); 391 INIT_LIST_HEAD(&mqrq_prev->packed->list); 392 393out: 394 return ret; 395} 396 397void mmc_packed_clean(struct mmc_queue *mq) 398{ 399 struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; 400 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 401 402 kfree(mqrq_cur->packed); 403 mqrq_cur->packed = NULL; 404 kfree(mqrq_prev->packed); 405 mqrq_prev->packed = NULL; 406} 407 408/** 409 * mmc_queue_suspend - suspend a MMC request queue 410 * @mq: MMC queue to suspend 411 * 412 * Stop the block request queue, and wait for our thread to 413 * complete any outstanding requests. This ensures that we 414 * won't suspend while a request is being processed. 
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        (__sg++)->page_link &= ~0x02;
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                (__sg++)->page_link &= ~0x02;
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg,
                mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}
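
A minimal usage sketch follows, assuming only the queue.h interface used above (struct mmc_queue with the issue_fn hook that mmc_queue_thread() calls and its data pointer). The my_blk_data structure and the my_issue_rq, my_blk_probe and my_blk_remove functions are hypothetical stand-ins for a card block driver, not part of this file; they only illustrate the expected call order: mmc_init_queue() creates the request queue and starts the mmcqd worker thread, the caller then installs its issue_fn, and mmc_cleanup_queue() stops the thread and frees the scatterlist and bounce buffers on teardown.

/* Usage sketch only -- not part of queue.c. */
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/mmc/card.h>
#include "queue.h"

struct my_blk_data {                    /* hypothetical driver-private data */
        spinlock_t lock;
        struct mmc_queue queue;
};

/* Hypothetical request handler, invoked from mmc_queue_thread() above. */
static int my_issue_rq(struct mmc_queue *mq, struct request *req)
{
        /* A real handler would translate req into MMC commands here. */
        return 0;
}

static int my_blk_probe(struct mmc_card *card, struct my_blk_data *md)
{
        int ret;

        spin_lock_init(&md->lock);

        /* Creates md->queue.queue and starts the "mmcqd" worker thread. */
        ret = mmc_init_queue(&md->queue, card, &md->lock, NULL);
        if (ret)
                return ret;

        /* Install the handler the worker thread calls for each request. */
        md->queue.issue_fn = my_issue_rq;
        md->queue.data = md;
        return 0;
}

static void my_blk_remove(struct my_blk_data *md)
{
        /* Drains the queue, stops mmcqd and frees the sg/bounce buffers. */
        mmc_cleanup_queue(&md->queue);
}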