/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
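	/*
	 * pref_erase is in 512-byte sectors, so the shift above yields a
	 * granularity in bytes; if the preferred erase size exceeds the
	 * largest discard we can issue, clear the granularity instead.
	 */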
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
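
/*
 * Note on the two bounce helpers above: mqrq->sg[0].length is the total
 * request size set up by sg_init_one() in mmc_queue_map_sg(), so the
 * copies cover exactly the data mapped for this request.
 */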