linux/drivers/mmc/card/queue.c at v3.0-rc2
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
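/*
 * Request hand-off in the functions above: mmc_request() only wakes
 * mmcqd; the thread pulls requests off the block queue under queue_lock
 * and passes each one to mq->issue_fn.  mq->req doubles as the
 * "request in flight" marker, and thread_sem is what
 * mmc_queue_suspend()/mmc_queue_resume() use to park the thread.  A
 * NULL queuedata means the queue is being torn down, so any remaining
 * requests are completed with -EIO.
 */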
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
		host->index);

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
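/*
 * Note on the bounce buffer set up above: when the host can only take a
 * single segment (host->max_segs == 1), a contiguous bounce buffer of at
 * most MMC_QUEUE_BOUNCESZ bytes is allocated and the queue limits are
 * clamped so no request can exceed it.  mq->sg then holds one entry
 * covering the bounce buffer, while mq->bounce_sg maps the original
 * request pages; mmc_queue_bounce_pre()/mmc_queue_bounce_post() below
 * copy the data between the two.
 */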
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}
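/*
 * Rough usage sketch (illustrative, not part of this file): the block
 * driver in drivers/mmc/card/block.c is the expected caller of this API.
 * Field and helper names below are indicative rather than exact:
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto out;
 *	md->queue.issue_fn = mmc_blk_issue_rq;	(runs in mmcqd context)
 *	md->queue.data = md;
 *
 *	mmc_queue_suspend(&md->queue);		(before host suspend)
 *	mmc_queue_resume(&md->queue);		(after host resume)
 *	mmc_cleanup_queue(&md->queue);		(on card removal)
 */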