drivers/mmc/card/queue.c at v2.6.23
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}

unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}

void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}
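
For context, here is a rough usage sketch (not part of queue.c) of how a consumer of this API, such as the MMC block driver in drivers/mmc/card/block.c, is expected to drive it: allocate a struct mmc_queue, hand it to mmc_init_queue() together with a driver-owned spinlock, install an issue_fn, and call the map/bounce helpers from that issue path. The names my_probe() and issue_my_rq() below are hypothetical; only the mmc_queue_* functions and the issue_fn/data fields come from this file and queue.h.

/* Hypothetical consumer sketch; only the mmc_queue_* calls are real API. */
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/mmc/card.h>
#include "queue.h"

static int issue_my_rq(struct mmc_queue *mq, struct request *req)
{
	unsigned int sg_len;

	/* Build mq->sg; with a bounce buffer this collapses to one entry. */
	sg_len = mmc_queue_map_sg(mq);
	pr_debug("mapped %u segment(s)\n", sg_len);

	/* For writes, copy the request data into the bounce buffer first. */
	mmc_queue_bounce_pre(mq);

	/* ... hand mq->sg to the host controller and wait for completion ... */

	/* For reads, copy the bounced data back into the request's pages. */
	mmc_queue_bounce_post(mq);

	/* ... complete the request with end_that_request_*() ... */

	return 1;	/* the return value is not inspected by mmc_queue_thread() */
}

static int my_probe(struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_queue *mq;
	int ret;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return -ENOMEM;

	ret = mmc_init_queue(mq, card, lock);	/* spawns the "mmcqd" thread */
	if (ret) {
		kfree(mq);
		return ret;
	}

	mq->issue_fn = issue_my_rq;	/* called from mmc_queue_thread() */
	mq->data = mq;			/* driver-private pointer */

	/* ... set up and register the gendisk here ... */

	return 0;
}

A driver built this way would call mmc_queue_suspend(mq) from its suspend hook before the card is powered down, mmc_queue_resume(mq) on the way back up, and mmc_cleanup_queue(mq) followed by kfree(mq) when the card is removed.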