/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -EIO);
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

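/*
 * Bounce buffer sizing, as a worked example (host values below are
 * hypothetical, purely for illustration): for a controller that only
 * supports a single hardware segment, with max_req_size == 32768,
 * max_seg_size == 65536 and max_blk_count == 128, bouncesz starts at
 * MMC_QUEUE_BOUNCESZ (65536), is clamped to max_req_size (32768) and
 * is not reduced further by max_seg_size or max_blk_count * 512
 * (both 65536).  The queue therefore gets a 32 KiB bounce buffer and
 * requests of up to bouncesz / 512 = 64 sectors, instead of being
 * limited to one physical segment per request.
 */
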
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

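/*
 * Teardown ordering in mmc_cleanup_queue() below: q->queuedata is
 * cleared under the queue lock first, so any request arriving after
 * this point is completed with -EIO by mmc_request() instead of being
 * handed to a thread that is about to exit.  The queue is resumed
 * before kthread_stop() because a suspended queue holds thread_sem
 * (taken by mmc_queue_suspend()); the worker would then block in
 * down() and never reach its kthread_should_stop() check.
 */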
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}