Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mmc: queue: Introduce queue depth and use it to allocate and free

Add a mmc_queue member to record the size of the queue, which currently
supports 2 requests on-the-go at a time. Instead of allocating resources
for 2 slots in the queue, allow for an arbitrary number.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
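
For illustration only, here is a minimal userspace C sketch of the pattern this change introduces: record the queue depth once, allocate that many request slots dynamically, and keep the existing "current"/"previous" pointers aimed at slots 0 and 1. The names (toy_queue, toy_req, toy_queue_init) are hypothetical, and calloc/free stand in for the kernel's kcalloc/kfree; this is not the driver code itself.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct mmc_queue_req: one per-request slot. */
struct toy_req {
        void *bounce_buf;
};

/* Hypothetical stand-in for struct mmc_queue: the depth is recorded once
 * and every per-slot loop runs to qdepth instead of a hard-coded 2. */
struct toy_queue {
        struct toy_req *mqrq;
        struct toy_req *mqrq_cur;
        struct toy_req *mqrq_prev;
        int qdepth;
};

static int toy_queue_init(struct toy_queue *q, int qdepth)
{
        if (qdepth < 2)                 /* cur/prev pointers need two slots */
                return -1;
        q->qdepth = qdepth;
        q->mqrq = calloc(q->qdepth, sizeof(*q->mqrq));  /* kcalloc() analogue */
        if (!q->mqrq)
                return -1;
        /* Existing users still see a "current" and a "previous" slot. */
        q->mqrq_cur = &q->mqrq[0];
        q->mqrq_prev = &q->mqrq[1];
        return 0;
}

static void toy_queue_free(struct toy_queue *q)
{
        free(q->mqrq);                  /* kfree() analogue */
        q->mqrq = NULL;
}

int main(void)
{
        struct toy_queue q;

        if (toy_queue_init(&q, 2))      /* depth 2 preserves today's behaviour */
                return 1;
        printf("allocated %d request slots\n", q.qdepth);
        toy_queue_free(&q);
        return 0;
}

Initialising the depth to 2 keeps the current behaviour while leaving the data structures ready for deeper queues, which is the point of the patch below.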

Authored by Adrian Hunter and committed by Ulf Hansson
c5bda0ca c09949cf

+58 -58 total

drivers/mmc/card/queue.c (+56 -57)
···
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
                                         unsigned int bouncesz)
 {
-        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+        int i;

-        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-        if (!mqrq_cur->bounce_buf) {
-                pr_warn("%s: unable to allocate bounce cur buffer\n",
-                        mmc_card_name(mq->card));
-                return false;
-        }
-
-        mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-        if (!mqrq_prev->bounce_buf) {
-                pr_warn("%s: unable to allocate bounce prev buffer\n",
-                        mmc_card_name(mq->card));
-                kfree(mqrq_cur->bounce_buf);
-                mqrq_cur->bounce_buf = NULL;
-                return false;
+        for (i = 0; i < mq->qdepth; i++) {
+                mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+                if (!mq->mqrq[i].bounce_buf)
+                        goto out_err;
         }

         return true;
+
+out_err:
+        while (--i >= 0) {
+                kfree(mq->mqrq[i].bounce_buf);
+                mq->mqrq[i].bounce_buf = NULL;
+        }
+        pr_warn("%s: unable to allocate bounce buffers\n",
+                mmc_card_name(mq->card));
+        return false;
 }

 static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
                                       unsigned int bouncesz)
 {
-        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-        int ret;
+        int i, ret;

-        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-        if (ret)
-                return ret;
+        for (i = 0; i < mq->qdepth; i++) {
+                mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+                if (ret)
+                        return ret;

-        mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-        if (ret)
-                return ret;
+                mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+                if (ret)
+                        return ret;
+        }

-        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-        if (ret)
-                return ret;
-
-        mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-
-        return ret;
+        return 0;
 }
 #endif

 static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
 {
-        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-        int ret;
+        int i, ret;

-        mqrq_cur->sg = mmc_alloc_sg(max_segs, &ret);
-        if (ret)
-                return ret;
+        for (i = 0; i < mq->qdepth; i++) {
+                mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+                if (ret)
+                        return ret;
+        }

-        mqrq_prev->sg = mmc_alloc_sg(max_segs, &ret);
+        return 0;
+}

-        return ret;
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+        kfree(mqrq->bounce_sg);
+        mqrq->bounce_sg = NULL;
+
+        kfree(mqrq->sg);
+        mqrq->sg = NULL;
+
+        kfree(mqrq->bounce_buf);
+        mqrq->bounce_buf = NULL;
 }

 static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
 {
-        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+        int i;

-        kfree(mqrq_cur->bounce_sg);
-        mqrq_cur->bounce_sg = NULL;
-        kfree(mqrq_prev->bounce_sg);
-        mqrq_prev->bounce_sg = NULL;
-
-        kfree(mqrq_cur->sg);
-        mqrq_cur->sg = NULL;
-        kfree(mqrq_cur->bounce_buf);
-        mqrq_cur->bounce_buf = NULL;
-
-        kfree(mqrq_prev->sg);
-        mqrq_prev->sg = NULL;
-        kfree(mqrq_prev->bounce_buf);
-        mqrq_prev->bounce_buf = NULL;
+        for (i = 0; i < mq->qdepth; i++)
+                mmc_queue_req_free_bufs(&mq->mqrq[i]);
 }

 /**
···
         struct mmc_host *host = card->host;
         u64 limit = BLK_BOUNCE_HIGH;
         bool bounce = false;
-        int ret;
+        int ret = -ENOMEM;

         if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
···
         if (!mq->queue)
                 return -ENOMEM;

+        mq->qdepth = 2;
+        mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+                           GFP_KERNEL);
+        if (!mq->mqrq)
+                goto blk_cleanup;
         mq->mqrq_cur = &mq->mqrq[0];
         mq->mqrq_prev = &mq->mqrq[1];
         mq->queue->queuedata = mq;
···

 cleanup_queue:
         mmc_queue_reqs_free_bufs(mq);
+        kfree(mq->mqrq);
+        mq->mqrq = NULL;
+blk_cleanup:
         blk_cleanup_queue(mq->queue);
         return ret;
 }
···
         spin_unlock_irqrestore(q->queue_lock, flags);

         mmc_queue_reqs_free_bufs(mq);
+        kfree(mq->mqrq);
+        mq->mqrq = NULL;

         mq->card = NULL;
 }
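
A side note on the error handling above: the new mmc_queue_alloc_bounce_bufs() releases whatever was already allocated before reporting failure. Below is a standalone sketch of that unwind-on-partial-failure idiom, with malloc/free standing in for kmalloc/kfree and hypothetical names (struct slot, alloc_bounce_bufs); it is an illustration, not the driver code.

#include <stdlib.h>

/* Hypothetical per-request slot. */
struct slot {
        void *bounce_buf;
};

/* Allocate one buffer per slot; on the first failure, walk back over the
 * slots that did succeed before reporting the error. */
static int alloc_bounce_bufs(struct slot *slots, int qdepth, size_t bouncesz)
{
        int i;

        for (i = 0; i < qdepth; i++) {
                slots[i].bounce_buf = malloc(bouncesz);
                if (!slots[i].bounce_buf)
                        goto out_err;
        }
        return 0;

out_err:
        /* --i first skips the slot whose allocation just failed. */
        while (--i >= 0) {
                free(slots[i].bounce_buf);
                slots[i].bounce_buf = NULL;
        }
        return -1;
}

int main(void)
{
        struct slot slots[2] = { { NULL }, { NULL } };
        int i, ret = alloc_bounce_bufs(slots, 2, 512);

        for (i = 0; !ret && i < 2; i++)
                free(slots[i].bounce_buf);
        return ret ? 1 : 0;
}

Decrementing i before the first free means only the buffers that were successfully allocated are released, which is exactly what the loop in the patch relies on.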
drivers/mmc/card/queue.h (+2 -1)
···
         bool asleep;
         struct mmc_blk_data *blkdata;
         struct request_queue *queue;
-        struct mmc_queue_req mqrq[2];
+        struct mmc_queue_req *mqrq;
         struct mmc_queue_req *mqrq_cur;
         struct mmc_queue_req *mqrq_prev;
+        int qdepth;
 };

 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
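
A brief sketch of how teardown generalizes once the depth lives in the struct: a per-slot helper frees one request's buffers and a loop over qdepth applies it to each slot, mirroring the new mmc_queue_req_free_bufs()/mmc_queue_reqs_free_bufs() split. This is plain userspace C with hypothetical names (toy_req, free_one_slot, free_all_slots), not the kernel code.

#include <stdlib.h>

/* Hypothetical per-request slot holding the buffers a real slot would own. */
struct toy_req {
        void *sg;
        void *bounce_sg;
        void *bounce_buf;
};

/* Free whatever one slot holds; safe on a slot that was only partly set up. */
static void free_one_slot(struct toy_req *r)
{
        free(r->bounce_sg);
        r->bounce_sg = NULL;
        free(r->sg);
        r->sg = NULL;
        free(r->bounce_buf);
        r->bounce_buf = NULL;
}

/* Queue-wide teardown no longer hard-codes two slots. */
static void free_all_slots(struct toy_req *mqrq, int qdepth)
{
        int i;

        for (i = 0; i < qdepth; i++)
                free_one_slot(&mqrq[i]);
}

int main(void)
{
        struct toy_req slots[4] = { { NULL, NULL, NULL } };

        slots[0].bounce_buf = malloc(64);       /* simulate a partly built queue */
        free_all_slots(slots, 4);
        return 0;
}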