Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: stop abusing the request queue_lock pointer

Replace the lock in mmc_blk_data that is only used through a pointer
in struct mmc_queue and to protect fields in that structure with
an actual lock in struct mmc_queue.

Suggested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe.
f5d72c5c f0484273

+28 -31
+11 -13
drivers/mmc/core/block.c
··· 100 100 * There is one mmc_blk_data per slot. 101 101 */ 102 102 struct mmc_blk_data { 103 - spinlock_t lock; 104 103 struct device *parent; 105 104 struct gendisk *disk; 106 105 struct mmc_queue queue; ··· 1482 1483 blk_mq_end_request(req, BLK_STS_OK); 1483 1484 } 1484 1485 1485 - spin_lock_irqsave(mq->lock, flags); 1486 + spin_lock_irqsave(&mq->lock, flags); 1486 1487 1487 1488 mq->in_flight[mmc_issue_type(mq, req)] -= 1; 1488 1489 ··· 1490 1491 1491 1492 mmc_cqe_check_busy(mq); 1492 1493 1493 - spin_unlock_irqrestore(mq->lock, flags); 1494 + spin_unlock_irqrestore(&mq->lock, flags); 1494 1495 1495 1496 if (!mq->cqe_busy) 1496 1497 blk_mq_run_hw_queues(q, true); ··· 1990 1991 unsigned long flags; 1991 1992 bool put_card; 1992 1993 1993 - spin_lock_irqsave(mq->lock, flags); 1994 + spin_lock_irqsave(&mq->lock, flags); 1994 1995 1995 1996 mq->in_flight[mmc_issue_type(mq, req)] -= 1; 1996 1997 1997 1998 put_card = (mmc_tot_in_flight(mq) == 0); 1998 1999 1999 - spin_unlock_irqrestore(mq->lock, flags); 2000 + spin_unlock_irqrestore(&mq->lock, flags); 2000 2001 2001 2002 if (put_card) 2002 2003 mmc_put_card(mq->card, &mq->ctx); ··· 2092 2093 * request does not need to wait (although it does need to 2093 2094 * complete complete_req first). 
2094 2095 */ 2095 - spin_lock_irqsave(mq->lock, flags); 2096 + spin_lock_irqsave(&mq->lock, flags); 2096 2097 mq->complete_req = req; 2097 2098 mq->rw_wait = false; 2098 2099 waiting = mq->waiting; 2099 - spin_unlock_irqrestore(mq->lock, flags); 2100 + spin_unlock_irqrestore(&mq->lock, flags); 2100 2101 2101 2102 /* 2102 2103 * If 'waiting' then the waiting task will complete this ··· 2115 2116 /* Take the recovery path for errors or urgent background operations */ 2116 2117 if (mmc_blk_rq_error(&mqrq->brq) || 2117 2118 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2118 - spin_lock_irqsave(mq->lock, flags); 2119 + spin_lock_irqsave(&mq->lock, flags); 2119 2120 mq->recovery_needed = true; 2120 2121 mq->recovery_req = req; 2121 - spin_unlock_irqrestore(mq->lock, flags); 2122 + spin_unlock_irqrestore(&mq->lock, flags); 2122 2123 wake_up(&mq->wait); 2123 2124 schedule_work(&mq->recovery_work); 2124 2125 return; ··· 2141 2142 * Wait while there is another request in progress, but not if recovery 2142 2143 * is needed. Also indicate whether there is a request waiting to start. 2143 2144 */ 2144 - spin_lock_irqsave(mq->lock, flags); 2145 + spin_lock_irqsave(&mq->lock, flags); 2145 2146 if (mq->recovery_needed) { 2146 2147 *err = -EBUSY; 2147 2148 done = true; ··· 2149 2150 done = !mq->rw_wait; 2150 2151 } 2151 2152 mq->waiting = !done; 2152 - spin_unlock_irqrestore(mq->lock, flags); 2153 + spin_unlock_irqrestore(&mq->lock, flags); 2153 2154 2154 2155 return done; 2155 2156 } ··· 2326 2327 goto err_kfree; 2327 2328 } 2328 2329 2329 - spin_lock_init(&md->lock); 2330 2330 INIT_LIST_HEAD(&md->part); 2331 2331 INIT_LIST_HEAD(&md->rpmbs); 2332 2332 md->usage = 1; 2333 2333 2334 - ret = mmc_init_queue(&md->queue, card, &md->lock); 2334 + ret = mmc_init_queue(&md->queue, card); 2335 2335 if (ret) 2336 2336 goto err_putdisk; 2337 2337
+15 -16
drivers/mmc/core/queue.c
··· 89 89 struct mmc_queue *mq = q->queuedata; 90 90 unsigned long flags; 91 91 92 - spin_lock_irqsave(mq->lock, flags); 92 + spin_lock_irqsave(&mq->lock, flags); 93 93 __mmc_cqe_recovery_notifier(mq); 94 - spin_unlock_irqrestore(mq->lock, flags); 94 + spin_unlock_irqrestore(&mq->lock, flags); 95 95 } 96 96 97 97 static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) ··· 128 128 unsigned long flags; 129 129 int ret; 130 130 131 - spin_lock_irqsave(mq->lock, flags); 131 + spin_lock_irqsave(&mq->lock, flags); 132 132 133 133 if (mq->recovery_needed || !mq->use_cqe) 134 134 ret = BLK_EH_RESET_TIMER; 135 135 else 136 136 ret = mmc_cqe_timed_out(req); 137 137 138 - spin_unlock_irqrestore(mq->lock, flags); 138 + spin_unlock_irqrestore(&mq->lock, flags); 139 139 140 140 return ret; 141 141 } ··· 157 157 158 158 mq->in_recovery = false; 159 159 160 - spin_lock_irq(mq->lock); 160 + spin_lock_irq(&mq->lock); 161 161 mq->recovery_needed = false; 162 - spin_unlock_irq(mq->lock); 162 + spin_unlock_irq(&mq->lock); 163 163 164 164 mmc_put_card(mq->card, &mq->ctx); 165 165 ··· 258 258 259 259 issue_type = mmc_issue_type(mq, req); 260 260 261 - spin_lock_irq(mq->lock); 261 + spin_lock_irq(&mq->lock); 262 262 263 263 if (mq->recovery_needed || mq->busy) { 264 - spin_unlock_irq(mq->lock); 264 + spin_unlock_irq(&mq->lock); 265 265 return BLK_STS_RESOURCE; 266 266 } 267 267 ··· 269 269 case MMC_ISSUE_DCMD: 270 270 if (mmc_cqe_dcmd_busy(mq)) { 271 271 mq->cqe_busy |= MMC_CQE_DCMD_BUSY; 272 - spin_unlock_irq(mq->lock); 272 + spin_unlock_irq(&mq->lock); 273 273 return BLK_STS_RESOURCE; 274 274 } 275 275 break; ··· 294 294 get_card = (mmc_tot_in_flight(mq) == 1); 295 295 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); 296 296 297 - spin_unlock_irq(mq->lock); 297 + spin_unlock_irq(&mq->lock); 298 298 299 299 if (!(req->rq_flags & RQF_DONTPREP)) { 300 300 req_to_mmc_queue_req(req)->retries = 0; ··· 328 328 if (issued != MMC_REQ_STARTED) { 329 329 bool put_card = false; 330 330 331 - 
spin_lock_irq(mq->lock); 331 + spin_lock_irq(&mq->lock); 332 332 mq->in_flight[issue_type] -= 1; 333 333 if (mmc_tot_in_flight(mq) == 0) 334 334 put_card = true; 335 335 mq->busy = false; 336 - spin_unlock_irq(mq->lock); 336 + spin_unlock_irq(&mq->lock); 337 337 if (put_card) 338 338 mmc_put_card(card, &mq->ctx); 339 339 } else { ··· 385 385 * mmc_init_queue - initialise a queue structure. 386 386 * @mq: mmc queue 387 387 * @card: mmc card to attach this queue 388 - * @lock: queue lock 389 388 * 390 389 * Initialise a MMC card request queue. 391 390 */ 392 - int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, 393 - spinlock_t *lock) 391 + int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) 394 392 { 395 393 struct mmc_host *host = card->host; 396 394 int ret; 397 395 398 396 mq->card = card; 399 - mq->lock = lock; 400 397 mq->use_cqe = host->cqe_enabled; 398 + 399 + spin_lock_init(&mq->lock); 401 400 402 401 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); 403 402 mq->tag_set.ops = &mmc_mq_ops;
+2 -2
drivers/mmc/core/queue.h
··· 73 73 74 74 struct mmc_queue { 75 75 struct mmc_card *card; 76 - spinlock_t *lock; 77 76 struct mmc_ctx ctx; 78 77 struct blk_mq_tag_set tag_set; 79 78 struct mmc_blk_data *blkdata; 80 79 struct request_queue *queue; 80 + spinlock_t lock; 81 81 int in_flight[MMC_ISSUE_MAX]; 82 82 unsigned int cqe_busy; 83 83 #define MMC_CQE_DCMD_BUSY BIT(0) ··· 96 96 struct work_struct complete_work; 97 97 }; 98 98 99 - extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); 99 + extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *); 100 100 extern void mmc_cleanup_queue(struct mmc_queue *); 101 101 extern void mmc_queue_suspend(struct mmc_queue *); 102 102 extern void mmc_queue_resume(struct mmc_queue *);