Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc_block: inform block layer about sector count restriction

Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>

2 files changed, 15 insertions(+), 28 deletions(-)
+1 -22
drivers/mmc/card/block.c
@@ -215,8 +215,7 @@
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;
 
 	mmc_claim_host(card->host);
 
@@ -236,8 +237,6 @@
 	brq.stop.arg = 0;
 	brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-	if (brq.data.blocks > card->host->max_blk_count)
-		brq.data.blocks = card->host->max_blk_count;
 
 	if (brq.data.blocks > 1) {
 		/* SPI multiblock writes terminate using a special
@@ -266,24 +269,6 @@
 	brq.data.sg_len = mmc_queue_map_sg(mq);
 
 	mmc_queue_bounce_pre(mq);
-
-	/*
-	 * Adjust the sg list so it is the same size as the
-	 * request.
-	 */
-	if (brq.data.blocks !=
-	    (req->nr_sectors >> (md->block_bits - 9))) {
-		data_size = brq.data.blocks * brq.data.blksz;
-		for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-			data_size -= sg->length;
-			if (data_size <= 0) {
-				sg->length += data_size;
-				i++;
-				break;
-			}
-		}
-		brq.data.sg_len = i;
-	}
 
 	mmc_wait_for_req(card->host, &brq.mrq);
 
+14 -6
drivers/mmc/card/queue.c
@@ -142,12 +142,19 @@
 		bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
+
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -182,7 +175,8 @@
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);