Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

block: Consolidate phys_segment and hw_segment limits

Except for SCSI no device drivers distinguish between physical and
hardware segment limits. Consolidate the two into a single segment
limit.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
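For a typical driver this collapses two queue-setup calls into one. A minimal before/after sketch (the queue pointer q and the value 128 are illustrative, not taken from this commit):

	/* Before: both limits had to be set, almost always to the same value. */
	blk_queue_max_phys_segments(q, 128);	/* longest scatterlist the driver builds */
	blk_queue_max_hw_segments(q, 128);	/* longest sg list the hardware accepts */

	/* After: a single call sets the one remaining segment limit. */
	blk_queue_max_segments(q, 128);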

authored by Martin K. Petersen, committed by Jens Axboe
8a78362c 086fa5ff

+70 -136
+1 -1
arch/um/drivers/ubd_kern.c
···
 	}
 	ubd_dev->queue->queuedata = ubd_dev;
 
-	blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if(err){
 		*error_out = "Failed to register device";
+1 -2
block/blk-core.c
···
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
+	if (rq->nr_phys_segments > queue_max_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
+2 -6
block/blk-merge.c
···
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
···
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > queue_max_phys_segments(q))
-		return 0;
-
-	if (total_phys_segments > queue_max_hw_segments(q))
+	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
 	/* Merge is OK... */
+13 -47
block/blk-settings.c
···
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
···
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q: the request queue for the device
  * @max_segments: max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request. This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
···
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request. This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
···
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
···
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			dma_drain_needed_fn *dma_drain_needed,
 			void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
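The reworded drain-buffer note above is the one ordering subtlety left by the consolidation: blk_queue_dma_drain() reserves one segment out of the current limit. A hedged sketch of the intended call order, using hypothetical names (MY_MAX_SG, my_drain_needed, drain_buf and drain_size are not from this commit):

	blk_queue_max_segments(q, MY_MAX_SG);		/* full hardware sg limit */
	blk_queue_dma_drain(q, my_drain_needed, drain_buf, drain_size);
	/* queue_max_segments(q) is now MY_MAX_SG - 1; the reserved slot is where
	 * the block layer appends the drain buffer. Setting the limit after the
	 * drain is registered instead requires passing MY_MAX_SG - 1 yourself. */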
+1 -1
drivers/ata/sata_nv.c
···
 	}
 
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 		(unsigned long long)*ap->host->dev->dma_mask,
+1 -1
drivers/block/DAC960.c
···
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
 	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-	blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;
 	sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
+1 -4
drivers/block/cciss.c
···
 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
-
-	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+	blk_queue_max_segments(disk->queue, h->maxsgentries);
 
 	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
+1 -4
drivers/block/cpqarray.c
···
 	blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(q, SG_MAX);
+	blk_queue_max_segments(q, SG_MAX);
 
-	/* This is a driver limit and could be eliminated. */
-	blk_queue_max_phys_segments(q, SG_MAX);
-
 	init_timer(&hba[i]->timer);
 	hba[i]->timer.expires = jiffies + IDA_TIMER;
 	hba[i]->timer.data = (unsigned long)hba[i];
+1 -2
drivers/block/drbd/drbd_nl.c
···
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
 	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(q, max_seg_s);
 	blk_queue_logical_block_size(q, 512);
 	blk_queue_segment_boundary(q, PAGE_SIZE-1);
+1 -2
drivers/block/paride/pf.c
···
 		return -ENOMEM;
 	}
 
-	blk_queue_max_phys_segments(pf_queue, cluster);
-	blk_queue_max_hw_segments(pf_queue, cluster);
+	blk_queue_max_segments(pf_queue, cluster);
 
 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
 		struct gendisk *disk = pf->disk;
+2 -2
drivers/block/pktcdvd.c
···
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE
-	    <= queue_max_phys_segments(q)) {
+	    <= queue_max_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
 	} else if ((pd->settings.size << 9) / PAGE_SIZE
-		   <= queue_max_phys_segments(q)) {
+		   <= queue_max_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations
+1 -2
drivers/block/ps3disk.c
···
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			  ps3disk_prepare_flush);
 
-	blk_queue_max_phys_segments(queue, -1);
-	blk_queue_max_hw_segments(queue, -1);
+	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
 
 	gendisk = alloc_disk(PS3DISK_MINORS);
+1 -2
drivers/block/ps3vram.c
···
 	priv->queue = queue;
 	queue->queuedata = dev;
 	blk_queue_make_request(queue, ps3vram_make_request);
-	blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
+	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
 	blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
 
+1 -2
drivers/block/sunvdc.c
···
 
 	port->disk = g;
 
-	blk_queue_max_hw_segments(q, port->ring_cookies);
-	blk_queue_max_phys_segments(q, port->ring_cookies);
+	blk_queue_max_segments(q, port->ring_cookies);
 	blk_queue_max_hw_sectors(q, port->max_xfer_size);
 	g->major = vdc_major;
 	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
+1 -2
drivers/block/sx8.c
···
 		break;
 	}
 	disk->queue = q;
-	blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-	blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+	blk_queue_max_segments(q, CARM_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
 
 	q->queuedata = port;
+1 -2
drivers/block/ub.c
···
 	disk->queue = q;
 
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+	blk_queue_max_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
 	blk_queue_logical_block_size(q, lun->capacity.bsize);
+1 -2
drivers/block/viodasd.c
···
 	}
 
 	d->disk = g;
-	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+	blk_queue_max_segments(q, VIOMAXBLOCKDMA);
 	blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
 	g->major = VIODASD_MAJOR;
 	g->first_minor = dev_no << PARTITION_SHIFT;
+1 -2
drivers/block/xen-blkfront.c
···
 	blk_queue_max_segment_size(rq, PAGE_SIZE);
 
 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
+1 -1
drivers/cdrom/gdrom.c
···
 {
 	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
-	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+	blk_queue_max_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
 	blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
 	gd.disk->queue = gd.gdrom_rq;
+1 -2
drivers/cdrom/viocd.c
···
 	gendisk->first_minor = deviceno;
 	strncpy(gendisk->disk_name, c->name,
 			sizeof(gendisk->disk_name));
-	blk_queue_max_hw_segments(q, 1);
-	blk_queue_max_phys_segments(q, 1);
+	blk_queue_max_segments(q, 1);
 	blk_queue_max_hw_sectors(q, 4096 / 512);
 	gendisk->queue = q;
 	gendisk->fops = &viocd_fops;
+1 -2
drivers/ide/ide-probe.c
···
 		max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */
 
-	blk_queue_max_hw_segments(q, max_sg_entries);
-	blk_queue_max_phys_segments(q, max_sg_entries);
+	blk_queue_max_segments(q, max_sg_entries);
 
 	/* assign drive queue */
 	drive->queue = q;
+1 -1
drivers/md/raid5.c
···
 	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > queue_max_phys_segments(q))
+	if (bi->bi_phys_segments > queue_max_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)
+1 -2
drivers/memstick/core/mspro_block.c
···
 
 	blk_queue_bounce_limit(msb->queue, limit);
 	blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-	blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-	blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+	blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
 	blk_queue_max_segment_size(msb->queue,
 				   MSPRO_BLOCK_MAX_PAGES * msb->page_size);
 
+1 -2
drivers/message/i2o/i2o_block.c
···
 	queue = gd->queue;
 	queue->queuedata = i2o_blk_dev;
 
-	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_sectors(queue, max_sectors);
-	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
 
 	osm_debug("max sectors = %d\n", queue->max_sectors);
 	osm_debug("phys segments = %d\n", queue->max_phys_segments);
+2 -4
drivers/mmc/card/queue.c
···
 	if (mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 		blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-		blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-		blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+		blk_queue_max_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, bouncesz);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist),
···
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist) *
+1 -2
drivers/s390/block/dasd.c
···
 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
 	max = block->base->discipline->max_blocks << block->s2b_shift;
 	blk_queue_max_hw_sectors(block->request_queue, max);
-	blk_queue_max_phys_segments(block->request_queue, -1L);
-	blk_queue_max_hw_segments(block->request_queue, -1L);
+	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
+1 -2
drivers/s390/char/tape_block.c
···
 
 	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
 	blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+	blk_queue_max_segments(blkdat->request_queue, -1L);
 	blk_queue_max_segment_size(blkdat->request_queue, -1L);
 	blk_queue_segment_boundary(blkdat->request_queue, -1L);
 
+2 -2
drivers/scsi/ibmvscsi/ibmvfc.c
···
 		if (tgt->service_parms.class3_parms[0] & 0x80000000)
 			rport->supported_classes |= FC_COS_CLASS3;
 		if (rport->rqst_q)
-			blk_queue_max_hw_segments(rport->rqst_q, 1);
+			blk_queue_max_segments(rport->rqst_q, 1);
 	} else
 		tgt_dbg(tgt, "rport add failed\n");
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
···
 	}
 
 	if (shost_to_fc_host(shost)->rqst_q)
-		blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
 	dev_set_drvdata(dev, vhost);
 	spin_lock(&ibmvfc_driver_lock);
 	list_add_tail(&vhost->queue, &ibmvfc_head);
+2 -2
drivers/scsi/scsi_lib.c
···
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
-	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+					SCSI_MAX_SG_CHAIN_SEGMENTS));
 
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
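SCSI is the exception called out in the commit message: the host adapter limit (shost->sg_tablesize) and the sg-chaining ceiling used to land in the hw and phys limits respectively, so the conversion clamps the single limit to the smaller of the two with min_t(). A small sketch of the resulting behaviour, with an illustrative sg_tablesize of 128 (not taken from this commit):

	/* With sg_tablesize = 128 and a larger chaining ceiling, the queue
	 * limit is min(128, SCSI_MAX_SG_CHAIN_SEGMENTS), i.e. the adapter
	 * limit still wins whenever it is the smaller of the two. */
	unsigned short tablesize = 128;
	unsigned short limit = min_t(unsigned short, tablesize,
				     SCSI_MAX_SG_CHAIN_SEGMENTS);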
+2 -4
drivers/scsi/sg.c
···
 	if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
-		sdp->sg_tablesize = min(queue_max_hw_segments(q),
-					queue_max_phys_segments(q));
+		sdp->sg_tablesize = queue_max_segments(q);
 	}
 	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;
···
 	sdp->device = scsidp;
 	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
-	sdp->sg_tablesize = min(queue_max_hw_segments(q),
-				queue_max_phys_segments(q));
+	sdp->sg_tablesize = queue_max_segments(q);
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
 
+1 -2
drivers/scsi/st.c
···
 		return -ENODEV;
 	}
 
-	i = min(queue_max_hw_segments(SDp->request_queue),
-		queue_max_phys_segments(SDp->request_queue));
+	i = queue_max_segments(SDp->request_queue);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
 	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
+1 -4
drivers/staging/hv/blkvsc_drv.c
···
 	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
 
 	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-	blk_queue_max_phys_segments(blkdev->gd->queue,
-		MAX_MULTIPAGE_BUFFER_COUNT);
-	blk_queue_max_hw_segments(blkdev->gd->queue,
-		MAX_MULTIPAGE_BUFFER_COUNT);
+	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
 	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
 	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
 	blk_queue_dma_alignment(blkdev->gd->queue, 511);
+3 -6
fs/bio.c
···
 	int nr_pages;
 
 	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_phys_segments(q))
-		nr_pages = queue_max_phys_segments(q);
-	if (nr_pages > queue_max_hw_segments(q))
-		nr_pages = queue_max_hw_segments(q);
+	if (nr_pages > queue_max_segments(q))
+		nr_pages = queue_max_segments(q);
 
 	return nr_pages;
 }
···
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-	       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+	while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
 		if (retried_segments)
 			return 0;
+16 -11
include/linux/blkdev.h
···
 	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
-	unsigned short		max_hw_segments;
-	unsigned short		max_phys_segments;
+	unsigned short		max_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
···
 	blk_queue_max_hw_sectors(q, max);
 }
 
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+
+static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
···
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-	return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
-{
-	return q->limits.max_phys_segments;
+	return q->limits.max_segments;
 }
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
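Note the compatibility shims in this header hunk: blk_queue_max_phys_segments() and blk_queue_max_hw_segments() survive as static inlines that forward to blk_queue_max_segments(), so not-yet-converted callers (such as the remaining DAC960 call above) keep compiling but now write the same field. A sketch of the consequence, with illustrative values not taken from this commit:

	blk_queue_max_phys_segments(q, 64);	/* writes q->limits.max_segments = 64 */
	blk_queue_max_hw_segments(q, 32);	/* overwrites the same field with 32 */
	/* queue_max_segments(q) now returns 32; whichever call runs last wins. */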
+1 -1
include/linux/i2o.h
···
 /* defines for max_sectors and max_phys_segments */
 #define I2O_MAX_SECTORS			1024
 #define I2O_MAX_SECTORS_LIMITED		128
-#define I2O_MAX_PHYS_SEGMENTS		MAX_PHYS_SEGMENTS
+#define I2O_MAX_PHYS_SEGMENTS		BLK_MAX_SEGMENTS
 
 /*
  * Message structures