Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: Do away with the notion of hardsect_size

Until now we have had a 1:1 mapping between storage device physical
block size and the logical block size used when addressing the device.
With SATA 4KB drives coming out that will no longer be the case. The
sector size will be 4KB but the logical block size will remain
512-bytes. Hence we need to distinguish between the physical block size
and the logical ditto.

This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

authored by

Martin K. Petersen and committed by
Jens Axboe
e1defc4f 9bd7de51

+108 -98
+1 -1
arch/powerpc/sysdev/axonram.c
··· 250 250 251 251 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); 252 252 blk_queue_make_request(bank->disk->queue, axon_ram_make_request); 253 - blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 253 + blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 254 254 add_disk(bank->disk); 255 255 256 256 bank->irq_id = irq_of_parse_and_map(device->node, 0);
+1 -1
block/blk-integrity.c
··· 340 340 kobject_uevent(&bi->kobj, KOBJ_ADD); 341 341 342 342 bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; 343 - bi->sector_size = disk->queue->hardsect_size; 343 + bi->sector_size = queue_logical_block_size(disk->queue); 344 344 disk->integrity = bi; 345 345 } else 346 346 bi = disk->integrity;
+10 -11
block/blk-settings.c
··· 134 134 q->backing_dev_info.state = 0; 135 135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 136 136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); 137 - blk_queue_hardsect_size(q, 512); 137 + blk_queue_logical_block_size(q, 512); 138 138 blk_queue_dma_alignment(q, 511); 139 139 blk_queue_congestion_threshold(q); 140 140 q->nr_batching = BLK_BATCH_REQ; ··· 288 288 EXPORT_SYMBOL(blk_queue_max_segment_size); 289 289 290 290 /** 291 - * blk_queue_hardsect_size - set hardware sector size for the queue 291 + * blk_queue_logical_block_size - set logical block size for the queue 292 292 * @q: the request queue for the device 293 - * @size: the hardware sector size, in bytes 293 + * @size: the logical block size, in bytes 294 294 * 295 295 * Description: 296 - * This should typically be set to the lowest possible sector size 297 - * that the hardware can operate on (possible without reverting to 298 - * even internal read-modify-write operations). Usually the default 299 - * of 512 covers most hardware. 296 + * This should be set to the lowest possible block size that the 297 + * storage device can address. The default of 512 covers most 298 + * hardware. 300 299 **/ 301 - void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) 300 + void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) 302 301 { 303 - q->hardsect_size = size; 302 + q->logical_block_size = size; 304 303 } 305 - EXPORT_SYMBOL(blk_queue_hardsect_size); 304 + EXPORT_SYMBOL(blk_queue_logical_block_size); 306 305 307 306 /* 308 307 * Returns the minimum that is _not_ zero, unless both are zero. 
··· 323 324 t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); 324 325 t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); 325 326 t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); 326 - t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 327 + t->logical_block_size = max(t->logical_block_size, b->logical_block_size); 327 328 if (!t->queue_lock) 328 329 WARN_ON_ONCE(1); 329 330 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+9 -3
block/blk-sysfs.c
··· 100 100 return queue_var_show(max_sectors_kb, (page)); 101 101 } 102 102 103 - static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) 103 + static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 104 104 { 105 - return queue_var_show(q->hardsect_size, page); 105 + return queue_var_show(queue_logical_block_size(q), page); 106 106 } 107 107 108 108 static ssize_t ··· 249 249 250 250 static struct queue_sysfs_entry queue_hw_sector_size_entry = { 251 251 .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, 252 - .show = queue_hw_sector_size_show, 252 + .show = queue_logical_block_size_show, 253 + }; 254 + 255 + static struct queue_sysfs_entry queue_logical_block_size_entry = { 256 + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, 257 + .show = queue_logical_block_size_show, 253 258 }; 254 259 255 260 static struct queue_sysfs_entry queue_nonrot_entry = { ··· 288 283 &queue_max_sectors_entry.attr, 289 284 &queue_iosched_entry.attr, 290 285 &queue_hw_sector_size_entry.attr, 286 + &queue_logical_block_size_entry.attr, 291 287 &queue_nonrot_entry.attr, 292 288 &queue_nomerges_entry.attr, 293 289 &queue_rq_affinity_entry.attr,
+1 -1
block/compat_ioctl.c
··· 763 763 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 764 764 return compat_put_int(arg, block_size(bdev)); 765 765 case BLKSSZGET: /* get block device hardware sector size */ 766 - return compat_put_int(arg, bdev_hardsect_size(bdev)); 766 + return compat_put_int(arg, bdev_logical_block_size(bdev)); 767 767 case BLKSECTGET: 768 768 return compat_put_ushort(arg, 769 769 bdev_get_queue(bdev)->max_sectors);
+1 -1
block/ioctl.c
··· 311 311 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ 312 312 return put_int(arg, block_size(bdev)); 313 313 case BLKSSZGET: /* get block device hardware sector size */ 314 - return put_int(arg, bdev_hardsect_size(bdev)); 314 + return put_int(arg, bdev_logical_block_size(bdev)); 315 315 case BLKSECTGET: 316 316 return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); 317 317 case BLKRASET:
+3 -3
drivers/block/cciss.c
··· 1389 1389 1390 1390 disk->queue->queuedata = h; 1391 1391 1392 - blk_queue_hardsect_size(disk->queue, 1393 - h->drv[drv_index].block_size); 1392 + blk_queue_logical_block_size(disk->queue, 1393 + h->drv[drv_index].block_size); 1394 1394 1395 1395 /* Make sure all queue data is written out before */ 1396 1396 /* setting h->drv[drv_index].queue, as setting this */ ··· 2298 2298 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, 2299 2299 inq_buff, drv); 2300 2300 2301 - blk_queue_hardsect_size(drv->queue, drv->block_size); 2301 + blk_queue_logical_block_size(drv->queue, drv->block_size); 2302 2302 set_capacity(disk, drv->nr_blocks); 2303 2303 2304 2304 kfree(inq_buff);
+2 -2
drivers/block/cpqarray.c
··· 474 474 disk->fops = &ida_fops; 475 475 if (j && !drv->nr_blks) 476 476 continue; 477 - blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); 477 + blk_queue_logical_block_size(hba[i]->queue, drv->blk_size); 478 478 set_capacity(disk, drv->nr_blks); 479 479 disk->queue = hba[i]->queue; 480 480 disk->private_data = drv; ··· 1546 1546 drv_info_t *drv = &host->drv[i]; 1547 1547 if (i && !drv->nr_blks) 1548 1548 continue; 1549 - blk_queue_hardsect_size(host->queue, drv->blk_size); 1549 + blk_queue_logical_block_size(host->queue, drv->blk_size); 1550 1550 set_capacity(disk, drv->nr_blks); 1551 1551 disk->queue = host->queue; 1552 1552 disk->private_data = drv;
+1 -1
drivers/block/hd.c
··· 724 724 blk_queue_max_sectors(hd_queue, 255); 725 725 init_timer(&device_timer); 726 726 device_timer.function = hd_times_out; 727 - blk_queue_hardsect_size(hd_queue, 512); 727 + blk_queue_logical_block_size(hd_queue, 512); 728 728 729 729 if (!NR_HD) { 730 730 /*
+1 -1
drivers/block/mg_disk.c
··· 996 996 goto probe_err_6; 997 997 } 998 998 blk_queue_max_sectors(host->breq, MG_MAX_SECTS); 999 - blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE); 999 + blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); 1000 1000 1001 1001 init_timer(&host->timer); 1002 1002 host->timer.function = mg_times_out;
+1 -1
drivers/block/pktcdvd.c
··· 2657 2657 struct request_queue *q = pd->disk->queue; 2658 2658 2659 2659 blk_queue_make_request(q, pkt_make_request); 2660 - blk_queue_hardsect_size(q, CD_FRAMESIZE); 2660 + blk_queue_logical_block_size(q, CD_FRAMESIZE); 2661 2661 blk_queue_max_sectors(q, PACKET_MAX_SECTORS); 2662 2662 blk_queue_merge_bvec(q, pkt_merge_bvec); 2663 2663 q->queuedata = pd;
+1 -1
drivers/block/ps3disk.c
··· 477 477 blk_queue_max_sectors(queue, dev->bounce_size >> 9); 478 478 blk_queue_segment_boundary(queue, -1UL); 479 479 blk_queue_dma_alignment(queue, dev->blk_size-1); 480 - blk_queue_hardsect_size(queue, dev->blk_size); 480 + blk_queue_logical_block_size(queue, dev->blk_size); 481 481 482 482 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 483 483 ps3disk_prepare_flush);
+3 -3
drivers/block/ub.c
··· 722 722 /* 723 723 * build the command 724 724 * 725 - * The call to blk_queue_hardsect_size() guarantees that request 725 + * The call to blk_queue_logical_block_size() guarantees that request 726 726 * is aligned, but it is given in terms of 512 byte units, always. 727 727 */ 728 728 block = blk_rq_pos(rq) >> lun->capacity.bshift; ··· 1749 1749 ub_revalidate(lun->udev, lun); 1750 1750 1751 1751 /* XXX Support sector size switching like in sr.c */ 1752 - blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); 1752 + blk_queue_logical_block_size(disk->queue, lun->capacity.bsize); 1753 1753 set_capacity(disk, lun->capacity.nsec); 1754 1754 // set_disk_ro(sdkp->disk, lun->readonly); 1755 1755 ··· 2324 2324 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); 2325 2325 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ 2326 2326 blk_queue_max_sectors(q, UB_MAX_SECTORS); 2327 - blk_queue_hardsect_size(q, lun->capacity.bsize); 2327 + blk_queue_logical_block_size(q, lun->capacity.bsize); 2328 2328 2329 2329 lun->disk = disk; 2330 2330 q->queuedata = lun;
+1 -1
drivers/block/virtio_blk.c
··· 347 347 offsetof(struct virtio_blk_config, blk_size), 348 348 &blk_size); 349 349 if (!err) 350 - blk_queue_hardsect_size(vblk->disk->queue, blk_size); 350 + blk_queue_logical_block_size(vblk->disk->queue, blk_size); 351 351 352 352 add_disk(vblk->disk); 353 353 return 0;
+1 -1
drivers/block/xen-blkfront.c
··· 344 344 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 345 345 346 346 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 347 - blk_queue_hardsect_size(rq, sector_size); 347 + blk_queue_logical_block_size(rq, sector_size); 348 348 blk_queue_max_sectors(rq, 512); 349 349 350 350 /* Each segment in a request is up to an aligned page in size. */
+1 -1
drivers/block/xsysace.c
··· 984 984 ace->queue = blk_init_queue(ace_request, &ace->lock); 985 985 if (ace->queue == NULL) 986 986 goto err_blk_initq; 987 - blk_queue_hardsect_size(ace->queue, 512); 987 + blk_queue_logical_block_size(ace->queue, 512); 988 988 989 989 /* 990 990 * Allocate and initialize GD structure
+1 -1
drivers/cdrom/gdrom.c
··· 739 739 740 740 static int __devinit probe_gdrom_setupqueue(void) 741 741 { 742 - blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR); 742 + blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); 743 743 /* using DMA so memory will need to be contiguous */ 744 744 blk_queue_max_hw_segments(gd.gdrom_rq, 1); 745 745 /* set a large max size to get most from DMA */
+2 -2
drivers/cdrom/viocd.c
··· 469 469 case viocdopen: 470 470 if (event->xRc == 0) { 471 471 di = &viocd_diskinfo[bevent->disk]; 472 - blk_queue_hardsect_size(di->viocd_disk->queue, 473 - bevent->block_size); 472 + blk_queue_logical_block_size(di->viocd_disk->queue, 473 + bevent->block_size); 474 474 set_capacity(di->viocd_disk, 475 475 bevent->media_size * 476 476 bevent->block_size / 512);
+1 -1
drivers/char/raw.c
··· 71 71 err = bd_claim(bdev, raw_open); 72 72 if (err) 73 73 goto out1; 74 - err = set_blocksize(bdev, bdev_hardsect_size(bdev)); 74 + err = set_blocksize(bdev, bdev_logical_block_size(bdev)); 75 75 if (err) 76 76 goto out2; 77 77 filp->f_flags |= O_DIRECT;
+6 -6
drivers/ide/ide-cd.c
··· 182 182 (sense->information[2] << 8) | 183 183 (sense->information[3]); 184 184 185 - if (drive->queue->hardsect_size == 2048) 185 + if (queue_logical_block_size(drive->queue) == 2048) 186 186 /* device sector size is 2K */ 187 187 sector <<= 2; 188 188 ··· 737 737 struct request_queue *q = drive->queue; 738 738 int write = rq_data_dir(rq) == WRITE; 739 739 unsigned short sectors_per_frame = 740 - queue_hardsect_size(q) >> SECTOR_BITS; 740 + queue_logical_block_size(q) >> SECTOR_BITS; 741 741 742 742 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " 743 743 "secs_per_frame: %u", ··· 1021 1021 /* save a private copy of the TOC capacity for error handling */ 1022 1022 drive->probed_capacity = toc->capacity * sectors_per_frame; 1023 1023 1024 - blk_queue_hardsect_size(drive->queue, 1025 - sectors_per_frame << SECTOR_BITS); 1024 + blk_queue_logical_block_size(drive->queue, 1025 + sectors_per_frame << SECTOR_BITS); 1026 1026 1027 1027 /* first read just the header, so we know how long the TOC is */ 1028 1028 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, ··· 1338 1338 /* standard prep_rq_fn that builds 10 byte cmds */ 1339 1339 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1340 1340 { 1341 - int hard_sect = queue_hardsect_size(q); 1341 + int hard_sect = queue_logical_block_size(q); 1342 1342 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); 1343 1343 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); 1344 1344 ··· 1543 1543 1544 1544 nslots = ide_cdrom_probe_capabilities(drive); 1545 1545 1546 - blk_queue_hardsect_size(q, CD_FRAMESIZE); 1546 + blk_queue_logical_block_size(q, CD_FRAMESIZE); 1547 1547 1548 1548 if (ide_cdrom_register(drive, nslots)) { 1549 1549 printk(KERN_ERR PFX "%s: %s failed to register device with the"
+2 -2
drivers/md/bitmap.c
··· 232 232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 233 233 234 234 if (sync_page_io(rdev->bdev, target, 235 - roundup(size, bdev_hardsect_size(rdev->bdev)), 235 + roundup(size, bdev_logical_block_size(rdev->bdev)), 236 236 page, READ)) { 237 237 page->index = index; 238 238 attach_page_buffers(page, NULL); /* so that free_buffer will ··· 287 287 int size = PAGE_SIZE; 288 288 if (page->index == bitmap->file_pages-1) 289 289 size = roundup(bitmap->last_page_size, 290 - bdev_hardsect_size(rdev->bdev)); 290 + bdev_logical_block_size(rdev->bdev)); 291 291 /* Just make sure we aren't corrupting data or 292 292 * metadata 293 293 */
+1 -1
drivers/md/dm-exception-store.c
··· 178 178 } 179 179 180 180 /* Validate the chunk size against the device block size */ 181 - if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) { 181 + if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) { 182 182 *error = "Chunk size is not a multiple of device blocksize"; 183 183 return -EINVAL; 184 184 }
+2 -1
drivers/md/dm-log.c
··· 413 413 * Buffer holds both header and bitset. 414 414 */ 415 415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 416 - bitset_size, ti->limits.hardsect_size); 416 + bitset_size, 417 + ti->limits.logical_block_size); 417 418 418 419 if (buf_size > dev->bdev->bd_inode->i_size) { 419 420 DMWARN("log device %s too small: need %llu bytes",
+1 -1
drivers/md/dm-snap-persistent.c
··· 282 282 */ 283 283 if (!ps->store->chunk_size) { 284 284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 285 - bdev_hardsect_size(ps->store->cow->bdev) >> 9); 285 + bdev_logical_block_size(ps->store->cow->bdev) >> 9); 286 286 ps->store->chunk_mask = ps->store->chunk_size - 1; 287 287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; 288 288 chunk_size_supplied = 0;
+7 -5
drivers/md/dm-table.c
··· 108 108 lhs->max_hw_segments = 109 109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); 110 110 111 - lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); 111 + lhs->logical_block_size = max(lhs->logical_block_size, 112 + rhs->logical_block_size); 112 113 113 114 lhs->max_segment_size = 114 115 min_not_zero(lhs->max_segment_size, rhs->max_segment_size); ··· 530 529 rs->max_hw_segments = 531 530 min_not_zero(rs->max_hw_segments, q->max_hw_segments); 532 531 533 - rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); 532 + rs->logical_block_size = max(rs->logical_block_size, 533 + queue_logical_block_size(q)); 534 534 535 535 rs->max_segment_size = 536 536 min_not_zero(rs->max_segment_size, q->max_segment_size); ··· 685 683 rs->max_phys_segments = MAX_PHYS_SEGMENTS; 686 684 if (!rs->max_hw_segments) 687 685 rs->max_hw_segments = MAX_HW_SEGMENTS; 688 - if (!rs->hardsect_size) 689 - rs->hardsect_size = 1 << SECTOR_SHIFT; 686 + if (!rs->logical_block_size) 687 + rs->logical_block_size = 1 << SECTOR_SHIFT; 690 688 if (!rs->max_segment_size) 691 689 rs->max_segment_size = MAX_SEGMENT_SIZE; 692 690 if (!rs->seg_boundary_mask) ··· 916 914 blk_queue_max_sectors(q, t->limits.max_sectors); 917 915 q->max_phys_segments = t->limits.max_phys_segments; 918 916 q->max_hw_segments = t->limits.max_hw_segments; 919 - q->hardsect_size = t->limits.hardsect_size; 917 + q->logical_block_size = t->limits.logical_block_size; 920 918 q->max_segment_size = t->limits.max_segment_size; 921 919 q->max_hw_sectors = t->limits.max_hw_sectors; 922 920 q->seg_boundary_mask = t->limits.seg_boundary_mask;
+1 -1
drivers/md/md.c
··· 1202 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1203 1203 1204 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1205 - bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1205 + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1206 1206 if (rdev->sb_size & bmask) 1207 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1208 1208
+1 -1
drivers/memstick/core/mspro_block.c
··· 1242 1242 1243 1243 sprintf(msb->disk->disk_name, "mspblk%d", disk_id); 1244 1244 1245 - blk_queue_hardsect_size(msb->queue, msb->page_size); 1245 + blk_queue_logical_block_size(msb->queue, msb->page_size); 1246 1246 1247 1247 capacity = be16_to_cpu(sys_info->user_block_count); 1248 1248 capacity *= be16_to_cpu(sys_info->block_size);
+3 -2
drivers/message/i2o/i2o_block.c
··· 794 794 if (c->adaptec) { 795 795 u8 cmd[10]; 796 796 u32 scsi_flags; 797 - u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 797 + u16 hwsec; 798 798 799 + hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; 799 800 memset(cmd, 0, 10); 800 801 801 802 sgl_offset = SGL_OFFSET_12; ··· 1079 1078 */ 1080 1079 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1081 1080 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1082 - blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); 1081 + blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); 1083 1082 } else 1084 1083 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1085 1084
+1 -1
drivers/mmc/card/block.c
··· 521 521 522 522 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 523 523 524 - blk_queue_hardsect_size(md->queue.queue, 512); 524 + blk_queue_logical_block_size(md->queue.queue, 512); 525 525 526 526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 527 527 /*
+1 -1
drivers/mtd/mtd_blkdevs.c
··· 378 378 } 379 379 380 380 tr->blkcore_priv->rq->queuedata = tr; 381 - blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 + blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 382 382 if (tr->discard) 383 383 blk_queue_set_discard(tr->blkcore_priv->rq, 384 384 blktrans_discard_request);
+1 -1
drivers/s390/block/dasd.c
··· 1990 1990 { 1991 1991 int max; 1992 1992 1993 - blk_queue_hardsect_size(block->request_queue, block->bp_block); 1993 + blk_queue_logical_block_size(block->request_queue, block->bp_block); 1994 1994 max = block->base->discipline->max_blocks << block->s2b_shift; 1995 1995 blk_queue_max_sectors(block->request_queue, max); 1996 1996 blk_queue_max_phys_segments(block->request_queue, -1L);
+1 -1
drivers/s390/block/dcssblk.c
··· 602 602 dev_info->gd->private_data = dev_info; 603 603 dev_info->gd->driverfs_dev = &dev_info->dev; 604 604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 605 - blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 605 + blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); 606 606 607 607 seg_byte_size = (dev_info->end - dev_info->start + 1); 608 608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
+1 -1
drivers/s390/block/xpram.c
··· 343 343 goto out; 344 344 } 345 345 blk_queue_make_request(xpram_queues[i], xpram_make_request); 346 - blk_queue_hardsect_size(xpram_queues[i], 4096); 346 + blk_queue_logical_block_size(xpram_queues[i], 4096); 347 347 } 348 348 349 349 /*
+1 -1
drivers/s390/char/tape_block.c
··· 222 222 if (rc) 223 223 goto cleanup_queue; 224 224 225 - blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 225 + blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 226 226 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); 227 227 blk_queue_max_phys_segments(blkdat->request_queue, -1L); 228 228 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+1 -1
drivers/scsi/sd.c
··· 1510 1510 */ 1511 1511 sector_size = 512; 1512 1512 } 1513 - blk_queue_hardsect_size(sdp->request_queue, sector_size); 1513 + blk_queue_logical_block_size(sdp->request_queue, sector_size); 1514 1514 1515 1515 { 1516 1516 char cap_str_2[10], cap_str_10[10];
+1 -1
drivers/scsi/sr.c
··· 727 727 } 728 728 729 729 queue = cd->device->request_queue; 730 - blk_queue_hardsect_size(queue, sector_size); 730 + blk_queue_logical_block_size(queue, sector_size); 731 731 732 732 return; 733 733 }
+2 -1
fs/bio.c
··· 1490 1490 sector_t bio_sector_offset(struct bio *bio, unsigned short index, 1491 1491 unsigned int offset) 1492 1492 { 1493 - unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue); 1493 + unsigned int sector_sz; 1494 1494 struct bio_vec *bv; 1495 1495 sector_t sectors; 1496 1496 int i; 1497 1497 1498 + sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); 1498 1499 sectors = 0; 1499 1500 1500 1501 if (index >= bio->bi_idx)
+3 -3
fs/block_dev.c
··· 76 76 return -EINVAL; 77 77 78 78 /* Size cannot be smaller than the size supported by the device */ 79 - if (size < bdev_hardsect_size(bdev)) 79 + if (size < bdev_logical_block_size(bdev)) 80 80 return -EINVAL; 81 81 82 82 /* Don't change the size if it is same as current */ ··· 106 106 107 107 int sb_min_blocksize(struct super_block *sb, int size) 108 108 { 109 - int minsize = bdev_hardsect_size(sb->s_bdev); 109 + int minsize = bdev_logical_block_size(sb->s_bdev); 110 110 if (size < minsize) 111 111 size = minsize; 112 112 return sb_set_blocksize(sb, size); ··· 1117 1117 1118 1118 void bd_set_size(struct block_device *bdev, loff_t size) 1119 1119 { 1120 - unsigned bsize = bdev_hardsect_size(bdev); 1120 + unsigned bsize = bdev_logical_block_size(bdev); 1121 1121 1122 1122 bdev->bd_inode->i_size = size; 1123 1123 while (bsize < PAGE_CACHE_SIZE) {
+3 -3
fs/buffer.c
··· 1085 1085 __getblk_slow(struct block_device *bdev, sector_t block, int size) 1086 1086 { 1087 1087 /* Size must be multiple of hard sectorsize */ 1088 - if (unlikely(size & (bdev_hardsect_size(bdev)-1) || 1088 + if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1089 1089 (size < 512 || size > PAGE_SIZE))) { 1090 1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1091 1091 size); 1092 - printk(KERN_ERR "hardsect size: %d\n", 1093 - bdev_hardsect_size(bdev)); 1092 + printk(KERN_ERR "logical block size: %d\n", 1093 + bdev_logical_block_size(bdev)); 1094 1094 1095 1095 dump_stack(); 1096 1096 return NULL;
+1 -1
fs/direct-io.c
··· 1127 1127 rw = WRITE_ODIRECT; 1128 1128 1129 1129 if (bdev) 1130 - bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); 1130 + bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); 1131 1131 1132 1132 if (offset & blocksize_mask) { 1133 1133 if (bdev)
+2 -2
fs/ext3/super.c
··· 1696 1696 goto failed_mount; 1697 1697 } 1698 1698 1699 - hblock = bdev_hardsect_size(sb->s_bdev); 1699 + hblock = bdev_logical_block_size(sb->s_bdev); 1700 1700 if (sb->s_blocksize != blocksize) { 1701 1701 /* 1702 1702 * Make sure the blocksize for the filesystem is larger ··· 2119 2119 } 2120 2120 2121 2121 blocksize = sb->s_blocksize; 2122 - hblock = bdev_hardsect_size(bdev); 2122 + hblock = bdev_logical_block_size(bdev); 2123 2123 if (blocksize < hblock) { 2124 2124 printk(KERN_ERR 2125 2125 "EXT3-fs: blocksize too small for journal device.\n");
+1 -1
fs/ext4/super.c
··· 2962 2962 } 2963 2963 2964 2964 blocksize = sb->s_blocksize; 2965 - hblock = bdev_hardsect_size(bdev); 2965 + hblock = bdev_logical_block_size(bdev); 2966 2966 if (blocksize < hblock) { 2967 2967 printk(KERN_ERR 2968 2968 "EXT4-fs: blocksize too small for journal device.\n");
+2 -2
fs/gfs2/ops_fstype.c
··· 526 526 } 527 527 528 528 /* Set up the buffer cache and SB for real */ 529 - if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) { 529 + if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) { 530 530 ret = -EINVAL; 531 531 fs_err(sdp, "FS block size (%u) is too small for device " 532 532 "block size (%u)\n", 533 - sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev)); 533 + sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev)); 534 534 goto out; 535 535 } 536 536 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
+1 -1
fs/gfs2/rgrp.c
··· 845 845 struct super_block *sb = sdp->sd_vfs; 846 846 struct block_device *bdev = sb->s_bdev; 847 847 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / 848 - bdev_hardsect_size(sb->s_bdev); 848 + bdev_logical_block_size(sb->s_bdev); 849 849 u64 blk; 850 850 sector_t start = 0; 851 851 sector_t nr_sects = 0;
+1 -1
fs/nilfs2/the_nilfs.c
··· 515 515 516 516 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); 517 517 if (sb->s_blocksize != blocksize) { 518 - int hw_blocksize = bdev_hardsect_size(sb->s_bdev); 518 + int hw_blocksize = bdev_logical_block_size(sb->s_bdev); 519 519 520 520 if (blocksize < hw_blocksize) { 521 521 printk(KERN_ERR
+3 -3
fs/ntfs/super.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/string.h> 27 27 #include <linux/spinlock.h> 28 - #include <linux/blkdev.h> /* For bdev_hardsect_size(). */ 28 + #include <linux/blkdev.h> /* For bdev_logical_block_size(). */ 29 29 #include <linux/backing-dev.h> 30 30 #include <linux/buffer_head.h> 31 31 #include <linux/vfs.h> ··· 2785 2785 goto err_out_now; 2786 2786 2787 2787 /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2788 - if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2788 + if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2789 2789 if (!silent) 2790 2790 ntfs_error(sb, "Device has unsupported sector size " 2791 2791 "(%i). The maximum supported sector " 2792 2792 "size on this architecture is %lu " 2793 2793 "bytes.", 2794 - bdev_hardsect_size(sb->s_bdev), 2794 + bdev_logical_block_size(sb->s_bdev), 2795 2795 PAGE_CACHE_SIZE); 2796 2796 goto err_out_now; 2797 2797 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 1371 1371 1372 1372 bdevname(reg->hr_bdev, reg->hr_dev_name); 1373 1373 1374 - sectsize = bdev_hardsect_size(reg->hr_bdev); 1374 + sectsize = bdev_logical_block_size(reg->hr_bdev); 1375 1375 if (sectsize != reg->hr_block_bytes) { 1376 1376 mlog(ML_ERROR, 1377 1377 "blocksize %u incorrect for device, expected %d",
+1 -1
fs/ocfs2/super.c
··· 713 713 *bh = NULL; 714 714 715 715 /* may be > 512 */ 716 - *sector_size = bdev_hardsect_size(sb->s_bdev); 716 + *sector_size = bdev_logical_block_size(sb->s_bdev); 717 717 if (*sector_size > OCFS2_MAX_BLOCKSIZE) { 718 718 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", 719 719 *sector_size, OCFS2_MAX_BLOCKSIZE);
+1 -1
fs/partitions/ibm.c
··· 76 76 Sector sect; 77 77 78 78 res = 0; 79 - blocksize = bdev_hardsect_size(bdev); 79 + blocksize = bdev_logical_block_size(bdev); 80 80 if (blocksize <= 0) 81 81 goto out_exit; 82 82 i_size = i_size_read(bdev->bd_inode);
+2 -2
fs/partitions/msdos.c
··· 110 110 Sector sect; 111 111 unsigned char *data; 112 112 u32 this_sector, this_size; 113 - int sector_size = bdev_hardsect_size(bdev) / 512; 113 + int sector_size = bdev_logical_block_size(bdev) / 512; 114 114 int loopct = 0; /* number of links followed 115 115 without finding a data partition */ 116 116 int i; ··· 415 415 416 416 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) 417 417 { 418 - int sector_size = bdev_hardsect_size(bdev) / 512; 418 + int sector_size = bdev_logical_block_size(bdev) / 512; 419 419 Sector sect; 420 420 unsigned char *data; 421 421 struct partition *p;
+1 -1
fs/udf/super.c
··· 1915 1915 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 1916 1916 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1917 1917 } else { 1918 - uopt.blocksize = bdev_hardsect_size(sb->s_bdev); 1918 + uopt.blocksize = bdev_logical_block_size(sb->s_bdev); 1919 1919 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1920 1920 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { 1921 1921 if (!silent)
+1 -1
fs/xfs/linux-2.6/xfs_buf.c
··· 1501 1501 struct block_device *bdev) 1502 1502 { 1503 1503 return xfs_setsize_buftarg_flags(btp, 1504 - PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0); 1504 + PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); 1505 1505 } 1506 1506 1507 1507 int
+7 -7
include/linux/blkdev.h
··· 391 391 unsigned int max_hw_sectors; 392 392 unsigned short max_phys_segments; 393 393 unsigned short max_hw_segments; 394 - unsigned short hardsect_size; 394 + unsigned short logical_block_size; 395 395 unsigned int max_segment_size; 396 396 397 397 unsigned long seg_boundary_mask; ··· 901 901 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 902 902 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 903 903 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 904 - extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 904 + extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 905 905 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 906 906 extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 907 907 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); ··· 988 988 989 989 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 990 990 991 - static inline int queue_hardsect_size(struct request_queue *q) 991 + static inline unsigned short queue_logical_block_size(struct request_queue *q) 992 992 { 993 993 int retval = 512; 994 994 995 - if (q && q->hardsect_size) 996 - retval = q->hardsect_size; 995 + if (q && q->logical_block_size) 996 + retval = q->logical_block_size; 997 997 998 998 return retval; 999 999 } 1000 1000 1001 - static inline int bdev_hardsect_size(struct block_device *bdev) 1001 + static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 1002 1002 { 1003 - return queue_hardsect_size(bdev_get_queue(bdev)); 1003 + return queue_logical_block_size(bdev_get_queue(bdev)); 1004 1004 } 1005 1005 1006 1006 static inline int queue_dma_alignment(struct request_queue *q)
+1 -1
include/linux/device-mapper.h
··· 149 149 unsigned max_hw_sectors; 150 150 unsigned max_sectors; 151 151 unsigned max_segment_size; 152 - unsigned short hardsect_size; 152 + unsigned short logical_block_size; 153 153 unsigned short max_hw_segments; 154 154 unsigned short max_phys_segments; 155 155 unsigned char no_cluster; /* inverted so that 0 is default */