Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: move integrity information into queue_limits

Move the integrity information into the queue limits so that it can be
set atomically with other queue limits, and that the sysfs changes to
the read_verify and write_generate flags are properly synchronized.
This also allows us to provide a more useful helper to stack the integrity
fields, although it still is separate from the main stacking function
as not all stackable devices want to inherit the integrity settings.
Even with that it greatly simplifies the code in md and dm.

Note that the integrity field is moved as-is into the queue limits.
While there are good arguments for removing the separate blk_integrity
structure, this would cause a lot of churn and might better be done at a
later time if desired. However the integrity field in the queue_limits
structure is now unconditional so that various ifdefs can be avoided or
replaced with IS_ENABLED(). Given the tiny size of it that seems like
a worthwhile trade off.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20240613084839.1044015-13-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
c6e56cf6 9f4aa46f

+289 -497
+3 -46
Documentation/block/data-integrity.rst
··· 153 153 4.2 Block Device 154 154 ---------------- 155 155 156 - Because the format of the protection data is tied to the physical 157 - disk, each block device has been extended with a block integrity 158 - profile (struct blk_integrity). This optional profile is registered 159 - with the block layer using blk_integrity_register(). 160 - 161 - The profile contains callback functions for generating and verifying 162 - the protection data, as well as getting and setting application tags. 163 - The profile also contains a few constants to aid in completing, 164 - merging and splitting the integrity metadata. 156 + Block devices can set up the integrity information in the integrity 157 + sub-struture of the queue_limits structure. 165 158 166 159 Layered block devices will need to pick a profile that's appropriate 167 - for all subdevices. blk_integrity_compare() can help with that. DM 160 + for all subdevices. queue_limits_stack_integrity() can help with that. DM 168 161 and MD linear, RAID0 and RAID1 are currently supported. RAID4/5/6 169 162 will require extra work due to the application tag. 170 163 ··· 242 249 It is up to the receiver to process them and verify data 243 250 integrity upon completion. 
244 251 245 - 246 - 5.4 Registering A Block Device As Capable Of Exchanging Integrity Metadata 247 - -------------------------------------------------------------------------- 248 - 249 - To enable integrity exchange on a block device the gendisk must be 250 - registered as capable: 251 - 252 - `int blk_integrity_register(gendisk, blk_integrity);` 253 - 254 - The blk_integrity struct is a template and should contain the 255 - following:: 256 - 257 - static struct blk_integrity my_profile = { 258 - .name = "STANDARDSBODY-TYPE-VARIANT-CSUM", 259 - .generate_fn = my_generate_fn, 260 - .verify_fn = my_verify_fn, 261 - .tuple_size = sizeof(struct my_tuple_size), 262 - .tag_size = <tag bytes per hw sector>, 263 - }; 264 - 265 - 'name' is a text string which will be visible in sysfs. This is 266 - part of the userland API so chose it carefully and never change 267 - it. The format is standards body-type-variant. 268 - E.g. T10-DIF-TYPE1-IP or T13-EPP-0-CRC. 269 - 270 - 'generate_fn' generates appropriate integrity metadata (for WRITE). 271 - 272 - 'verify_fn' verifies that the data buffer matches the integrity 273 - metadata. 274 - 275 - 'tuple_size' must be set to match the size of the integrity 276 - metadata per sector. I.e. 8 for DIF and EPP. 277 - 278 - 'tag_size' must be set to identify how many bytes of tag space 279 - are available per hardware sector. For DIF this is either 2 or 280 - 0 depending on the value of the Control Mode Page ATO bit. 281 252 282 253 ---------------------------------------------------------------------- 283 254
+13 -111
block/blk-integrity.c
··· 107 107 } 108 108 EXPORT_SYMBOL(blk_rq_map_integrity_sg); 109 109 110 - /** 111 - * blk_integrity_compare - Compare integrity profile of two disks 112 - * @gd1: Disk to compare 113 - * @gd2: Disk to compare 114 - * 115 - * Description: Meta-devices like DM and MD need to verify that all 116 - * sub-devices use the same integrity format before advertising to 117 - * upper layers that they can send/receive integrity metadata. This 118 - * function can be used to check whether two gendisk devices have 119 - * compatible integrity formats. 120 - */ 121 - int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) 122 - { 123 - struct blk_integrity *b1 = &gd1->queue->integrity; 124 - struct blk_integrity *b2 = &gd2->queue->integrity; 125 - 126 - if (!b1->tuple_size && !b2->tuple_size) 127 - return 0; 128 - 129 - if (!b1->tuple_size || !b2->tuple_size) 130 - return -1; 131 - 132 - if (b1->interval_exp != b2->interval_exp) { 133 - pr_err("%s: %s/%s protection interval %u != %u\n", 134 - __func__, gd1->disk_name, gd2->disk_name, 135 - 1 << b1->interval_exp, 1 << b2->interval_exp); 136 - return -1; 137 - } 138 - 139 - if (b1->tuple_size != b2->tuple_size) { 140 - pr_err("%s: %s/%s tuple sz %u != %u\n", __func__, 141 - gd1->disk_name, gd2->disk_name, 142 - b1->tuple_size, b2->tuple_size); 143 - return -1; 144 - } 145 - 146 - if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { 147 - pr_err("%s: %s/%s tag sz %u != %u\n", __func__, 148 - gd1->disk_name, gd2->disk_name, 149 - b1->tag_size, b2->tag_size); 150 - return -1; 151 - } 152 - 153 - if (b1->csum_type != b2->csum_type || 154 - (b1->flags & BLK_INTEGRITY_REF_TAG) != 155 - (b2->flags & BLK_INTEGRITY_REF_TAG)) { 156 - pr_err("%s: %s/%s type %s != %s\n", __func__, 157 - gd1->disk_name, gd2->disk_name, 158 - blk_integrity_profile_name(b1), 159 - blk_integrity_profile_name(b2)); 160 - return -1; 161 - } 162 - 163 - return 0; 164 - } 165 - EXPORT_SYMBOL(blk_integrity_compare); 166 - 167 110 bool 
blk_integrity_merge_rq(struct request_queue *q, struct request *req, 168 111 struct request *next) 169 112 { ··· 160 217 161 218 static inline struct blk_integrity *dev_to_bi(struct device *dev) 162 219 { 163 - return &dev_to_disk(dev)->queue->integrity; 220 + return &dev_to_disk(dev)->queue->limits.integrity; 164 221 } 165 222 166 223 const char *blk_integrity_profile_name(struct blk_integrity *bi) ··· 189 246 static ssize_t flag_store(struct device *dev, struct device_attribute *attr, 190 247 const char *page, size_t count, unsigned char flag) 191 248 { 192 - struct blk_integrity *bi = dev_to_bi(dev); 249 + struct request_queue *q = dev_to_disk(dev)->queue; 250 + struct queue_limits lim; 193 251 unsigned long val; 194 252 int err; 195 253 ··· 198 254 if (err) 199 255 return err; 200 256 201 - /* the flags are inverted vs the values in the sysfs files */ 257 + /* note that the flags are inverted vs the values in the sysfs files */ 258 + lim = queue_limits_start_update(q); 202 259 if (val) 203 - bi->flags &= ~flag; 260 + lim.integrity.flags &= ~flag; 204 261 else 205 - bi->flags |= flag; 262 + lim.integrity.flags |= flag; 263 + 264 + blk_mq_freeze_queue(q); 265 + err = queue_limits_commit_update(q, &lim); 266 + blk_mq_unfreeze_queue(q); 267 + if (err) 268 + return err; 206 269 return count; 207 270 } 208 271 ··· 306 355 .name = "integrity", 307 356 .attrs = integrity_attrs, 308 357 }; 309 - 310 - /** 311 - * blk_integrity_register - Register a gendisk as being integrity-capable 312 - * @disk: struct gendisk pointer to make integrity-aware 313 - * @template: block integrity profile to register 314 - * 315 - * Description: When a device needs to advertise itself as being able to 316 - * send/receive integrity metadata it must use this function to register 317 - * the capability with the block layer. The template is a blk_integrity 318 - * struct with values appropriate for the underlying hardware. See 319 - * Documentation/block/data-integrity.rst. 
320 - */ 321 - void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) 322 - { 323 - struct blk_integrity *bi = &disk->queue->integrity; 324 - 325 - bi->csum_type = template->csum_type; 326 - bi->flags = template->flags; 327 - bi->interval_exp = template->interval_exp ? : 328 - ilog2(queue_logical_block_size(disk->queue)); 329 - bi->tuple_size = template->tuple_size; 330 - bi->tag_size = template->tag_size; 331 - bi->pi_offset = template->pi_offset; 332 - 333 - #ifdef CONFIG_BLK_INLINE_ENCRYPTION 334 - if (disk->queue->crypto_profile) { 335 - pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n"); 336 - disk->queue->crypto_profile = NULL; 337 - } 338 - #endif 339 - } 340 - EXPORT_SYMBOL(blk_integrity_register); 341 - 342 - /** 343 - * blk_integrity_unregister - Unregister block integrity profile 344 - * @disk: disk whose integrity profile to unregister 345 - * 346 - * Description: This function unregisters the integrity capability from 347 - * a block device. 348 - */ 349 - void blk_integrity_unregister(struct gendisk *disk) 350 - { 351 - struct blk_integrity *bi = &disk->queue->integrity; 352 - 353 - if (!bi->tuple_size) 354 - return; 355 - memset(bi, 0, sizeof(*bi)); 356 - } 357 - EXPORT_SYMBOL(blk_integrity_unregister);
+112 -6
block/blk-settings.c
··· 6 6 #include <linux/module.h> 7 7 #include <linux/init.h> 8 8 #include <linux/bio.h> 9 - #include <linux/blkdev.h> 9 + #include <linux/blk-integrity.h> 10 10 #include <linux/pagemap.h> 11 11 #include <linux/backing-dev-defs.h> 12 12 #include <linux/gcd.h> ··· 97 97 return 0; 98 98 } 99 99 100 + static int blk_validate_integrity_limits(struct queue_limits *lim) 101 + { 102 + struct blk_integrity *bi = &lim->integrity; 103 + 104 + if (!bi->tuple_size) { 105 + if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE || 106 + bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) { 107 + pr_warn("invalid PI settings.\n"); 108 + return -EINVAL; 109 + } 110 + return 0; 111 + } 112 + 113 + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { 114 + pr_warn("integrity support disabled.\n"); 115 + return -EINVAL; 116 + } 117 + 118 + if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE && 119 + (bi->flags & BLK_INTEGRITY_REF_TAG)) { 120 + pr_warn("ref tag not support without checksum.\n"); 121 + return -EINVAL; 122 + } 123 + 124 + if (!bi->interval_exp) 125 + bi->interval_exp = ilog2(lim->logical_block_size); 126 + 127 + return 0; 128 + } 129 + 100 130 /* 101 131 * Check that the limits in lim are valid, initialize defaults for unset 102 132 * values, and cap values based on others where needed. 
··· 135 105 { 136 106 unsigned int max_hw_sectors; 137 107 unsigned int logical_block_sectors; 108 + int err; 138 109 139 110 /* 140 111 * Unless otherwise specified, default to 512 byte logical blocks and a ··· 261 230 lim->misaligned = 0; 262 231 } 263 232 233 + err = blk_validate_integrity_limits(lim); 234 + if (err) 235 + return err; 264 236 return blk_validate_zoned_limits(lim); 265 237 } 266 238 ··· 297 263 struct queue_limits *lim) 298 264 __releases(q->limits_lock) 299 265 { 300 - int error = blk_validate_limits(lim); 266 + int error; 301 267 302 - if (!error) { 303 - q->limits = *lim; 304 - if (q->disk) 305 - blk_apply_bdi_limits(q->disk->bdi, lim); 268 + error = blk_validate_limits(lim); 269 + if (error) 270 + goto out_unlock; 271 + 272 + #ifdef CONFIG_BLK_INLINE_ENCRYPTION 273 + if (q->crypto_profile && lim->integrity.tag_size) { 274 + pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n"); 275 + error = -EINVAL; 276 + goto out_unlock; 306 277 } 278 + #endif 279 + 280 + q->limits = *lim; 281 + if (q->disk) 282 + blk_apply_bdi_limits(q->disk->bdi, lim); 283 + out_unlock: 307 284 mutex_unlock(&q->limits_lock); 308 285 return error; 309 286 } ··· 619 574 pfx, bdev); 620 575 } 621 576 EXPORT_SYMBOL_GPL(queue_limits_stack_bdev); 577 + 578 + /** 579 + * queue_limits_stack_integrity - stack integrity profile 580 + * @t: target queue limits 581 + * @b: base queue limits 582 + * 583 + * Check if the integrity profile in the @b can be stacked into the 584 + * target @t. Stacking is possible if either: 585 + * 586 + * a) does not have any integrity information stacked into it yet 587 + * b) the integrity profile in @b is identical to the one in @t 588 + * 589 + * If @b can be stacked into @t, return %true. Else return %false and clear the 590 + * integrity information in @t. 
591 + */ 592 + bool queue_limits_stack_integrity(struct queue_limits *t, 593 + struct queue_limits *b) 594 + { 595 + struct blk_integrity *ti = &t->integrity; 596 + struct blk_integrity *bi = &b->integrity; 597 + 598 + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) 599 + return true; 600 + 601 + if (!ti->tuple_size) { 602 + /* inherit the settings from the first underlying device */ 603 + if (!(ti->flags & BLK_INTEGRITY_STACKED)) { 604 + ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE | 605 + (bi->flags & BLK_INTEGRITY_REF_TAG); 606 + ti->csum_type = bi->csum_type; 607 + ti->tuple_size = bi->tuple_size; 608 + ti->pi_offset = bi->pi_offset; 609 + ti->interval_exp = bi->interval_exp; 610 + ti->tag_size = bi->tag_size; 611 + goto done; 612 + } 613 + if (!bi->tuple_size) 614 + goto done; 615 + } 616 + 617 + if (ti->tuple_size != bi->tuple_size) 618 + goto incompatible; 619 + if (ti->interval_exp != bi->interval_exp) 620 + goto incompatible; 621 + if (ti->tag_size != bi->tag_size) 622 + goto incompatible; 623 + if (ti->csum_type != bi->csum_type) 624 + goto incompatible; 625 + if ((ti->flags & BLK_INTEGRITY_REF_TAG) != 626 + (bi->flags & BLK_INTEGRITY_REF_TAG)) 627 + goto incompatible; 628 + 629 + done: 630 + ti->flags |= BLK_INTEGRITY_STACKED; 631 + return true; 632 + 633 + incompatible: 634 + memset(ti, 0, sizeof(*ti)); 635 + return false; 636 + } 637 + EXPORT_SYMBOL_GPL(queue_limits_stack_integrity); 622 638 623 639 /** 624 640 * blk_queue_update_dma_pad - update pad mask
+6 -6
block/t10-pi.c
··· 116 116 */ 117 117 static void t10_pi_type1_prepare(struct request *rq) 118 118 { 119 - struct blk_integrity *bi = &rq->q->integrity; 119 + struct blk_integrity *bi = &rq->q->limits.integrity; 120 120 const int tuple_sz = bi->tuple_size; 121 121 u32 ref_tag = t10_pi_ref_tag(rq); 122 122 u8 offset = bi->pi_offset; ··· 167 167 */ 168 168 static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) 169 169 { 170 - struct blk_integrity *bi = &rq->q->integrity; 170 + struct blk_integrity *bi = &rq->q->limits.integrity; 171 171 unsigned intervals = nr_bytes >> bi->interval_exp; 172 172 const int tuple_sz = bi->tuple_size; 173 173 u32 ref_tag = t10_pi_ref_tag(rq); ··· 290 290 291 291 static void ext_pi_type1_prepare(struct request *rq) 292 292 { 293 - struct blk_integrity *bi = &rq->q->integrity; 293 + struct blk_integrity *bi = &rq->q->limits.integrity; 294 294 const int tuple_sz = bi->tuple_size; 295 295 u64 ref_tag = ext_pi_ref_tag(rq); 296 296 u8 offset = bi->pi_offset; ··· 330 330 331 331 static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) 332 332 { 333 - struct blk_integrity *bi = &rq->q->integrity; 333 + struct blk_integrity *bi = &rq->q->limits.integrity; 334 334 unsigned intervals = nr_bytes >> bi->interval_exp; 335 335 const int tuple_sz = bi->tuple_size; 336 336 u64 ref_tag = ext_pi_ref_tag(rq); ··· 396 396 397 397 void blk_integrity_prepare(struct request *rq) 398 398 { 399 - struct blk_integrity *bi = &rq->q->integrity; 399 + struct blk_integrity *bi = &rq->q->limits.integrity; 400 400 401 401 if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) 402 402 return; ··· 409 409 410 410 void blk_integrity_complete(struct request *rq, unsigned int nr_bytes) 411 411 { 412 - struct blk_integrity *bi = &rq->q->integrity; 412 + struct blk_integrity *bi = &rq->q->limits.integrity; 413 413 414 414 if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) 415 415 return;
-1
drivers/md/dm-core.h
··· 206 206 207 207 bool integrity_supported:1; 208 208 bool singleton:1; 209 - unsigned integrity_added:1; 210 209 211 210 /* 212 211 * Indicates the rw permissions for the new logical device. This
+11 -16
drivers/md/dm-integrity.c
··· 3475 3475 limits->dma_alignment = limits->logical_block_size - 1; 3476 3476 limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; 3477 3477 } 3478 + 3479 + if (!ic->internal_hash) { 3480 + struct blk_integrity *bi = &limits->integrity; 3481 + 3482 + memset(bi, 0, sizeof(*bi)); 3483 + bi->tuple_size = ic->tag_size; 3484 + bi->tag_size = bi->tuple_size; 3485 + bi->interval_exp = 3486 + ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 3487 + } 3488 + 3478 3489 limits->max_integrity_segments = USHRT_MAX; 3479 3490 } 3480 3491 ··· 3640 3629 sb_set_version(ic); 3641 3630 3642 3631 return 0; 3643 - } 3644 - 3645 - static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) 3646 - { 3647 - struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); 3648 - struct blk_integrity bi; 3649 - 3650 - memset(&bi, 0, sizeof(bi)); 3651 - bi.tuple_size = ic->tag_size; 3652 - bi.tag_size = bi.tuple_size; 3653 - bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 3654 - 3655 - blk_integrity_register(disk, &bi); 3656 3632 } 3657 3633 3658 3634 static void dm_integrity_free_page_list(struct page_list *pl) ··· 4626 4628 goto bad; 4627 4629 } 4628 4630 } 4629 - 4630 - if (!ic->internal_hash) 4631 - dm_integrity_set(ti, ic); 4632 4631 4633 4632 ti->num_flush_bios = 1; 4634 4633 ti->flush_supported = true;
+27 -134
drivers/md/dm-table.c
··· 425 425 q->limits.logical_block_size, 426 426 q->limits.alignment_offset, 427 427 (unsigned long long) start << SECTOR_SHIFT); 428 + 429 + /* 430 + * Only stack the integrity profile if the target doesn't have native 431 + * integrity support. 432 + */ 433 + if (!dm_target_has_integrity(ti->type)) 434 + queue_limits_stack_integrity_bdev(limits, bdev); 428 435 return 0; 429 436 } 430 437 ··· 708 701 } 709 702 t->immutable_target_type = ti->type; 710 703 } 711 - 712 - if (dm_target_has_integrity(ti->type)) 713 - t->integrity_added = 1; 714 704 715 705 ti->table = t; 716 706 ti->begin = start; ··· 1123 1119 return r; 1124 1120 } 1125 1121 1126 - static bool integrity_profile_exists(struct gendisk *disk) 1127 - { 1128 - return !!blk_get_integrity(disk); 1129 - } 1130 - 1131 - /* 1132 - * Get a disk whose integrity profile reflects the table's profile. 1133 - * Returns NULL if integrity support was inconsistent or unavailable. 1134 - */ 1135 - static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) 1136 - { 1137 - struct list_head *devices = dm_table_get_devices(t); 1138 - struct dm_dev_internal *dd = NULL; 1139 - struct gendisk *prev_disk = NULL, *template_disk = NULL; 1140 - 1141 - for (unsigned int i = 0; i < t->num_targets; i++) { 1142 - struct dm_target *ti = dm_table_get_target(t, i); 1143 - 1144 - if (!dm_target_passes_integrity(ti->type)) 1145 - goto no_integrity; 1146 - } 1147 - 1148 - list_for_each_entry(dd, devices, list) { 1149 - template_disk = dd->dm_dev->bdev->bd_disk; 1150 - if (!integrity_profile_exists(template_disk)) 1151 - goto no_integrity; 1152 - else if (prev_disk && 1153 - blk_integrity_compare(prev_disk, template_disk) < 0) 1154 - goto no_integrity; 1155 - prev_disk = template_disk; 1156 - } 1157 - 1158 - return template_disk; 1159 - 1160 - no_integrity: 1161 - if (prev_disk) 1162 - DMWARN("%s: integrity not set: %s and %s profile mismatch", 1163 - dm_device_name(t->md), 1164 - prev_disk->disk_name, 1165 - 
template_disk->disk_name); 1166 - return NULL; 1167 - } 1168 - 1169 - /* 1170 - * Register the mapped device for blk_integrity support if the 1171 - * underlying devices have an integrity profile. But all devices may 1172 - * not have matching profiles (checking all devices isn't reliable 1173 - * during table load because this table may use other DM device(s) which 1174 - * must be resumed before they will have an initialized integity 1175 - * profile). Consequently, stacked DM devices force a 2 stage integrity 1176 - * profile validation: First pass during table load, final pass during 1177 - * resume. 1178 - */ 1179 - static int dm_table_register_integrity(struct dm_table *t) 1180 - { 1181 - struct mapped_device *md = t->md; 1182 - struct gendisk *template_disk = NULL; 1183 - 1184 - /* If target handles integrity itself do not register it here. */ 1185 - if (t->integrity_added) 1186 - return 0; 1187 - 1188 - template_disk = dm_table_get_integrity_disk(t); 1189 - if (!template_disk) 1190 - return 0; 1191 - 1192 - if (!integrity_profile_exists(dm_disk(md))) { 1193 - t->integrity_supported = true; 1194 - /* 1195 - * Register integrity profile during table load; we can do 1196 - * this because the final profile must match during resume. 1197 - */ 1198 - blk_integrity_register(dm_disk(md), 1199 - blk_get_integrity(template_disk)); 1200 - return 0; 1201 - } 1202 - 1203 - /* 1204 - * If DM device already has an initialized integrity 1205 - * profile the new profile should not conflict. 
1206 - */ 1207 - if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { 1208 - DMERR("%s: conflict with existing integrity profile: %s profile mismatch", 1209 - dm_device_name(t->md), 1210 - template_disk->disk_name); 1211 - return 1; 1212 - } 1213 - 1214 - /* Preserve existing integrity profile */ 1215 - t->integrity_supported = true; 1216 - return 0; 1217 - } 1218 - 1219 1122 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1220 1123 1221 1124 struct dm_crypto_profile { ··· 1331 1420 r = dm_table_build_index(t); 1332 1421 if (r) { 1333 1422 DMERR("unable to build btrees"); 1334 - return r; 1335 - } 1336 - 1337 - r = dm_table_register_integrity(t); 1338 - if (r) { 1339 - DMERR("could not register integrity profile."); 1340 1423 return r; 1341 1424 } 1342 1425 ··· 1593 1688 1594 1689 blk_set_stacking_limits(limits); 1595 1690 1691 + t->integrity_supported = true; 1692 + for (unsigned int i = 0; i < t->num_targets; i++) { 1693 + struct dm_target *ti = dm_table_get_target(t, i); 1694 + 1695 + if (!dm_target_passes_integrity(ti->type)) 1696 + t->integrity_supported = false; 1697 + } 1698 + 1596 1699 for (unsigned int i = 0; i < t->num_targets; i++) { 1597 1700 struct dm_target *ti = dm_table_get_target(t, i); 1598 1701 ··· 1651 1738 dm_device_name(t->md), 1652 1739 (unsigned long long) ti->begin, 1653 1740 (unsigned long long) ti->len); 1741 + 1742 + if (t->integrity_supported || 1743 + dm_target_has_integrity(ti->type)) { 1744 + if (!queue_limits_stack_integrity(limits, &ti_limits)) { 1745 + DMWARN("%s: adding target device (start sect %llu len %llu) " 1746 + "disabled integrity support due to incompatibility", 1747 + dm_device_name(t->md), 1748 + (unsigned long long) ti->begin, 1749 + (unsigned long long) ti->len); 1750 + t->integrity_supported = false; 1751 + } 1752 + } 1654 1753 } 1655 1754 1656 1755 /* ··· 1684 1759 return -EINVAL; 1685 1760 1686 1761 return validate_hardware_logical_block_alignment(t, limits); 1687 - } 1688 - 1689 - /* 1690 - * Verify that all devices 
have an integrity profile that matches the 1691 - * DM device's registered integrity profile. If the profiles don't 1692 - * match then unregister the DM device's integrity profile. 1693 - */ 1694 - static void dm_table_verify_integrity(struct dm_table *t) 1695 - { 1696 - struct gendisk *template_disk = NULL; 1697 - 1698 - if (t->integrity_added) 1699 - return; 1700 - 1701 - if (t->integrity_supported) { 1702 - /* 1703 - * Verify that the original integrity profile 1704 - * matches all the devices in this table. 1705 - */ 1706 - template_disk = dm_table_get_integrity_disk(t); 1707 - if (template_disk && 1708 - blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) 1709 - return; 1710 - } 1711 - 1712 - if (integrity_profile_exists(dm_disk(t->md))) { 1713 - DMWARN("%s: unable to establish an integrity profile", 1714 - dm_device_name(t->md)); 1715 - blk_integrity_unregister(dm_disk(t->md)); 1716 - } 1717 1762 } 1718 1763 1719 1764 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, ··· 1898 2003 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); 1899 2004 else 1900 2005 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 1901 - 1902 - dm_table_verify_integrity(t); 1903 2006 1904 2007 /* 1905 2008 * Some devices don't use blk_integrity but still want stable pages
+17 -55
drivers/md/md.c
··· 2410 2410 */ 2411 2411 int md_integrity_register(struct mddev *mddev) 2412 2412 { 2413 - struct md_rdev *rdev, *reference = NULL; 2414 - 2415 2413 if (list_empty(&mddev->disks)) 2416 2414 return 0; /* nothing to do */ 2417 - if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk)) 2418 - return 0; /* shouldn't register, or already is */ 2419 - rdev_for_each(rdev, mddev) { 2420 - /* skip spares and non-functional disks */ 2421 - if (test_bit(Faulty, &rdev->flags)) 2422 - continue; 2423 - if (rdev->raid_disk < 0) 2424 - continue; 2425 - if (!reference) { 2426 - /* Use the first rdev as the reference */ 2427 - reference = rdev; 2428 - continue; 2429 - } 2430 - /* does this rdev's profile match the reference profile? */ 2431 - if (blk_integrity_compare(reference->bdev->bd_disk, 2432 - rdev->bdev->bd_disk) < 0) 2433 - return -EINVAL; 2434 - } 2435 - if (!reference || !bdev_get_integrity(reference->bdev)) 2436 - return 0; 2437 - /* 2438 - * All component devices are integrity capable and have matching 2439 - * profiles, register the common profile for the md device. 
2440 - */ 2441 - blk_integrity_register(mddev->gendisk, 2442 - bdev_get_integrity(reference->bdev)); 2415 + if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk)) 2416 + return 0; /* shouldn't register */ 2443 2417 2444 2418 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2445 2419 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || ··· 2432 2458 return 0; 2433 2459 } 2434 2460 EXPORT_SYMBOL(md_integrity_register); 2435 - 2436 - /* 2437 - * Attempt to add an rdev, but only if it is consistent with the current 2438 - * integrity profile 2439 - */ 2440 - int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2441 - { 2442 - struct blk_integrity *bi_mddev; 2443 - 2444 - if (mddev_is_dm(mddev)) 2445 - return 0; 2446 - 2447 - bi_mddev = blk_get_integrity(mddev->gendisk); 2448 - 2449 - if (!bi_mddev) /* nothing to do */ 2450 - return 0; 2451 - 2452 - if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2453 - pr_err("%s: incompatible integrity profile for %pg\n", 2454 - mdname(mddev), rdev->bdev); 2455 - return -ENXIO; 2456 - } 2457 - 2458 - return 0; 2459 - } 2460 - EXPORT_SYMBOL(md_integrity_add_rdev); 2461 2461 2462 2462 static bool rdev_read_only(struct md_rdev *rdev) 2463 2463 { ··· 5703 5755 int mdp_major = 0; 5704 5756 5705 5757 /* stack the limit for all rdevs into lim */ 5706 - void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim) 5758 + int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, 5759 + unsigned int flags) 5707 5760 { 5708 5761 struct md_rdev *rdev; 5709 5762 5710 5763 rdev_for_each(rdev, mddev) { 5711 5764 queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, 5712 5765 mddev->gendisk->disk_name); 5766 + if ((flags & MDDEV_STACK_INTEGRITY) && 5767 + !queue_limits_stack_integrity_bdev(lim, rdev->bdev)) 5768 + return -EINVAL; 5713 5769 } 5770 + 5771 + return 0; 5714 5772 } 5715 5773 EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); 5716 5774 ··· 
5731 5777 lim = queue_limits_start_update(mddev->gendisk->queue); 5732 5778 queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, 5733 5779 mddev->gendisk->disk_name); 5780 + 5781 + if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) { 5782 + pr_err("%s: incompatible integrity profile for %pg\n", 5783 + mdname(mddev), rdev->bdev); 5784 + queue_limits_cancel_update(mddev->gendisk->queue); 5785 + return -ENXIO; 5786 + } 5787 + 5734 5788 return queue_limits_commit_update(mddev->gendisk->queue, &lim); 5735 5789 } 5736 5790 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
+3 -2
drivers/md/md.h
··· 809 809 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 810 810 extern int md_check_no_bitmap(struct mddev *mddev); 811 811 extern int md_integrity_register(struct mddev *mddev); 812 - extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); 813 812 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); 814 813 815 814 extern int mddev_init(struct mddev *mddev); ··· 907 908 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); 908 909 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); 909 910 int do_md_run(struct mddev *mddev); 910 - void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim); 911 + #define MDDEV_STACK_INTEGRITY (1u << 0) 912 + int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, 913 + unsigned int flags); 911 914 int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev); 912 915 void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes); 913 916
+6 -1
drivers/md/raid0.c
··· 377 377 static int raid0_set_limits(struct mddev *mddev) 378 378 { 379 379 struct queue_limits lim; 380 + int err; 380 381 381 382 blk_set_stacking_limits(&lim); 382 383 lim.max_hw_sectors = mddev->chunk_sectors; 383 384 lim.max_write_zeroes_sectors = mddev->chunk_sectors; 384 385 lim.io_min = mddev->chunk_sectors << 9; 385 386 lim.io_opt = lim.io_min * mddev->raid_disks; 386 - mddev_stack_rdev_limits(mddev, &lim); 387 + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); 388 + if (err) { 389 + queue_limits_cancel_update(mddev->gendisk->queue); 390 + return err; 391 + } 387 392 return queue_limits_set(mddev->gendisk->queue, &lim); 388 393 } 389 394
+6 -4
drivers/md/raid1.c
··· 1907 1907 if (mddev->recovery_disabled == conf->recovery_disabled) 1908 1908 return -EBUSY; 1909 1909 1910 - if (md_integrity_add_rdev(rdev, mddev)) 1911 - return -ENXIO; 1912 - 1913 1910 if (rdev->raid_disk >= 0) 1914 1911 first = last = rdev->raid_disk; 1915 1912 ··· 3194 3197 static int raid1_set_limits(struct mddev *mddev) 3195 3198 { 3196 3199 struct queue_limits lim; 3200 + int err; 3197 3201 3198 3202 blk_set_stacking_limits(&lim); 3199 3203 lim.max_write_zeroes_sectors = 0; 3200 - mddev_stack_rdev_limits(mddev, &lim); 3204 + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); 3205 + if (err) { 3206 + queue_limits_cancel_update(mddev->gendisk->queue); 3207 + return err; 3208 + } 3201 3209 return queue_limits_set(mddev->gendisk->queue, &lim); 3202 3210 } 3203 3211
+6 -4
drivers/md/raid10.c
··· 2083 2083 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) 2084 2084 return -EINVAL; 2085 2085 2086 - if (md_integrity_add_rdev(rdev, mddev)) 2087 - return -ENXIO; 2088 - 2089 2086 if (rdev->raid_disk >= 0) 2090 2087 first = last = rdev->raid_disk; 2091 2088 ··· 3977 3980 { 3978 3981 struct r10conf *conf = mddev->private; 3979 3982 struct queue_limits lim; 3983 + int err; 3980 3984 3981 3985 blk_set_stacking_limits(&lim); 3982 3986 lim.max_write_zeroes_sectors = 0; 3983 3987 lim.io_min = mddev->chunk_sectors << 9; 3984 3988 lim.io_opt = lim.io_min * raid10_nr_stripes(conf); 3985 - mddev_stack_rdev_limits(mddev, &lim); 3989 + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); 3990 + if (err) { 3991 + queue_limits_cancel_update(mddev->gendisk->queue); 3992 + return err; 3993 + } 3986 3994 return queue_limits_set(mddev->gendisk->queue, &lim); 3987 3995 } 3988 3996
+1 -1
drivers/md/raid5.c
··· 7708 7708 lim.raid_partial_stripes_expensive = 1; 7709 7709 lim.discard_granularity = stripe; 7710 7710 lim.max_write_zeroes_sectors = 0; 7711 - mddev_stack_rdev_limits(mddev, &lim); 7711 + mddev_stack_rdev_limits(mddev, &lim, 0); 7712 7712 rdev_for_each(rdev, mddev) 7713 7713 queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset, 7714 7714 mddev->gendisk->disk_name);
+5 -8
drivers/nvdimm/btt.c
··· 1504 1504 }; 1505 1505 int rc; 1506 1506 1507 + if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { 1508 + lim.integrity.tuple_size = btt_meta_size(btt); 1509 + lim.integrity.tag_size = btt_meta_size(btt); 1510 + } 1511 + 1507 1512 btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); 1508 1513 if (IS_ERR(btt->btt_disk)) 1509 1514 return PTR_ERR(btt->btt_disk); ··· 1520 1515 1521 1516 blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); 1522 1517 blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); 1523 - 1524 - if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { 1525 - struct blk_integrity bi = { 1526 - .tuple_size = btt_meta_size(btt), 1527 - .tag_size = btt_meta_size(btt), 1528 - }; 1529 - blk_integrity_register(btt->btt_disk, &bi); 1530 - } 1531 1518 1532 1519 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); 1533 1520 rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
+36 -34
drivers/nvme/host/core.c
··· 1723 1723 return 0; 1724 1724 } 1725 1725 1726 - static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) 1726 + static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head, 1727 + struct queue_limits *lim) 1727 1728 { 1728 - struct blk_integrity integrity = { }; 1729 + struct blk_integrity *bi = &lim->integrity; 1729 1730 1730 - blk_integrity_unregister(disk); 1731 + memset(bi, 0, sizeof(*bi)); 1731 1732 1732 1733 if (!head->ms) 1733 1734 return true; ··· 1745 1744 case NVME_NS_DPS_PI_TYPE3: 1746 1745 switch (head->guard_type) { 1747 1746 case NVME_NVM_NS_16B_GUARD: 1748 - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; 1749 - integrity.tag_size = sizeof(u16) + sizeof(u32); 1750 - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1747 + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; 1748 + bi->tag_size = sizeof(u16) + sizeof(u32); 1749 + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1751 1750 break; 1752 1751 case NVME_NVM_NS_64B_GUARD: 1753 - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; 1754 - integrity.tag_size = sizeof(u16) + 6; 1755 - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1752 + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; 1753 + bi->tag_size = sizeof(u16) + 6; 1754 + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1756 1755 break; 1757 1756 default: 1758 1757 break; ··· 1762 1761 case NVME_NS_DPS_PI_TYPE2: 1763 1762 switch (head->guard_type) { 1764 1763 case NVME_NVM_NS_16B_GUARD: 1765 - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; 1766 - integrity.tag_size = sizeof(u16); 1767 - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE | 1768 - BLK_INTEGRITY_REF_TAG; 1764 + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; 1765 + bi->tag_size = sizeof(u16); 1766 + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | 1767 + BLK_INTEGRITY_REF_TAG; 1769 1768 break; 1770 1769 case NVME_NVM_NS_64B_GUARD: 1771 - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; 1772 - integrity.tag_size = sizeof(u16); 1773 - integrity.flags |= 
BLK_INTEGRITY_DEVICE_CAPABLE | 1774 - BLK_INTEGRITY_REF_TAG; 1770 + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; 1771 + bi->tag_size = sizeof(u16); 1772 + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | 1773 + BLK_INTEGRITY_REF_TAG; 1775 1774 break; 1776 1775 default: 1777 1776 break; ··· 1781 1780 break; 1782 1781 } 1783 1782 1784 - integrity.tuple_size = head->ms; 1785 - integrity.pi_offset = head->pi_offset; 1786 - blk_integrity_register(disk, &integrity); 1783 + bi->tuple_size = head->ms; 1784 + bi->pi_offset = head->pi_offset; 1787 1785 return true; 1788 1786 } 1789 1787 ··· 2105 2105 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2106 2106 ns->head->ids.csi == NVME_CSI_ZNS) 2107 2107 nvme_update_zone_info(ns, &lim, &zi); 2108 - ret = queue_limits_commit_update(ns->disk->queue, &lim); 2109 - if (ret) { 2110 - blk_mq_unfreeze_queue(ns->disk->queue); 2111 - goto out; 2112 - } 2113 2108 2114 2109 /* 2115 2110 * Register a metadata profile for PI, or the plain non-integrity NVMe ··· 2112 2117 * I/O to namespaces with metadata except when the namespace supports 2113 2118 * PI, as it can strip/insert in that case. 
2114 2119 */ 2115 - if (!nvme_init_integrity(ns->disk, ns->head)) 2120 + if (!nvme_init_integrity(ns->disk, ns->head, &lim)) 2116 2121 capacity = 0; 2122 + 2123 + ret = queue_limits_commit_update(ns->disk->queue, &lim); 2124 + if (ret) { 2125 + blk_mq_unfreeze_queue(ns->disk->queue); 2126 + goto out; 2127 + } 2117 2128 2118 2129 set_capacity_and_notify(ns->disk, capacity); 2119 2130 ··· 2192 2191 struct queue_limits lim; 2193 2192 2194 2193 blk_mq_freeze_queue(ns->head->disk->queue); 2195 - if (unsupported) 2196 - ns->head->disk->flags |= GENHD_FL_HIDDEN; 2197 - else 2198 - nvme_init_integrity(ns->head->disk, ns->head); 2199 - set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); 2200 - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2201 - nvme_mpath_revalidate_paths(ns); 2202 - 2203 2194 /* 2204 2195 * queue_limits mixes values that are the hardware limitations 2205 2196 * for bio splitting with what is the device configuration. ··· 2214 2221 lim.io_opt = ns_lim->io_opt; 2215 2222 queue_limits_stack_bdev(&lim, ns->disk->part0, 0, 2216 2223 ns->head->disk->disk_name); 2224 + if (unsupported) 2225 + ns->head->disk->flags |= GENHD_FL_HIDDEN; 2226 + else 2227 + nvme_init_integrity(ns->head->disk, ns->head, &lim); 2217 2228 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2229 + 2230 + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); 2231 + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2232 + nvme_mpath_revalidate_paths(ns); 2233 + 2218 2234 blk_mq_unfreeze_queue(ns->head->disk->queue); 2219 2235 } 2220 2236
+5 -3
drivers/scsi/sd.c
··· 2482 2482 return 0; 2483 2483 } 2484 2484 2485 - static void sd_config_protection(struct scsi_disk *sdkp) 2485 + static void sd_config_protection(struct scsi_disk *sdkp, 2486 + struct queue_limits *lim) 2486 2487 { 2487 2488 struct scsi_device *sdp = sdkp->device; 2488 2489 2489 - sd_dif_config_host(sdkp); 2490 + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) 2491 + sd_dif_config_host(sdkp, lim); 2490 2492 2491 2493 if (!sdkp->protection_type) 2492 2494 return; ··· 3679 3677 sd_read_app_tag_own(sdkp, buffer); 3680 3678 sd_read_write_same(sdkp, buffer); 3681 3679 sd_read_security(sdkp, buffer); 3682 - sd_config_protection(sdkp); 3680 + sd_config_protection(sdkp, &lim); 3683 3681 } 3684 3682 3685 3683 /*
+1 -11
drivers/scsi/sd.h
··· 220 220 return sector >> (ilog2(sdev->sector_size) - 9); 221 221 } 222 222 223 - #ifdef CONFIG_BLK_DEV_INTEGRITY 224 - 225 - extern void sd_dif_config_host(struct scsi_disk *); 226 - 227 - #else /* CONFIG_BLK_DEV_INTEGRITY */ 228 - 229 - static inline void sd_dif_config_host(struct scsi_disk *disk) 230 - { 231 - } 232 - 233 - #endif /* CONFIG_BLK_DEV_INTEGRITY */ 223 + void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim); 234 224 235 225 static inline int sd_is_zoned(struct scsi_disk *sdkp) 236 226 {
+14 -20
drivers/scsi/sd_dif.c
··· 24 24 /* 25 25 * Configure exchange of protection information between OS and HBA. 26 26 */ 27 - void sd_dif_config_host(struct scsi_disk *sdkp) 27 + void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim) 28 28 { 29 29 struct scsi_device *sdp = sdkp->device; 30 - struct gendisk *disk = sdkp->disk; 31 30 u8 type = sdkp->protection_type; 32 - struct blk_integrity bi; 31 + struct blk_integrity *bi = &lim->integrity; 33 32 int dif, dix; 33 + 34 + memset(bi, 0, sizeof(*bi)); 34 35 35 36 dif = scsi_host_dif_capable(sdp->host, type); 36 37 dix = scsi_host_dix_capable(sdp->host, type); ··· 40 39 dif = 0; dix = 1; 41 40 } 42 41 43 - if (!dix) { 44 - blk_integrity_unregister(disk); 42 + if (!dix) 45 43 return; 46 - } 47 - 48 - memset(&bi, 0, sizeof(bi)); 49 44 50 45 /* Enable DMA of protection information */ 51 46 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) 52 - bi.csum_type = BLK_INTEGRITY_CSUM_IP; 47 + bi->csum_type = BLK_INTEGRITY_CSUM_IP; 53 48 else 54 - bi.csum_type = BLK_INTEGRITY_CSUM_CRC; 49 + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; 55 50 56 51 if (type != T10_PI_TYPE3_PROTECTION) 57 - bi.flags |= BLK_INTEGRITY_REF_TAG; 52 + bi->flags |= BLK_INTEGRITY_REF_TAG; 58 53 59 - bi.tuple_size = sizeof(struct t10_pi_tuple); 54 + bi->tuple_size = sizeof(struct t10_pi_tuple); 60 55 61 56 if (dif && type) { 62 - bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 57 + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 63 58 64 59 if (!sdkp->ATO) 65 - goto out; 60 + return; 66 61 67 62 if (type == T10_PI_TYPE3_PROTECTION) 68 - bi.tag_size = sizeof(u16) + sizeof(u32); 63 + bi->tag_size = sizeof(u16) + sizeof(u32); 69 64 else 70 - bi.tag_size = sizeof(u16); 65 + bi->tag_size = sizeof(u16); 71 66 } 72 67 73 68 sd_first_printk(KERN_NOTICE, sdkp, 74 69 "Enabling DIX %s, application tag size %u bytes\n", 75 - blk_integrity_profile_name(&bi), bi.tag_size); 76 - out: 77 - blk_integrity_register(disk, &bi); 70 + blk_integrity_profile_name(bi), bi->tag_size); 78 
71 } 79 -
+11 -16
include/linux/blk-integrity.h
··· 11 11 BLK_INTEGRITY_NOGENERATE = 1 << 1, 12 12 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, 13 13 BLK_INTEGRITY_REF_TAG = 1 << 3, 14 + BLK_INTEGRITY_STACKED = 1 << 4, 14 15 }; 15 16 16 17 struct blk_integrity_iter { ··· 24 23 }; 25 24 26 25 const char *blk_integrity_profile_name(struct blk_integrity *bi); 26 + bool queue_limits_stack_integrity(struct queue_limits *t, 27 + struct queue_limits *b); 28 + static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, 29 + struct block_device *bdev) 30 + { 31 + return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits); 32 + } 27 33 28 34 #ifdef CONFIG_BLK_DEV_INTEGRITY 29 - void blk_integrity_register(struct gendisk *, struct blk_integrity *); 30 - void blk_integrity_unregister(struct gendisk *); 31 - int blk_integrity_compare(struct gendisk *, struct gendisk *); 32 35 int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 33 36 struct scatterlist *); 34 37 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); ··· 40 35 static inline bool 41 36 blk_integrity_queue_supports_integrity(struct request_queue *q) 42 37 { 43 - return q->integrity.tuple_size; 38 + return q->limits.integrity.tuple_size; 44 39 } 45 40 46 41 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 47 42 { 48 43 if (!blk_integrity_queue_supports_integrity(disk->queue)) 49 44 return NULL; 50 - return &disk->queue->integrity; 45 + return &disk->queue->limits.integrity; 51 46 } 52 47 53 48 static inline struct blk_integrity * ··· 124 119 { 125 120 return false; 126 121 } 127 - static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) 128 - { 129 - return 0; 130 - } 131 - static inline void blk_integrity_register(struct gendisk *d, 132 - struct blk_integrity *b) 133 - { 134 - } 135 - static inline void blk_integrity_unregister(struct gendisk *d) 136 - { 137 - } 138 122 static inline unsigned short 139 123 queue_max_integrity_segments(const struct request_queue *q) 
140 124 { ··· 151 157 return NULL; 152 158 } 153 159 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 160 + 154 161 #endif /* _LINUX_BLK_INTEGRITY_H */
+4 -8
include/linux/blkdev.h
··· 334 334 * due to possible offsets. 335 335 */ 336 336 unsigned int dma_alignment; 337 + 338 + struct blk_integrity integrity; 337 339 }; 338 340 339 341 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, ··· 420 418 struct kobject *mq_kobj; 421 419 422 420 struct queue_limits limits; 423 - 424 - #ifdef CONFIG_BLK_DEV_INTEGRITY 425 - struct blk_integrity integrity; 426 - #endif /* CONFIG_BLK_DEV_INTEGRITY */ 427 421 428 422 #ifdef CONFIG_PM 429 423 struct device *dev; ··· 1298 1300 { 1299 1301 struct request_queue *q = bdev_get_queue(bdev); 1300 1302 1301 - #ifdef CONFIG_BLK_DEV_INTEGRITY 1302 - /* BLK_INTEGRITY_CSUM_NONE is not available in blkdev.h */ 1303 - if (q->integrity.csum_type != 0) 1303 + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 1304 + q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) 1304 1305 return true; 1305 - #endif 1306 1306 return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags); 1307 1307 } 1308 1308
+2 -10
include/linux/t10-pi.h
··· 39 39 40 40 static inline u32 t10_pi_ref_tag(struct request *rq) 41 41 { 42 - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); 42 + unsigned int shift = rq->q->limits.integrity.interval_exp; 43 43 44 - #ifdef CONFIG_BLK_DEV_INTEGRITY 45 - if (rq->q->integrity.interval_exp) 46 - shift = rq->q->integrity.interval_exp; 47 - #endif 48 44 return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; 49 45 } 50 46 ··· 61 65 62 66 static inline u64 ext_pi_ref_tag(struct request *rq) 63 67 { 64 - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); 68 + unsigned int shift = rq->q->limits.integrity.interval_exp; 65 69 66 - #ifdef CONFIG_BLK_DEV_INTEGRITY 67 - if (rq->q->integrity.interval_exp) 68 - shift = rq->q->integrity.interval_exp; 69 - #endif 70 70 return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); 71 71 } 72 72