Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: decouple REQ_OP_SECURE_ERASE from REQ_OP_DISCARD

Secure erase is a very different operation from discard in that it is
a data integrity operation vs hint. Fully split the limits and helper
infrastructure to make the separation more clear.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs2]
Acked-by: Jaegeuk Kim <jaegeuk@kernel.org> [f2fs]
Acked-by: Coly Li <colyli@suse.de> [bcache]
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Acked-by: Chao Yu <chao@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-27-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
44abff2c 7b47ef52

+168 -99
+1 -1
block/blk-core.c
··· 824 824 goto not_supported; 825 825 break; 826 826 case REQ_OP_SECURE_ERASE: 827 - if (!blk_queue_secure_erase(q)) 827 + if (!bdev_max_secure_erase_sectors(bdev)) 828 828 goto not_supported; 829 829 break; 830 830 case REQ_OP_ZONE_APPEND:
+45 -19
block/blk-lib.c
··· 36 36 } 37 37 38 38 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, 39 - sector_t nr_sects, gfp_t gfp_mask, int flags, 40 - struct bio **biop) 39 + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) 41 40 { 42 - struct request_queue *q = bdev_get_queue(bdev); 43 41 struct bio *bio = *biop; 44 - unsigned int op; 45 42 sector_t bs_mask; 46 43 47 44 if (bdev_read_only(bdev)) 48 45 return -EPERM; 49 - 50 - if (flags & BLKDEV_DISCARD_SECURE) { 51 - if (!blk_queue_secure_erase(q)) 52 - return -EOPNOTSUPP; 53 - op = REQ_OP_SECURE_ERASE; 54 - } else { 55 - if (!bdev_max_discard_sectors(bdev)) 56 - return -EOPNOTSUPP; 57 - op = REQ_OP_DISCARD; 58 - } 46 + if (!bdev_max_discard_sectors(bdev)) 47 + return -EOPNOTSUPP; 59 48 60 49 /* In case the discard granularity isn't set by buggy device driver */ 61 50 if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) { ··· 66 77 sector_t req_sects = 67 78 min(nr_sects, bio_discard_limit(bdev, sector)); 68 79 69 - bio = blk_next_bio(bio, bdev, 0, op, gfp_mask); 80 + bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask); 70 81 bio->bi_iter.bi_sector = sector; 71 82 bio->bi_iter.bi_size = req_sects << 9; 72 83 sector += req_sects; ··· 92 103 * @sector: start sector 93 104 * @nr_sects: number of sectors to discard 94 105 * @gfp_mask: memory allocation flags (for bio_alloc) 95 - * @flags: BLKDEV_DISCARD_* flags to control behaviour 96 106 * 97 107 * Description: 98 108 * Issue a discard request for the sectors in question. 
99 109 */ 100 110 int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 101 - sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) 111 + sector_t nr_sects, gfp_t gfp_mask) 102 112 { 103 113 struct bio *bio = NULL; 104 114 struct blk_plug plug; 105 115 int ret; 106 116 107 117 blk_start_plug(&plug); 108 - ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags, 109 - &bio); 118 + ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); 110 119 if (!ret && bio) { 111 120 ret = submit_bio_wait(bio); 112 121 if (ret == -EOPNOTSUPP) ··· 301 314 return ret; 302 315 } 303 316 EXPORT_SYMBOL(blkdev_issue_zeroout); 317 + 318 + int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, 319 + sector_t nr_sects, gfp_t gfp) 320 + { 321 + sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; 322 + unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev); 323 + struct bio *bio = NULL; 324 + struct blk_plug plug; 325 + int ret = 0; 326 + 327 + if (max_sectors == 0) 328 + return -EOPNOTSUPP; 329 + if ((sector | nr_sects) & bs_mask) 330 + return -EINVAL; 331 + if (bdev_read_only(bdev)) 332 + return -EPERM; 333 + 334 + blk_start_plug(&plug); 335 + for (;;) { 336 + unsigned int len = min_t(sector_t, nr_sects, max_sectors); 337 + 338 + bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp); 339 + bio->bi_iter.bi_sector = sector; 340 + bio->bi_iter.bi_size = len; 341 + 342 + sector += len << SECTOR_SHIFT; 343 + nr_sects -= len << SECTOR_SHIFT; 344 + if (!nr_sects) { 345 + ret = submit_bio_wait(bio); 346 + bio_put(bio); 347 + break; 348 + } 349 + cond_resched(); 350 + } 351 + blk_finish_plug(&plug); 352 + 353 + return ret; 354 + } 355 + EXPORT_SYMBOL(blkdev_issue_secure_erase);
-1
block/blk-mq-debugfs.c
··· 115 115 QUEUE_FLAG_NAME(IO_STAT), 116 116 QUEUE_FLAG_NAME(NOXMERGES), 117 117 QUEUE_FLAG_NAME(ADD_RANDOM), 118 - QUEUE_FLAG_NAME(SECERASE), 119 118 QUEUE_FLAG_NAME(SAME_FORCE), 120 119 QUEUE_FLAG_NAME(DEAD), 121 120 QUEUE_FLAG_NAME(INIT_DONE),
+15 -1
block/blk-settings.c
··· 46 46 lim->max_zone_append_sectors = 0; 47 47 lim->max_discard_sectors = 0; 48 48 lim->max_hw_discard_sectors = 0; 49 + lim->max_secure_erase_sectors = 0; 49 50 lim->discard_granularity = 0; 50 51 lim->discard_alignment = 0; 51 52 lim->discard_misaligned = 0; ··· 176 175 q->limits.max_discard_sectors = max_discard_sectors; 177 176 } 178 177 EXPORT_SYMBOL(blk_queue_max_discard_sectors); 178 + 179 + /** 180 + * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase 181 + * @q: the request queue for the device 182 + * @max_sectors: maximum number of sectors to secure_erase 183 + **/ 184 + void blk_queue_max_secure_erase_sectors(struct request_queue *q, 185 + unsigned int max_sectors) 186 + { 187 + q->limits.max_secure_erase_sectors = max_sectors; 188 + } 189 + EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors); 179 190 180 191 /** 181 192 * blk_queue_max_write_zeroes_sectors - set max sectors for a single ··· 674 661 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % 675 662 t->discard_granularity; 676 663 } 677 - 664 + t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors, 665 + b->max_secure_erase_sectors); 678 666 t->zone_write_granularity = max(t->zone_write_granularity, 679 667 b->zone_write_granularity); 680 668 t->zoned = max(t->zoned, b->zoned);
+1 -1
block/fops.c
··· 677 677 break; 678 678 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: 679 679 error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, 680 - len >> SECTOR_SHIFT, GFP_KERNEL, 0); 680 + len >> SECTOR_SHIFT, GFP_KERNEL); 681 681 break; 682 682 default: 683 683 error = -EOPNOTSUPP;
+35 -8
block/ioctl.c
··· 83 83 #endif 84 84 85 85 static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, 86 - unsigned long arg, unsigned long flags) 86 + unsigned long arg) 87 87 { 88 88 uint64_t range[2]; 89 89 uint64_t start, len; ··· 114 114 err = truncate_bdev_range(bdev, mode, start, start + len - 1); 115 115 if (err) 116 116 goto fail; 117 - 118 - err = blkdev_issue_discard(bdev, start >> 9, len >> 9, 119 - GFP_KERNEL, flags); 120 - 117 + err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); 121 118 fail: 122 119 filemap_invalidate_unlock(inode->i_mapping); 123 120 return err; 124 121 } 122 + 123 + static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode, 124 + void __user *argp) 125 + { 126 + uint64_t start, len; 127 + uint64_t range[2]; 128 + int err; 129 + 130 + if (!(mode & FMODE_WRITE)) 131 + return -EBADF; 132 + if (!bdev_max_secure_erase_sectors(bdev)) 133 + return -EOPNOTSUPP; 134 + if (copy_from_user(range, argp, sizeof(range))) 135 + return -EFAULT; 136 + 137 + start = range[0]; 138 + len = range[1]; 139 + if ((start & 511) || (len & 511)) 140 + return -EINVAL; 141 + if (start + len > bdev_nr_bytes(bdev)) 142 + return -EINVAL; 143 + 144 + filemap_invalidate_lock(bdev->bd_inode->i_mapping); 145 + err = truncate_bdev_range(bdev, mode, start, start + len - 1); 146 + if (!err) 147 + err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, 148 + GFP_KERNEL); 149 + filemap_invalidate_unlock(bdev->bd_inode->i_mapping); 150 + return err; 151 + } 152 + 125 153 126 154 static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, 127 155 unsigned long arg) ··· 478 450 case BLKROSET: 479 451 return blkdev_roset(bdev, mode, cmd, arg); 480 452 case BLKDISCARD: 481 - return blk_ioctl_discard(bdev, mode, arg, 0); 453 + return blk_ioctl_discard(bdev, mode, arg); 482 454 case BLKSECDISCARD: 483 - return blk_ioctl_discard(bdev, mode, arg, 484 - BLKDEV_DISCARD_SECURE); 455 + return blk_ioctl_secure_erase(bdev, mode, argp); 485 456 
case BLKZEROOUT: 486 457 return blk_ioctl_zeroout(bdev, mode, arg); 487 458 case BLKGETDISKSEQ:
+3 -2
drivers/block/drbd/drbd_receiver.c
··· 1547 1547 start = tmp; 1548 1548 } 1549 1549 while (nr_sectors >= max_discard_sectors) { 1550 - err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0); 1550 + err |= blkdev_issue_discard(bdev, start, max_discard_sectors, 1551 + GFP_NOIO); 1551 1552 nr_sectors -= max_discard_sectors; 1552 1553 start += max_discard_sectors; 1553 1554 } ··· 1560 1559 nr = nr_sectors; 1561 1560 nr -= (unsigned int)nr % granularity; 1562 1561 if (nr) { 1563 - err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0); 1562 + err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO); 1564 1563 nr_sectors -= nr; 1565 1564 start += nr; 1566 1565 }
+2 -2
drivers/block/rnbd/rnbd-clt.c
··· 1365 1365 dev->queue->limits.discard_granularity = dev->discard_granularity; 1366 1366 dev->queue->limits.discard_alignment = dev->discard_alignment; 1367 1367 if (dev->secure_discard) 1368 - blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue); 1369 - 1368 + blk_queue_max_secure_erase_sectors(dev->queue, 1369 + dev->max_discard_sectors); 1370 1370 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); 1371 1371 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); 1372 1372 blk_queue_max_segments(dev->queue, dev->max_segments);
+1 -1
drivers/block/rnbd/rnbd-srv-dev.h
··· 44 44 45 45 static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev) 46 46 { 47 - return blk_queue_secure_erase(bdev_get_queue(dev->bdev)); 47 + return bdev_max_secure_erase_sectors(dev->bdev); 48 48 } 49 49 50 50 static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
+8 -7
drivers/block/xen-blkback/blkback.c
··· 970 970 int status = BLKIF_RSP_OKAY; 971 971 struct xen_blkif *blkif = ring->blkif; 972 972 struct block_device *bdev = blkif->vbd.bdev; 973 - unsigned long secure; 974 973 struct phys_req preq; 975 974 976 975 xen_blkif_get(blkif); ··· 986 987 } 987 988 ring->st_ds_req++; 988 989 989 - secure = (blkif->vbd.discard_secure && 990 - (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? 991 - BLKDEV_DISCARD_SECURE : 0; 990 + if (blkif->vbd.discard_secure && 991 + (req->u.discard.flag & BLKIF_DISCARD_SECURE)) 992 + err = blkdev_issue_secure_erase(bdev, 993 + req->u.discard.sector_number, 994 + req->u.discard.nr_sectors, GFP_KERNEL); 995 + else 996 + err = blkdev_issue_discard(bdev, req->u.discard.sector_number, 997 + req->u.discard.nr_sectors, GFP_KERNEL); 992 998 993 - err = blkdev_issue_discard(bdev, req->u.discard.sector_number, 994 - req->u.discard.nr_sectors, 995 - GFP_KERNEL, secure); 996 999 fail_response: 997 1000 if (err == -EOPNOTSUPP) { 998 1001 pr_debug("discard op failed, not supported\n");
+1 -4
drivers/block/xen-blkback/xenbus.c
··· 484 484 { 485 485 struct xen_vbd *vbd; 486 486 struct block_device *bdev; 487 - struct request_queue *q; 488 487 489 488 vbd = &blkif->vbd; 490 489 vbd->handle = handle; ··· 515 516 if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) 516 517 vbd->type |= VDISK_REMOVABLE; 517 518 518 - q = bdev_get_queue(bdev); 519 519 if (bdev_write_cache(bdev)) 520 520 vbd->flush_support = true; 521 - 522 - if (q && blk_queue_secure_erase(q)) 521 + if (bdev_max_secure_erase_sectors(bdev)) 523 522 vbd->discard_secure = true; 524 523 525 524 vbd->feature_gnt_persistent = feature_persistent;
+3 -2
drivers/block/xen-blkfront.c
··· 949 949 info->physical_sector_size; 950 950 rq->limits.discard_alignment = info->discard_alignment; 951 951 if (info->feature_secdiscard) 952 - blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); 952 + blk_queue_max_secure_erase_sectors(rq, 953 + get_capacity(gd)); 953 954 } 954 955 955 956 /* Hard sector size and max sectors impersonate the equiv. hardware. */ ··· 1607 1606 info->feature_discard = 0; 1608 1607 info->feature_secdiscard = 0; 1609 1608 blk_queue_max_discard_sectors(rq, 0); 1610 - blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq); 1609 + blk_queue_max_secure_erase_sectors(rq, 0); 1611 1610 } 1612 1611 break; 1613 1612 case BLKIF_OP_FLUSH_DISKCACHE:
+1 -1
drivers/md/bcache/alloc.c
··· 336 336 mutex_unlock(&ca->set->bucket_lock); 337 337 blkdev_issue_discard(ca->bdev, 338 338 bucket_to_sector(ca->set, bucket), 339 - ca->sb.bucket_size, GFP_KERNEL, 0); 339 + ca->sb.bucket_size, GFP_KERNEL); 340 340 mutex_lock(&ca->set->bucket_lock); 341 341 } 342 342
+3 -5
drivers/md/dm-table.c
··· 1920 1920 struct dm_dev *dev, sector_t start, 1921 1921 sector_t len, void *data) 1922 1922 { 1923 - struct request_queue *q = bdev_get_queue(dev->bdev); 1924 - 1925 - return !blk_queue_secure_erase(q); 1923 + return !bdev_max_secure_erase_sectors(dev->bdev); 1926 1924 } 1927 1925 1928 1926 static bool dm_table_supports_secure_erase(struct dm_table *t) ··· 1973 1975 q->limits.discard_misaligned = 0; 1974 1976 } 1975 1977 1976 - if (dm_table_supports_secure_erase(t)) 1977 - blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); 1978 + if (!dm_table_supports_secure_erase(t)) 1979 + q->limits.max_secure_erase_sectors = 0; 1978 1980 1979 1981 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { 1980 1982 wc = true;
+2 -2
drivers/md/dm-thin.c
··· 398 398 sector_t s = block_to_sectors(tc->pool, data_b); 399 399 sector_t len = block_to_sectors(tc->pool, data_e - data_b); 400 400 401 - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, 402 - GFP_NOWAIT, 0, &op->bio); 401 + return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT, 402 + &op->bio); 403 403 } 404 404 405 405 static void end_discard(struct discard_op *op, int r)
+1 -1
drivers/md/md.c
··· 8584 8584 { 8585 8585 struct bio *discard_bio = NULL; 8586 8586 8587 - if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0, 8587 + if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 8588 8588 &discard_bio) || !discard_bio) 8589 8589 return; 8590 8590
+3 -3
drivers/md/raid5-cache.c
··· 1344 1344 if (log->last_checkpoint < end) { 1345 1345 blkdev_issue_discard(bdev, 1346 1346 log->last_checkpoint + log->rdev->data_offset, 1347 - end - log->last_checkpoint, GFP_NOIO, 0); 1347 + end - log->last_checkpoint, GFP_NOIO); 1348 1348 } else { 1349 1349 blkdev_issue_discard(bdev, 1350 1350 log->last_checkpoint + log->rdev->data_offset, 1351 1351 log->device_size - log->last_checkpoint, 1352 - GFP_NOIO, 0); 1352 + GFP_NOIO); 1353 1353 blkdev_issue_discard(bdev, log->rdev->data_offset, end, 1354 - GFP_NOIO, 0); 1354 + GFP_NOIO); 1355 1355 } 1356 1356 } 1357 1357
+1 -1
drivers/mmc/core/queue.c
··· 189 189 if (card->pref_erase > max_discard) 190 190 q->limits.discard_granularity = SECTOR_SIZE; 191 191 if (mmc_can_secure_erase_trim(card)) 192 - blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); 192 + blk_queue_max_secure_erase_sectors(q, max_discard); 193 193 } 194 194 195 195 static unsigned short mmc_get_max_segments(struct mmc_host *host)
+1 -1
drivers/nvme/target/io-cmd-bdev.c
··· 360 360 ret = __blkdev_issue_discard(ns->bdev, 361 361 nvmet_lba_to_sect(ns, range->slba), 362 362 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), 363 - GFP_KERNEL, 0, bio); 363 + GFP_KERNEL, bio); 364 364 if (ret && ret != -EOPNOTSUPP) { 365 365 req->error_slba = le64_to_cpu(range->slba); 366 366 return errno_to_nvme_status(req, ret);
+1 -1
drivers/target/target_core_file.c
··· 558 558 ret = blkdev_issue_discard(bdev, 559 559 target_to_linux_sector(dev, lba), 560 560 target_to_linux_sector(dev, nolb), 561 - GFP_KERNEL, 0); 561 + GFP_KERNEL); 562 562 if (ret < 0) { 563 563 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 564 564 ret);
+1 -1
drivers/target/target_core_iblock.c
··· 434 434 ret = blkdev_issue_discard(bdev, 435 435 target_to_linux_sector(dev, lba), 436 436 target_to_linux_sector(dev, nolb), 437 - GFP_KERNEL, 0); 437 + GFP_KERNEL); 438 438 if (ret < 0) { 439 439 pr_err("blkdev_issue_discard() failed: %d\n", ret); 440 440 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+2 -2
fs/btrfs/extent-tree.c
··· 1239 1239 1240 1240 if (size) { 1241 1241 ret = blkdev_issue_discard(bdev, start >> 9, size >> 9, 1242 - GFP_NOFS, 0); 1242 + GFP_NOFS); 1243 1243 if (!ret) 1244 1244 *discarded_bytes += size; 1245 1245 else if (ret != -EOPNOTSUPP) ··· 1256 1256 1257 1257 if (bytes_left) { 1258 1258 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9, 1259 - GFP_NOFS, 0); 1259 + GFP_NOFS); 1260 1260 if (!ret) 1261 1261 *discarded_bytes += bytes_left; 1262 1262 }
+1 -1
fs/ext4/mballoc.c
··· 3629 3629 return __blkdev_issue_discard(sb->s_bdev, 3630 3630 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3631 3631 (sector_t)count << (sb->s_blocksize_bits - 9), 3632 - GFP_NOFS, 0, biop); 3632 + GFP_NOFS, biop); 3633 3633 } else 3634 3634 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3635 3635 }
+8 -8
fs/f2fs/file.c
··· 3685 3685 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, 3686 3686 pgoff_t off, block_t block, block_t len, u32 flags) 3687 3687 { 3688 - struct request_queue *q = bdev_get_queue(bdev); 3689 3688 sector_t sector = SECTOR_FROM_BLOCK(block); 3690 3689 sector_t nr_sects = SECTOR_FROM_BLOCK(len); 3691 3690 int ret = 0; 3692 3691 3693 - if (!q) 3694 - return -ENXIO; 3695 - 3696 - if (flags & F2FS_TRIM_FILE_DISCARD) 3697 - ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 3698 - blk_queue_secure_erase(q) ? 3699 - BLKDEV_DISCARD_SECURE : 0); 3692 + if (flags & F2FS_TRIM_FILE_DISCARD) { 3693 + if (bdev_max_secure_erase_sectors(bdev)) 3694 + ret = blkdev_issue_secure_erase(bdev, sector, nr_sects, 3695 + GFP_NOFS); 3696 + else 3697 + ret = blkdev_issue_discard(bdev, sector, nr_sects, 3698 + GFP_NOFS); 3699 + } 3700 3700 3701 3701 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { 3702 3702 if (IS_ENCRYPTED(inode))
+1 -1
fs/f2fs/segment.c
··· 1244 1244 err = __blkdev_issue_discard(bdev, 1245 1245 SECTOR_FROM_BLOCK(start), 1246 1246 SECTOR_FROM_BLOCK(len), 1247 - GFP_NOFS, 0, &bio); 1247 + GFP_NOFS, &bio); 1248 1248 submit: 1249 1249 if (err) { 1250 1250 spin_lock_irqsave(&dc->lock, flags);
+1 -1
fs/jbd2/journal.c
··· 1825 1825 err = blkdev_issue_discard(journal->j_dev, 1826 1826 byte_start >> SECTOR_SHIFT, 1827 1827 byte_count >> SECTOR_SHIFT, 1828 - GFP_NOFS, 0); 1828 + GFP_NOFS); 1829 1829 } else if (flags & JBD2_JOURNAL_FLUSH_ZEROOUT) { 1830 1830 err = blkdev_issue_zeroout(journal->j_dev, 1831 1831 byte_start >> SECTOR_SHIFT,
+2 -2
fs/nilfs2/sufile.c
··· 1100 1100 ret = blkdev_issue_discard(nilfs->ns_bdev, 1101 1101 start * sects_per_block, 1102 1102 nblocks * sects_per_block, 1103 - GFP_NOFS, 0); 1103 + GFP_NOFS); 1104 1104 if (ret < 0) { 1105 1105 put_bh(su_bh); 1106 1106 goto out_sem; ··· 1134 1134 ret = blkdev_issue_discard(nilfs->ns_bdev, 1135 1135 start * sects_per_block, 1136 1136 nblocks * sects_per_block, 1137 - GFP_NOFS, 0); 1137 + GFP_NOFS); 1138 1138 if (!ret) 1139 1139 ndiscarded += nblocks; 1140 1140 }
+2 -2
fs/nilfs2/the_nilfs.c
··· 672 672 ret = blkdev_issue_discard(nilfs->ns_bdev, 673 673 start * sects_per_block, 674 674 nblocks * sects_per_block, 675 - GFP_NOFS, 0); 675 + GFP_NOFS); 676 676 if (ret < 0) 677 677 return ret; 678 678 nblocks = 0; ··· 682 682 ret = blkdev_issue_discard(nilfs->ns_bdev, 683 683 start * sects_per_block, 684 684 nblocks * sects_per_block, 685 - GFP_NOFS, 0); 685 + GFP_NOFS); 686 686 return ret; 687 687 } 688 688
+1 -1
fs/ntfs3/super.c
··· 1333 1333 return 0; 1334 1334 1335 1335 err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9, 1336 - GFP_NOFS, 0); 1336 + GFP_NOFS); 1337 1337 1338 1338 if (err == -EOPNOTSUPP) 1339 1339 sbi->flags |= NTFS_FLAGS_NODISCARD;
+1 -1
fs/xfs/xfs_discard.c
··· 114 114 } 115 115 116 116 trace_xfs_discard_extent(mp, agno, fbno, flen); 117 - error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); 117 + error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS); 118 118 if (error) 119 119 goto out_del_cursor; 120 120 *blocks_trimmed += flen;
+1 -1
fs/xfs/xfs_log_cil.c
··· 605 605 error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, 606 606 XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), 607 607 XFS_FSB_TO_BB(mp, busyp->length), 608 - GFP_NOFS, 0, &bio); 608 + GFP_NOFS, &bio); 609 609 if (error && error != -EOPNOTSUPP) { 610 610 xfs_info(mp, 611 611 "discard failed for extent [0x%llx,%u], error %d",
+16 -11
include/linux/blkdev.h
··· 248 248 unsigned int io_opt; 249 249 unsigned int max_discard_sectors; 250 250 unsigned int max_hw_discard_sectors; 251 + unsigned int max_secure_erase_sectors; 251 252 unsigned int max_write_zeroes_sectors; 252 253 unsigned int max_zone_append_sectors; 253 254 unsigned int discard_granularity; ··· 543 542 #define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ 544 543 #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ 545 544 #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ 546 - #define QUEUE_FLAG_SECERASE 11 /* supports secure erase */ 547 545 #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ 548 546 #define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ 549 547 #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ ··· 583 583 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 584 584 #define blk_queue_zone_resetall(q) \ 585 585 test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) 586 - #define blk_queue_secure_erase(q) \ 587 - (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) 588 586 #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) 589 587 #define blk_queue_pci_p2pdma(q) \ 590 588 test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) ··· 945 947 extern void blk_queue_max_segments(struct request_queue *, unsigned short); 946 948 extern void blk_queue_max_discard_segments(struct request_queue *, 947 949 unsigned short); 950 + void blk_queue_max_secure_erase_sectors(struct request_queue *q, 951 + unsigned int max_sectors); 948 952 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 949 953 extern void blk_queue_max_discard_sectors(struct request_queue *q, 950 954 unsigned int max_discard_sectors); ··· 1087 1087 1088 1088 extern void blk_io_schedule(void); 1089 1089 1090 - #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ 1091 - 1092 - extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 
1093 - sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 1094 - extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1095 - sector_t nr_sects, gfp_t gfp_mask, int flags, 1096 - struct bio **biop); 1090 + int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1091 + sector_t nr_sects, gfp_t gfp_mask); 1092 + int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1093 + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop); 1094 + int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, 1095 + sector_t nr_sects, gfp_t gfp); 1097 1096 1098 1097 #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ 1099 1098 #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ ··· 1111 1112 SECTOR_SHIFT), 1112 1113 nr_blocks << (sb->s_blocksize_bits - 1113 1114 SECTOR_SHIFT), 1114 - gfp_mask, flags); 1115 + gfp_mask); 1115 1116 } 1116 1117 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1117 1118 sector_t nr_blocks, gfp_t gfp_mask) ··· 1259 1260 static inline unsigned int bdev_discard_granularity(struct block_device *bdev) 1260 1261 { 1261 1262 return bdev_get_queue(bdev)->limits.discard_granularity; 1263 + } 1264 + 1265 + static inline unsigned int 1266 + bdev_max_secure_erase_sectors(struct block_device *bdev) 1267 + { 1268 + return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; 1262 1269 } 1263 1270 1264 1271 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+3 -3
mm/swapfile.c
··· 179 179 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 180 180 if (nr_blocks) { 181 181 err = blkdev_issue_discard(si->bdev, start_block, 182 - nr_blocks, GFP_KERNEL, 0); 182 + nr_blocks, GFP_KERNEL); 183 183 if (err) 184 184 return err; 185 185 cond_resched(); ··· 190 190 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 191 191 192 192 err = blkdev_issue_discard(si->bdev, start_block, 193 - nr_blocks, GFP_KERNEL, 0); 193 + nr_blocks, GFP_KERNEL); 194 194 if (err) 195 195 break; 196 196 ··· 254 254 start_block <<= PAGE_SHIFT - 9; 255 255 nr_blocks <<= PAGE_SHIFT - 9; 256 256 if (blkdev_issue_discard(si->bdev, start_block, 257 - nr_blocks, GFP_NOIO, 0)) 257 + nr_blocks, GFP_NOIO)) 258 258 break; 259 259 260 260 se = next_se(se);