Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block/drbd: Use the enum req_op and blk_opf_t types

Improve static type checking by using the enum req_op type for variables
that represent a request operation and the new blk_opf_t type for
variables that represent request flags.

Reviewed-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
Cc: Lars Ellenberg <lars.ellenberg@linbit.com>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-14-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Bart Van Assche and committed by Jens Axboe
9945172a ba91fd01

+15 -13
+5 -4
drivers/block/drbd/drbd_actlog.c
··· 124 124 125 125 static int _drbd_md_sync_page_io(struct drbd_device *device, 126 126 struct drbd_backing_dev *bdev, 127 - sector_t sector, int op) 127 + sector_t sector, enum req_op op) 128 128 { 129 129 struct bio *bio; 130 130 /* we do all our meta data IO in aligned 4k blocks. */ 131 131 const int size = 4096; 132 - int err, op_flags = 0; 132 + int err; 133 + blk_opf_t op_flags = 0; 133 134 134 135 device->md_io.done = 0; 135 136 device->md_io.error = -ENODEV; ··· 175 174 } 176 175 177 176 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, 178 - sector_t sector, int op) 177 + sector_t sector, enum req_op op) 179 178 { 180 179 int err; 181 180 D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); ··· 386 385 write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates; 387 386 rcu_read_unlock(); 388 387 if (write_al_updates) { 389 - if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { 388 + if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { 390 389 err = -EIO; 391 390 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); 392 391 } else {
+1 -1
drivers/block/drbd/drbd_bitmap.c
··· 977 977 static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) 978 978 { 979 979 struct drbd_device *device = ctx->device; 980 - unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; 980 + enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE; 981 981 struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, 982 982 GFP_NOIO, &drbd_md_io_bio_set); 983 983 struct drbd_bitmap *b = device->bitmap;
+3 -3
drivers/block/drbd/drbd_int.h
··· 1495 1495 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); 1496 1496 extern void drbd_md_put_buffer(struct drbd_device *device); 1497 1497 extern int drbd_md_sync_page_io(struct drbd_device *device, 1498 - struct drbd_backing_dev *bdev, sector_t sector, int op); 1498 + struct drbd_backing_dev *bdev, sector_t sector, enum req_op op); 1499 1499 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); 1500 1500 extern void wait_until_done_or_force_detached(struct drbd_device *device, 1501 1501 struct drbd_backing_dev *bdev, unsigned int *done); ··· 1547 1547 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, 1548 1548 bool throttle_if_app_is_waiting); 1549 1549 extern int drbd_submit_peer_request(struct drbd_device *, 1550 - struct drbd_peer_request *, const unsigned, 1551 - const unsigned, const int); 1550 + struct drbd_peer_request *, enum req_op, 1551 + blk_opf_t, int); 1552 1552 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); 1553 1553 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, 1554 1554 sector_t, unsigned int,
+6 -5
drivers/block/drbd/drbd_receiver.c
··· 1621 1621 /* TODO allocate from our own bio_set. */ 1622 1622 int drbd_submit_peer_request(struct drbd_device *device, 1623 1623 struct drbd_peer_request *peer_req, 1624 - const unsigned op, const unsigned op_flags, 1624 + const enum req_op op, const blk_opf_t op_flags, 1625 1625 const int fault_type) 1626 1626 { 1627 1627 struct bio *bios = NULL; ··· 2383 2383 /* see also bio_flags_to_wire() 2384 2384 * DRBD_REQ_*, because we need to semantically map the flags to data packet 2385 2385 * flags and back. We may replicate to other kernel versions. */ 2386 - static unsigned long wire_flags_to_bio_flags(u32 dpf) 2386 + static blk_opf_t wire_flags_to_bio_flags(u32 dpf) 2387 2387 { 2388 2388 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 2389 2389 (dpf & DP_FUA ? REQ_FUA : 0) | 2390 2390 (dpf & DP_FLUSH ? REQ_PREFLUSH : 0); 2391 2391 } 2392 2392 2393 - static unsigned long wire_flags_to_bio_op(u32 dpf) 2393 + static enum req_op wire_flags_to_bio_op(u32 dpf) 2394 2394 { 2395 2395 if (dpf & DP_ZEROES) 2396 2396 return REQ_OP_WRITE_ZEROES; ··· 2543 2543 struct drbd_peer_request *peer_req; 2544 2544 struct p_data *p = pi->data; 2545 2545 u32 peer_seq = be32_to_cpu(p->seq_num); 2546 - int op, op_flags; 2546 + enum req_op op; 2547 + blk_opf_t op_flags; 2547 2548 u32 dp_flags; 2548 2549 int err, tp; 2549 2550 ··· 4952 4951 4953 4952 if (get_ldev(device)) { 4954 4953 struct drbd_peer_request *peer_req; 4955 - const int op = REQ_OP_WRITE_ZEROES; 4954 + const enum req_op op = REQ_OP_WRITE_ZEROES; 4956 4955 4957 4956 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, 4958 4957 size, 0, GFP_NOIO);