Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md/core: Combine two sync_page_io() arguments

Improve uniformity in the kernel's handling of the request operation and
flags by passing these as a single argument.

Cc: Song Liu <song@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-32-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Bart Van Assche; committed by Jens Axboe.
4ce4c73f 13a1f650

+29 -30
+1 -1
drivers/md/dm-raid.c
··· 2036 2036 2037 2037 rdev->sb_loaded = 0; 2038 2038 2039 - if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) { 2039 + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) { 2040 2040 DMERR("Failed to read superblock of device at position %d", 2041 2041 rdev->raid_disk); 2042 2042 md_error(rdev->mddev, rdev);
+1 -1
drivers/md/md-bitmap.c
··· 165 165 166 166 if (sync_page_io(rdev, target, 167 167 roundup(size, bdev_logical_block_size(rdev->bdev)), 168 - page, REQ_OP_READ, 0, true)) { 168 + page, REQ_OP_READ, true)) { 169 169 page->index = index; 170 170 return 0; 171 171 }
+5 -5
drivers/md/md.c
··· 993 993 } 994 994 995 995 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 996 - struct page *page, int op, int op_flags, bool metadata_op) 996 + struct page *page, blk_opf_t opf, bool metadata_op) 997 997 { 998 998 struct bio bio; 999 999 struct bio_vec bvec; 1000 1000 1001 1001 if (metadata_op && rdev->meta_bdev) 1002 - bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); 1002 + bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); 1003 1003 else 1004 - bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); 1004 + bio_init(&bio, rdev->bdev, &bvec, 1, opf); 1005 1005 1006 1006 if (metadata_op) 1007 1007 bio.bi_iter.bi_sector = sector + rdev->sb_start; ··· 1024 1024 if (rdev->sb_loaded) 1025 1025 return 0; 1026 1026 1027 - if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) 1027 + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) 1028 1028 goto fail; 1029 1029 rdev->sb_loaded = 1; 1030 1030 return 0; ··· 1722 1722 return -EINVAL; 1723 1723 bb_sector = (long long)offset; 1724 1724 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1725 - rdev->bb_page, REQ_OP_READ, 0, true)) 1725 + rdev->bb_page, REQ_OP_READ, true)) 1726 1726 return -EIO; 1727 1727 bbp = (__le64 *)page_address(rdev->bb_page); 1728 1728 rdev->badblocks.shift = sb->bblog_shift;
+1 -2
drivers/md/md.h
··· 738 738 sector_t sector, int size, struct page *page); 739 739 extern int md_super_wait(struct mddev *mddev); 740 740 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 741 - struct page *page, int op, int op_flags, 742 - bool metadata_op); 741 + struct page *page, blk_opf_t opf, bool metadata_op); 743 742 extern void md_do_sync(struct md_thread *thread); 744 743 extern void md_new_event(void); 745 744 extern void md_allow_write(struct mddev *mddev);
+4 -4
drivers/md/raid1.c
··· 1988 1988 } 1989 1989 1990 1990 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, 1991 - int sectors, struct page *page, int rw) 1991 + int sectors, struct page *page, int rw) 1992 1992 { 1993 - if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 1993 + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) 1994 1994 /* success */ 1995 1995 return 1; 1996 1996 if (rw == WRITE) { ··· 2057 2057 rdev = conf->mirrors[d].rdev; 2058 2058 if (sync_page_io(rdev, sect, s<<9, 2059 2059 pages[idx], 2060 - REQ_OP_READ, 0, false)) { 2060 + REQ_OP_READ, false)) { 2061 2061 success = 1; 2062 2062 break; 2063 2063 } ··· 2305 2305 atomic_inc(&rdev->nr_pending); 2306 2306 rcu_read_unlock(); 2307 2307 if (sync_page_io(rdev, sect, s<<9, 2308 - conf->tmppage, REQ_OP_READ, 0, false)) 2308 + conf->tmppage, REQ_OP_READ, false)) 2309 2309 success = 1; 2310 2310 rdev_dec_pending(rdev, mddev); 2311 2311 if (success)
+5 -5
drivers/md/raid10.c
··· 2512 2512 addr, 2513 2513 s << 9, 2514 2514 pages[idx], 2515 - REQ_OP_READ, 0, false); 2515 + REQ_OP_READ, false); 2516 2516 if (ok) { 2517 2517 rdev = conf->mirrors[dw].rdev; 2518 2518 addr = r10_bio->devs[1].addr + sect; ··· 2520 2520 addr, 2521 2521 s << 9, 2522 2522 pages[idx], 2523 - REQ_OP_WRITE, 0, false); 2523 + REQ_OP_WRITE, false); 2524 2524 if (!ok) { 2525 2525 set_bit(WriteErrorSeen, &rdev->flags); 2526 2526 if (!test_and_set_bit(WantReplacement, ··· 2644 2644 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2645 2645 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2646 2646 return -1; 2647 - if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 2647 + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) 2648 2648 /* success */ 2649 2649 return 1; 2650 2650 if (rw == WRITE) { ··· 2726 2726 sect, 2727 2727 s<<9, 2728 2728 conf->tmppage, 2729 - REQ_OP_READ, 0, false); 2729 + REQ_OP_READ, false); 2730 2730 rdev_dec_pending(rdev, mddev); 2731 2731 rcu_read_lock(); 2732 2732 if (success) ··· 5107 5107 addr, 5108 5108 s << 9, 5109 5109 pages[idx], 5110 - REQ_OP_READ, 0, false); 5110 + REQ_OP_READ, false); 5111 5111 rdev_dec_pending(rdev, mddev); 5112 5112 rcu_read_lock(); 5113 5113 if (success)
+6 -6
drivers/md/raid5-cache.c
··· 1788 1788 mb = page_address(page); 1789 1789 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 1790 1790 mb, PAGE_SIZE)); 1791 - if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1791 + if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE | 1792 1792 REQ_SYNC | REQ_FUA, false)) { 1793 1793 __free_page(page); 1794 1794 return -EIO; ··· 1898 1898 atomic_inc(&rdev->nr_pending); 1899 1899 rcu_read_unlock(); 1900 1900 sync_page_io(rdev, sh->sector, PAGE_SIZE, 1901 - sh->dev[disk_index].page, REQ_OP_WRITE, 0, 1901 + sh->dev[disk_index].page, REQ_OP_WRITE, 1902 1902 false); 1903 1903 rdev_dec_pending(rdev, rdev->mddev); 1904 1904 rcu_read_lock(); ··· 1908 1908 atomic_inc(&rrdev->nr_pending); 1909 1909 rcu_read_unlock(); 1910 1910 sync_page_io(rrdev, sh->sector, PAGE_SIZE, 1911 - sh->dev[disk_index].page, REQ_OP_WRITE, 0, 1911 + sh->dev[disk_index].page, REQ_OP_WRITE, 1912 1912 false); 1913 1913 rdev_dec_pending(rrdev, rrdev->mddev); 1914 1914 rcu_read_lock(); ··· 2394 2394 PAGE_SIZE)); 2395 2395 kunmap_atomic(addr); 2396 2396 sync_page_io(log->rdev, write_pos, PAGE_SIZE, 2397 - dev->page, REQ_OP_WRITE, 0, false); 2397 + dev->page, REQ_OP_WRITE, false); 2398 2398 write_pos = r5l_ring_add(log, write_pos, 2399 2399 BLOCK_SECTORS); 2400 2400 offset += sizeof(__le32) + ··· 2406 2406 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 2407 2407 mb, PAGE_SIZE)); 2408 2408 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 2409 - REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); 2409 + REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false); 2410 2410 sh->log_start = ctx->pos; 2411 2411 list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 2412 2412 atomic_inc(&log->stripe_in_journal_count); ··· 2971 2971 if (!page) 2972 2972 return -ENOMEM; 2973 2973 2974 - if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { 2974 + if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) { 2975 2975 ret = -EIO; 2976 2976 goto ioerr; 2977 2977 }
+6 -6
drivers/md/raid5-ppl.c
··· 897 897 __func__, indent, "", rdev->bdev, 898 898 (unsigned long long)sector); 899 899 if (!sync_page_io(rdev, sector, block_size, page2, 900 - REQ_OP_READ, 0, false)) { 900 + REQ_OP_READ, false)) { 901 901 md_error(mddev, rdev); 902 902 pr_debug("%s:%*s read failed!\n", __func__, 903 903 indent, ""); ··· 919 919 (unsigned long long)(ppl_sector + i)); 920 920 if (!sync_page_io(log->rdev, 921 921 ppl_sector - log->rdev->data_offset + i, 922 - block_size, page2, REQ_OP_READ, 0, 922 + block_size, page2, REQ_OP_READ, 923 923 false)) { 924 924 pr_debug("%s:%*s read failed!\n", __func__, 925 925 indent, ""); ··· 946 946 (unsigned long long)parity_sector, 947 947 parity_rdev->bdev); 948 948 if (!sync_page_io(parity_rdev, parity_sector, block_size, 949 - page1, REQ_OP_WRITE, 0, false)) { 949 + page1, REQ_OP_WRITE, false)) { 950 950 pr_debug("%s:%*s parity write error!\n", __func__, 951 951 indent, ""); 952 952 md_error(mddev, parity_rdev); ··· 998 998 int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size; 999 999 1000 1000 if (!sync_page_io(rdev, sector - rdev->data_offset, 1001 - s, page, REQ_OP_READ, 0, false)) { 1001 + s, page, REQ_OP_READ, false)) { 1002 1002 md_error(mddev, rdev); 1003 1003 ret = -EIO; 1004 1004 goto out; ··· 1062 1062 1063 1063 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, 1064 1064 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | 1065 - REQ_FUA, 0, false)) { 1065 + REQ_FUA, false)) { 1066 1066 md_error(rdev->mddev, rdev); 1067 1067 ret = -EIO; 1068 1068 } ··· 1100 1100 if (!sync_page_io(rdev, 1101 1101 rdev->ppl.sector - rdev->data_offset + 1102 1102 pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ, 1103 - 0, false)) { 1103 + false)) { 1104 1104 md_error(mddev, rdev); 1105 1105 ret = -EIO; 1106 1106 /* if not able to read - don't recover any PPL */