btrfs: add unlikely annotations to branches leading to EIO

The unlikely() annotation is a static prediction hint that the compiler may
use to reorder code out of the hot path. We use it elsewhere (namely
tree-checker.c) for error branches that almost never happen, and EIO is
one of them.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>

+81 -83
+2 -2
fs/btrfs/backref.c
··· 859 859 free_pref(ref); 860 860 return PTR_ERR(eb); 861 861 } 862 - if (!extent_buffer_uptodate(eb)) { 862 + if (unlikely(!extent_buffer_uptodate(eb))) { 863 863 free_pref(ref); 864 864 free_extent_buffer(eb); 865 865 return -EIO; ··· 1614 1614 ret = PTR_ERR(eb); 1615 1615 goto out; 1616 1616 } 1617 - if (!extent_buffer_uptodate(eb)) { 1617 + if (unlikely(!extent_buffer_uptodate(eb))) { 1618 1618 free_extent_buffer(eb); 1619 1619 ret = -EIO; 1620 1620 goto out;
+2 -2
fs/btrfs/bio.c
··· 849 849 if (ret < 0) 850 850 goto out_counter_dec; 851 851 852 - if (!smap.dev->bdev || 853 - !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) { 852 + if (unlikely(!smap.dev->bdev || 853 + !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state))) { 854 854 ret = -EIO; 855 855 goto out_counter_dec; 856 856 }
+1 -1
fs/btrfs/defrag.c
··· 924 924 folio_put(folio); 925 925 goto again; 926 926 } 927 - if (!folio_test_uptodate(folio)) { 927 + if (unlikely(!folio_test_uptodate(folio))) { 928 928 folio_unlock(folio); 929 929 folio_put(folio); 930 930 return ERR_PTR(-EIO);
+2 -4
fs/btrfs/dev-replace.c
··· 177 177 * allow 'btrfs dev replace_cancel' if src/tgt device is 178 178 * missing 179 179 */ 180 - if (!dev_replace->srcdev && 181 - !btrfs_test_opt(fs_info, DEGRADED)) { 180 + if (unlikely(!dev_replace->srcdev && !btrfs_test_opt(fs_info, DEGRADED))) { 182 181 ret = -EIO; 183 182 btrfs_warn(fs_info, 184 183 "cannot mount because device replace operation is ongoing and"); ··· 185 186 "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?", 186 187 src_devid); 187 188 } 188 - if (!dev_replace->tgtdev && 189 - !btrfs_test_opt(fs_info, DEGRADED)) { 189 + if (unlikely(!dev_replace->tgtdev && !btrfs_test_opt(fs_info, DEGRADED))) { 190 190 ret = -EIO; 191 191 btrfs_warn(fs_info, 192 192 "cannot mount because device replace operation is ongoing and");
+12 -12
fs/btrfs/disk-io.c
··· 370 370 ASSERT(check); 371 371 372 372 found_start = btrfs_header_bytenr(eb); 373 - if (found_start != eb->start) { 373 + if (unlikely(found_start != eb->start)) { 374 374 btrfs_err_rl(fs_info, 375 375 "bad tree block start, mirror %u want %llu have %llu", 376 376 eb->read_mirror, eb->start, found_start); 377 377 ret = -EIO; 378 378 goto out; 379 379 } 380 - if (check_tree_block_fsid(eb)) { 380 + if (unlikely(check_tree_block_fsid(eb))) { 381 381 btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u", 382 382 eb->start, eb->read_mirror); 383 383 ret = -EIO; 384 384 goto out; 385 385 } 386 386 found_level = btrfs_header_level(eb); 387 - if (found_level >= BTRFS_MAX_LEVEL) { 387 + if (unlikely(found_level >= BTRFS_MAX_LEVEL)) { 388 388 btrfs_err(fs_info, 389 389 "bad tree block level, mirror %u level %d on logical %llu", 390 390 eb->read_mirror, btrfs_header_level(eb), eb->start); ··· 410 410 } 411 411 } 412 412 413 - if (found_level != check->level) { 413 + if (unlikely(found_level != check->level)) { 414 414 btrfs_err(fs_info, 415 415 "level verify failed on logical %llu mirror %u wanted %u found %u", 416 416 eb->start, eb->read_mirror, check->level, found_level); ··· 1046 1046 root->node = NULL; 1047 1047 goto fail; 1048 1048 } 1049 - if (!btrfs_buffer_uptodate(root->node, generation, false)) { 1049 + if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) { 1050 1050 ret = -EIO; 1051 1051 goto fail; 1052 1052 } ··· 2058 2058 u64 bytenr = btrfs_super_log_root(disk_super); 2059 2059 int level = btrfs_super_log_root_level(disk_super); 2060 2060 2061 - if (fs_devices->rw_devices == 0) { 2061 + if (unlikely(fs_devices->rw_devices == 0)) { 2062 2062 btrfs_warn(fs_info, "log replay required on RO media"); 2063 2063 return -EIO; 2064 2064 } ··· 2079 2079 btrfs_put_root(log_tree_root); 2080 2080 return ret; 2081 2081 } 2082 - if (!extent_buffer_uptodate(log_tree_root->node)) { 2082 + if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) { 2083 
2083 btrfs_err(fs_info, "failed to read log tree"); 2084 2084 btrfs_put_root(log_tree_root); 2085 2085 return -EIO; ··· 2641 2641 root->node = NULL; 2642 2642 return ret; 2643 2643 } 2644 - if (!extent_buffer_uptodate(root->node)) { 2644 + if (unlikely(!extent_buffer_uptodate(root->node))) { 2645 2645 free_extent_buffer(root->node); 2646 2646 root->node = NULL; 2647 2647 return -EIO; ··· 3469 3469 * below in btrfs_init_dev_replace(). 3470 3470 */ 3471 3471 btrfs_free_extra_devids(fs_devices); 3472 - if (!fs_devices->latest_dev->bdev) { 3472 + if (unlikely(!fs_devices->latest_dev->bdev)) { 3473 3473 btrfs_err(fs_info, "failed to read devices"); 3474 3474 ret = -EIO; 3475 3475 goto fail_tree_roots; ··· 3963 3963 * Checks last_flush_error of disks in order to determine the device 3964 3964 * state. 3965 3965 */ 3966 - if (errors_wait && !btrfs_check_rw_degradable(info, NULL)) 3966 + if (unlikely(errors_wait && !btrfs_check_rw_degradable(info, NULL))) 3967 3967 return -EIO; 3968 3968 3969 3969 return 0; ··· 4076 4076 if (ret) 4077 4077 total_errors++; 4078 4078 } 4079 - if (total_errors > max_errors) { 4079 + if (unlikely(total_errors > max_errors)) { 4080 4080 btrfs_err(fs_info, "%d errors while writing supers", 4081 4081 total_errors); 4082 4082 mutex_unlock(&fs_info->fs_devices->device_list_mutex); ··· 4101 4101 total_errors++; 4102 4102 } 4103 4103 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4104 - if (total_errors > max_errors) { 4104 + if (unlikely(total_errors > max_errors)) { 4105 4105 btrfs_handle_fs_error(fs_info, -EIO, 4106 4106 "%d errors while writing supers", 4107 4107 total_errors);
+2 -2
fs/btrfs/extent-tree.c
··· 5638 5638 ref.parent = path->nodes[level]->start; 5639 5639 } else { 5640 5640 ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level])); 5641 - if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) { 5641 + if (unlikely(btrfs_root_id(root) != btrfs_header_owner(path->nodes[level]))) { 5642 5642 btrfs_err(root->fs_info, "mismatched block owner"); 5643 5643 return -EIO; 5644 5644 } ··· 5774 5774 5775 5775 level--; 5776 5776 ASSERT(level == btrfs_header_level(next)); 5777 - if (level != btrfs_header_level(next)) { 5777 + if (unlikely(level != btrfs_header_level(next))) { 5778 5778 btrfs_err(root->fs_info, "mismatched level"); 5779 5779 ret = -EIO; 5780 5780 goto out_unlock;
+1 -1
fs/btrfs/extent_io.c
··· 3880 3880 return ret; 3881 3881 3882 3882 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); 3883 - if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 3883 + if (unlikely(!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) 3884 3884 return -EIO; 3885 3885 return 0; 3886 3886 }
+1 -1
fs/btrfs/extent_map.c
··· 1057 1057 btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL); 1058 1058 write_lock(&em_tree->lock); 1059 1059 em = btrfs_lookup_extent_mapping(em_tree, start, len); 1060 - if (!em) { 1060 + if (unlikely(!em)) { 1061 1061 ret = -EIO; 1062 1062 goto out_unlock; 1063 1063 }
+1 -1
fs/btrfs/file.c
··· 815 815 if (ret) 816 816 return ret; 817 817 folio_lock(folio); 818 - if (!folio_test_uptodate(folio)) { 818 + if (unlikely(!folio_test_uptodate(folio))) { 819 819 folio_unlock(folio); 820 820 return -EIO; 821 821 }
+6 -6
fs/btrfs/free-space-tree.c
··· 137 137 if (ret < 0) 138 138 return ret; 139 139 140 - if (ret == 0) { 140 + if (unlikely(ret == 0)) { 141 141 DEBUG_WARN(); 142 142 return -EIO; 143 143 } 144 144 145 - if (p->slots[0] == 0) { 145 + if (unlikely(p->slots[0] == 0)) { 146 146 DEBUG_WARN("no previous slot found"); 147 147 return -EIO; 148 148 } ··· 293 293 expected_extent_count = btrfs_free_space_extent_count(leaf, info); 294 294 btrfs_release_path(path); 295 295 296 - if (extent_count != expected_extent_count) { 296 + if (unlikely(extent_count != expected_extent_count)) { 297 297 btrfs_err(fs_info, 298 298 "incorrect extent count for %llu; counted %u, expected %u", 299 299 block_group->start, extent_count, ··· 465 465 start_bit = find_next_bit_le(bitmap, nrbits, end_bit); 466 466 } 467 467 468 - if (extent_count != expected_extent_count) { 468 + if (unlikely(extent_count != expected_extent_count)) { 469 469 btrfs_err(fs_info, 470 470 "incorrect extent count for %llu; counted %u, expected %u", 471 471 block_group->start, extent_count, ··· 1611 1611 extent_count++; 1612 1612 } 1613 1613 1614 - if (extent_count != expected_extent_count) { 1614 + if (unlikely(extent_count != expected_extent_count)) { 1615 1615 btrfs_err(fs_info, 1616 1616 "incorrect extent count for %llu; counted %u, expected %u", 1617 1617 block_group->start, extent_count, ··· 1672 1672 extent_count++; 1673 1673 } 1674 1674 1675 - if (extent_count != expected_extent_count) { 1675 + if (unlikely(extent_count != expected_extent_count)) { 1676 1676 btrfs_err(fs_info, 1677 1677 "incorrect extent count for %llu; counted %u, expected %u", 1678 1678 block_group->start, extent_count,
+6 -6
fs/btrfs/inode.c
··· 3104 3104 if (!freespace_inode) 3105 3105 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3106 3106 3107 - if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3107 + if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) { 3108 3108 ret = -EIO; 3109 3109 goto out; 3110 3110 } ··· 3370 3370 const u8 * const csum_expected) 3371 3371 { 3372 3372 btrfs_calculate_block_csum(fs_info, paddr, csum); 3373 - if (memcmp(csum, csum_expected, fs_info->csum_size)) 3373 + if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0)) 3374 3374 return -EIO; 3375 3375 return 0; 3376 3376 } ··· 4842 4842 folio_put(folio); 4843 4843 goto again; 4844 4844 } 4845 - if (!folio_test_uptodate(folio)) { 4845 + if (unlikely(!folio_test_uptodate(folio))) { 4846 4846 ret = -EIO; 4847 4847 goto out_unlock; 4848 4848 } ··· 4986 4986 folio_put(folio); 4987 4987 goto again; 4988 4988 } 4989 - if (!folio_test_uptodate(folio)) { 4989 + if (unlikely(!folio_test_uptodate(folio))) { 4990 4990 ret = -EIO; 4991 4991 goto out_unlock; 4992 4992 } ··· 7179 7179 insert: 7180 7180 ret = 0; 7181 7181 btrfs_release_path(path); 7182 - if (em->start > start || btrfs_extent_map_end(em) <= start) { 7182 + if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) { 7183 7183 btrfs_err(fs_info, 7184 7184 "bad extent! em: [%llu %llu] passed [%llu %llu]", 7185 7185 em->start, em->len, start, len); ··· 9298 9298 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9299 9299 extent_start, 0); 9300 9300 if (ret) { 9301 - if (ret > 0) { 9301 + if (unlikely(ret > 0)) { 9302 9302 /* The extent item disappeared? */ 9303 9303 return -EIO; 9304 9304 }
+2 -2
fs/btrfs/qgroup.c
··· 2538 2538 return -EUCLEAN; 2539 2539 } 2540 2540 2541 - if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) { 2541 + if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) { 2542 2542 ret = -EIO; 2543 2543 goto out; 2544 2544 } ··· 4843 4843 reloc_eb = NULL; 4844 4844 goto free_out; 4845 4845 } 4846 - if (!extent_buffer_uptodate(reloc_eb)) { 4846 + if (unlikely(!extent_buffer_uptodate(reloc_eb))) { 4847 4847 ret = -EIO; 4848 4848 goto free_out; 4849 4849 }
+7 -7
fs/btrfs/raid56.c
··· 1167 1167 /* Check if we have reached tolerance early. */ 1168 1168 found_errors = get_rbio_veritical_errors(rbio, sector_nr, 1169 1169 NULL, NULL); 1170 - if (found_errors > rbio->bioc->max_errors) 1170 + if (unlikely(found_errors > rbio->bioc->max_errors)) 1171 1171 return -EIO; 1172 1172 return 0; 1173 1173 } ··· 1847 1847 if (!found_errors) 1848 1848 return 0; 1849 1849 1850 - if (found_errors > rbio->bioc->max_errors) 1850 + if (unlikely(found_errors > rbio->bioc->max_errors)) 1851 1851 return -EIO; 1852 1852 1853 1853 /* ··· 2399 2399 int found_errors; 2400 2400 2401 2401 found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL); 2402 - if (found_errors > rbio->bioc->max_errors) { 2402 + if (unlikely(found_errors > rbio->bioc->max_errors)) { 2403 2403 ret = -EIO; 2404 2404 break; 2405 2405 } ··· 2688 2688 2689 2689 found_errors = get_rbio_veritical_errors(rbio, sector_nr, 2690 2690 &faila, &failb); 2691 - if (found_errors > rbio->bioc->max_errors) { 2691 + if (unlikely(found_errors > rbio->bioc->max_errors)) { 2692 2692 ret = -EIO; 2693 2693 goto out; 2694 2694 } ··· 2712 2712 * data, so the capability of the repair is declined. (In the 2713 2713 * case of RAID5, we can not repair anything.) 2714 2714 */ 2715 - if (dfail > rbio->bioc->max_errors - 1) { 2715 + if (unlikely(dfail > rbio->bioc->max_errors - 1)) { 2716 2716 ret = -EIO; 2717 2717 goto out; 2718 2718 } ··· 2729 2729 * scrubbing parity, luckily, use the other one to repair the 2730 2730 * data, or we can not repair the data stripe. 2731 2731 */ 2732 - if (failp != rbio->scrubp) { 2732 + if (unlikely(failp != rbio->scrubp)) { 2733 2733 ret = -EIO; 2734 2734 goto out; 2735 2735 } ··· 2820 2820 int found_errors; 2821 2821 2822 2822 found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL); 2823 - if (found_errors > rbio->bioc->max_errors) { 2823 + if (unlikely(found_errors > rbio->bioc->max_errors)) { 2824 2824 ret = -EIO; 2825 2825 break; 2826 2826 }
+3 -3
fs/btrfs/relocation.c
··· 2270 2270 2271 2271 bytenr = btrfs_node_blockptr(upper->eb, slot); 2272 2272 if (lowest) { 2273 - if (bytenr != node->bytenr) { 2273 + if (unlikely(bytenr != node->bytenr)) { 2274 2274 btrfs_err(root->fs_info, 2275 2275 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2276 2276 bytenr, node->bytenr, slot, ··· 2447 2447 eb = read_tree_block(fs_info, block->bytenr, &check); 2448 2448 if (IS_ERR(eb)) 2449 2449 return PTR_ERR(eb); 2450 - if (!extent_buffer_uptodate(eb)) { 2450 + if (unlikely(!extent_buffer_uptodate(eb))) { 2451 2451 free_extent_buffer(eb); 2452 2452 return -EIO; 2453 2453 } ··· 2832 2832 if (!folio_test_uptodate(folio)) { 2833 2833 btrfs_read_folio(NULL, folio); 2834 2834 folio_lock(folio); 2835 - if (!folio_test_uptodate(folio)) { 2835 + if (unlikely(!folio_test_uptodate(folio))) { 2836 2836 ret = -EIO; 2837 2837 goto release_folio; 2838 2838 }
+7 -7
fs/btrfs/scrub.c
··· 1987 1987 * metadata, we should immediately abort. 1988 1988 */ 1989 1989 for (int i = 0; i < nr_stripes; i++) { 1990 - if (stripe_has_metadata_error(&sctx->stripes[i])) { 1990 + if (unlikely(stripe_has_metadata_error(&sctx->stripes[i]))) { 1991 1991 ret = -EIO; 1992 1992 goto out; 1993 1993 } ··· 2181 2181 * As we may hit an empty data stripe while it's missing. 2182 2182 */ 2183 2183 bitmap_and(&error, &error, &has_extent, stripe->nr_sectors); 2184 - if (!bitmap_empty(&error, stripe->nr_sectors)) { 2184 + if (unlikely(!bitmap_empty(&error, stripe->nr_sectors))) { 2185 2185 btrfs_err(fs_info, 2186 2186 "scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", 2187 2187 full_stripe_start, i, stripe->nr_sectors, ··· 2875 2875 btrfs_put_block_group(cache); 2876 2876 if (ret) 2877 2877 break; 2878 - if (sctx->is_dev_replace && 2879 - atomic64_read(&dev_replace->num_write_errors) > 0) { 2878 + if (unlikely(sctx->is_dev_replace && 2879 + atomic64_read(&dev_replace->num_write_errors) > 0)) { 2880 2880 ret = -EIO; 2881 2881 break; 2882 2882 } ··· 2904 2904 if (ret < 0) 2905 2905 return ret; 2906 2906 ret = btrfs_check_super_csum(fs_info, sb); 2907 - if (ret != 0) { 2907 + if (unlikely(ret != 0)) { 2908 2908 btrfs_err_rl(fs_info, 2909 2909 "scrub: super block at physical %llu devid %llu has bad csum", 2910 2910 physical, dev->devid); ··· 3080 3080 } 3081 3081 3082 3082 mutex_lock(&fs_info->scrub_lock); 3083 - if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 3084 - test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { 3083 + if (unlikely(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 3084 + test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state))) { 3085 3085 mutex_unlock(&fs_info->scrub_lock); 3086 3086 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3087 3087 ret = -EIO;
+5 -5
fs/btrfs/send.c
··· 646 646 ret = kernel_write(filp, buf + pos, len - pos, off); 647 647 if (ret < 0) 648 648 return ret; 649 - if (ret == 0) 649 + if (unlikely(ret == 0)) 650 650 return -EIO; 651 651 pos += ret; 652 652 } ··· 1723 1723 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1724 1724 if (ret < 0) 1725 1725 return ret; 1726 - if (ret) { 1726 + if (unlikely(ret)) { 1727 1727 /* 1728 1728 * An empty symlink inode. Can happen in rare error paths when 1729 1729 * creating a symlink (transaction committed before the inode ··· 5199 5199 if (!folio_test_uptodate(folio)) { 5200 5200 btrfs_read_folio(NULL, folio); 5201 5201 folio_lock(folio); 5202 - if (!folio_test_uptodate(folio)) { 5202 + if (unlikely(!folio_test_uptodate(folio))) { 5203 5203 folio_unlock(folio); 5204 5204 btrfs_err(fs_info, 5205 5205 "send: IO error at offset %llu for inode %llu root %llu", ··· 6961 6961 { 6962 6962 int ret = 0; 6963 6963 6964 - if (sctx->cur_ino != sctx->cmp_key->objectid) { 6964 + if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) { 6965 6965 inconsistent_snapshot_error(sctx, result, "reference"); 6966 6966 return -EIO; 6967 6967 } ··· 6989 6989 { 6990 6990 int ret = 0; 6991 6991 6992 - if (sctx->cur_ino != sctx->cmp_key->objectid) { 6992 + if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) { 6993 6993 inconsistent_snapshot_error(sctx, result, "xattr"); 6994 6994 return -EIO; 6995 6995 }
+1 -1
fs/btrfs/zlib.c
··· 291 291 ret = zlib_deflate(&workspace->strm, Z_FINISH); 292 292 if (ret == Z_STREAM_END) 293 293 break; 294 - if (ret != Z_OK && ret != Z_BUF_ERROR) { 294 + if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) { 295 295 zlib_deflateEnd(&workspace->strm); 296 296 ret = -EIO; 297 297 goto out;
+19 -19
fs/btrfs/zoned.c
··· 274 274 return ret; 275 275 } 276 276 *nr_zones = ret; 277 - if (!ret) 277 + if (unlikely(!ret)) 278 278 return -EIO; 279 279 280 280 /* Populate cache */ ··· 503 503 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len; 504 504 } 505 505 506 - if (nreported != zone_info->nr_zones) { 506 + if (unlikely(nreported != zone_info->nr_zones)) { 507 507 btrfs_err(device->fs_info, 508 508 "inconsistent number of zones on %s (%u/%u)", 509 509 rcu_dereference(device->name), nreported, ··· 513 513 } 514 514 515 515 if (max_active_zones) { 516 - if (nactive > max_active_zones) { 516 + if (unlikely(nactive > max_active_zones)) { 517 517 if (bdev_max_active_zones(bdev) == 0) { 518 518 max_active_zones = 0; 519 519 zone_info->max_active_zones = 0; ··· 901 901 zones); 902 902 if (ret < 0) 903 903 return ret; 904 - if (ret != BTRFS_NR_SB_LOG_ZONES) 904 + if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES)) 905 905 return -EIO; 906 906 907 907 return sb_log_location(bdev, zones, rw, bytenr_ret); ··· 1357 1357 return 0; 1358 1358 } 1359 1359 1360 - if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) { 1360 + if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) { 1361 1361 btrfs_err(fs_info, 1362 1362 "zoned: unexpected conventional zone %llu on device %s (devid %llu)", 1363 1363 zone.start << SECTOR_SHIFT, rcu_dereference(device->name), ··· 1399 1399 struct zone_info *info, 1400 1400 unsigned long *active) 1401 1401 { 1402 - if (info->alloc_offset == WP_MISSING_DEV) { 1402 + if (unlikely(info->alloc_offset == WP_MISSING_DEV)) { 1403 1403 btrfs_err(bg->fs_info, 1404 1404 "zoned: cannot recover write pointer for zone %llu", 1405 1405 info->physical); ··· 1428 1428 1429 1429 bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity); 1430 1430 1431 - if (zone_info[0].alloc_offset == WP_MISSING_DEV) { 1431 + if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) { 1432 1432 btrfs_err(bg->fs_info, 1433 1433 "zoned: cannot recover write pointer for zone %llu", 1434 
1434 zone_info[0].physical); 1435 1435 return -EIO; 1436 1436 } 1437 - if (zone_info[1].alloc_offset == WP_MISSING_DEV) { 1437 + if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) { 1438 1438 btrfs_err(bg->fs_info, 1439 1439 "zoned: cannot recover write pointer for zone %llu", 1440 1440 zone_info[1].physical); ··· 1447 1447 if (zone_info[1].alloc_offset == WP_CONVENTIONAL) 1448 1448 zone_info[1].alloc_offset = last_alloc; 1449 1449 1450 - if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) { 1450 + if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) { 1451 1451 btrfs_err(bg->fs_info, 1452 1452 "zoned: write pointer offset mismatch of zones in DUP profile"); 1453 1453 return -EIO; 1454 1454 } 1455 1455 1456 1456 if (test_bit(0, active) != test_bit(1, active)) { 1457 - if (!btrfs_zone_activate(bg)) 1457 + if (unlikely(!btrfs_zone_activate(bg))) 1458 1458 return -EIO; 1459 1459 } else if (test_bit(0, active)) { 1460 1460 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); ··· 1489 1489 if (zone_info[i].alloc_offset == WP_CONVENTIONAL) 1490 1490 zone_info[i].alloc_offset = last_alloc; 1491 1491 1492 - if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) && 1493 - !btrfs_test_opt(fs_info, DEGRADED)) { 1492 + if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) && 1493 + !btrfs_test_opt(fs_info, DEGRADED))) { 1494 1494 btrfs_err(fs_info, 1495 1495 "zoned: write pointer offset mismatch of zones in %s profile", 1496 1496 btrfs_bg_type_to_raid_name(map->type)); 1497 1497 return -EIO; 1498 1498 } 1499 1499 if (test_bit(0, active) != test_bit(i, active)) { 1500 - if (!btrfs_test_opt(fs_info, DEGRADED) && 1501 - !btrfs_zone_activate(bg)) { 1500 + if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) && 1501 + !btrfs_zone_activate(bg))) { 1502 1502 return -EIO; 1503 1503 } 1504 1504 } else { ··· 1554 1554 } 1555 1555 1556 1556 if (test_bit(0, active) != test_bit(i, active)) { 1557 - if (!btrfs_zone_activate(bg)) 
1557 + if (unlikely(!btrfs_zone_activate(bg))) 1558 1558 return -EIO; 1559 1559 } else { 1560 1560 if (test_bit(0, active)) ··· 1586 1586 continue; 1587 1587 1588 1588 if (test_bit(0, active) != test_bit(i, active)) { 1589 - if (!btrfs_zone_activate(bg)) 1589 + if (unlikely(!btrfs_zone_activate(bg))) 1590 1590 return -EIO; 1591 1591 } else { 1592 1592 if (test_bit(0, active)) ··· 1643 1643 return 0; 1644 1644 1645 1645 /* Sanity check */ 1646 - if (!IS_ALIGNED(length, fs_info->zone_size)) { 1646 + if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) { 1647 1647 btrfs_err(fs_info, 1648 1648 "zoned: block group %llu len %llu unaligned to zone size %llu", 1649 1649 logical, length, fs_info->zone_size); ··· 1756 1756 return -EINVAL; 1757 1757 } 1758 1758 1759 - if (cache->alloc_offset > cache->zone_capacity) { 1759 + if (unlikely(cache->alloc_offset > cache->zone_capacity)) { 1760 1760 btrfs_err(fs_info, 1761 1761 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu", 1762 1762 cache->alloc_offset, cache->zone_capacity, ··· 2087 2087 2088 2088 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, 2089 2089 &mapped_length, &bioc, NULL, NULL); 2090 - if (ret || !bioc || mapped_length < PAGE_SIZE) { 2090 + if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) { 2091 2091 ret = -EIO; 2092 2092 goto out_put_bioc; 2093 2093 }
+1 -1
fs/btrfs/zstd.c
··· 654 654 if (workspace->in_buf.pos == workspace->in_buf.size) { 655 655 kunmap_local(workspace->in_buf.src); 656 656 folio_in_index++; 657 - if (folio_in_index >= total_folios_in) { 657 + if (unlikely(folio_in_index >= total_folios_in)) { 658 658 workspace->in_buf.src = NULL; 659 659 ret = -EIO; 660 660 goto done;