Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: address indent/space issues

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>

Authored by Heinz Mauelshagen; committed by Mike Snitzer.
Commit hashes: 255e2646 96422281

+24 -25
+1 -1
drivers/md/dm-cache-policy.h
··· 76 76 * background work. 77 77 */ 78 78 int (*get_background_work)(struct dm_cache_policy *p, bool idle, 79 - struct policy_work **result); 79 + struct policy_work **result); 80 80 81 81 /* 82 82 * You must pass in the same work pointer that you were given, not
+1 -1
drivers/md/dm-crypt.c
··· 2503 2503 type = &key_type_encrypted; 2504 2504 set_key = set_key_encrypted; 2505 2505 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && 2506 - !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { 2506 + !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { 2507 2507 type = &key_type_trusted; 2508 2508 set_key = set_key_trusted; 2509 2509 } else {
+2 -3
drivers/md/dm-integrity.c
··· 2301 2301 else 2302 2302 skip_check: 2303 2303 dec_in_flight(dio); 2304 - 2305 2304 } else { 2306 2305 INIT_WORK(&dio->work, integrity_metadata); 2307 2306 queue_work(ic->metadata_wq, &dio->work); ··· 4084 4085 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { 4085 4086 if (val < 1 << SECTOR_SHIFT || 4086 4087 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || 4087 - (val & (val -1))) { 4088 + (val & (val - 1))) { 4088 4089 r = -EINVAL; 4089 4090 ti->error = "Invalid block_size argument"; 4090 4091 goto bad; ··· 4404 4405 if (ic->internal_hash) { 4405 4406 size_t recalc_tags_size; 4406 4407 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); 4407 - if (!ic->recalc_wq ) { 4408 + if (!ic->recalc_wq) { 4408 4409 ti->error = "Cannot allocate workqueue"; 4409 4410 r = -ENOMEM; 4410 4411 goto bad;
+4 -4
drivers/md/dm-log.c
··· 758 758 log_clear_bit(lc, lc->recovering_bits, region); 759 759 if (in_sync) { 760 760 log_set_bit(lc, lc->sync_bits, region); 761 - lc->sync_count++; 762 - } else if (log_test_bit(lc->sync_bits, region)) { 761 + lc->sync_count++; 762 + } else if (log_test_bit(lc->sync_bits, region)) { 763 763 lc->sync_count--; 764 764 log_clear_bit(lc, lc->sync_bits, region); 765 765 } ··· 767 767 768 768 static region_t core_get_sync_count(struct dm_dirty_log *log) 769 769 { 770 - struct log_c *lc = (struct log_c *) log->context; 770 + struct log_c *lc = (struct log_c *) log->context; 771 771 772 - return lc->sync_count; 772 + return lc->sync_count; 773 773 } 774 774 775 775 #define DMEMIT_SYNC \
+4 -4
drivers/md/dm-raid.c
··· 363 363 const int mode; 364 364 const char *param; 365 365 } _raid456_journal_mode[] = { 366 - { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" }, 367 - { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" } 366 + { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" }, 367 + { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" } 368 368 }; 369 369 370 370 /* Return MD raid4/5/6 journal mode for dm @journal_mode one */ ··· 1115 1115 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs 1116 1116 * [region_size <sectors>] Defines granularity of bitmap 1117 1117 * [journal_dev <dev>] raid4/5/6 journaling deviice 1118 - * (i.e. write hole closing log) 1118 + * (i.e. write hole closing log) 1119 1119 * 1120 1120 * RAID10-only options: 1121 1121 * [raid10_copies <# copies>] Number of copies. (Default: 2) ··· 4002 4002 } 4003 4003 4004 4004 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */ 4005 - if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && 4005 + if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && 4006 4006 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) || 4007 4007 (rs->requested_bitmap_chunk_sectors && 4008 4008 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
+1 -1
drivers/md/dm-raid1.c
··· 904 904 if (IS_ERR(ms->io_client)) { 905 905 ti->error = "Error creating dm_io client"; 906 906 kfree(ms); 907 - return NULL; 907 + return NULL; 908 908 } 909 909 910 910 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
+2 -2
drivers/md/dm-table.c
··· 73 73 n = get_child(n, CHILDREN_PER_NODE - 1); 74 74 75 75 if (n >= t->counts[l]) 76 - return (sector_t) - 1; 76 + return (sector_t) -1; 77 77 78 78 return get_node(t, l, n)[KEYS_PER_NODE - 1]; 79 79 } ··· 1530 1530 if (ti->type->iterate_devices && 1531 1531 ti->type->iterate_devices(ti, func, data)) 1532 1532 return true; 1533 - } 1533 + } 1534 1534 1535 1535 return false; 1536 1536 }
+3 -3
drivers/md/dm-thin.c
··· 1181 1181 discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO); 1182 1182 discard_parent->bi_end_io = passdown_endio; 1183 1183 discard_parent->bi_private = m; 1184 - if (m->maybe_shared) 1185 - passdown_double_checking_shared_status(m, discard_parent); 1186 - else { 1184 + if (m->maybe_shared) 1185 + passdown_double_checking_shared_status(m, discard_parent); 1186 + else { 1187 1187 struct discard_op op; 1188 1188 1189 1189 begin_discard(&op, tc, discard_parent);
+1 -1
drivers/md/dm-writecache.c
··· 531 531 req.notify.context = &endio; 532 532 533 533 /* writing via async dm-io (implied by notify.fn above) won't return an error */ 534 - (void) dm_io(&req, 1, &region, NULL); 534 + (void) dm_io(&req, 1, &region, NULL); 535 535 i = j; 536 536 } 537 537
+3 -3
drivers/md/persistent-data/dm-btree.c
··· 727 727 * nodes, so saves metadata space. 728 728 */ 729 729 static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index, 730 - struct dm_btree_value_type *vt, uint64_t key) 730 + struct dm_btree_value_type *vt, uint64_t key) 731 731 { 732 732 int r; 733 733 unsigned int middle_index; ··· 782 782 if (shadow_current(s) != right) 783 783 unlock_block(s->info, right); 784 784 785 - return r; 785 + return r; 786 786 } 787 787 788 788 ··· 1217 1217 static bool need_insert(struct btree_node *node, uint64_t *keys, 1218 1218 unsigned int level, unsigned int index) 1219 1219 { 1220 - return ((index >= le32_to_cpu(node->header.nr_entries)) || 1220 + return ((index >= le32_to_cpu(node->header.nr_entries)) || 1221 1221 (le64_to_cpu(node->keys[index]) != keys[level])); 1222 1222 } 1223 1223
+1 -1
drivers/md/persistent-data/dm-space-map-common.c
··· 391 391 } 392 392 393 393 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, 394 - dm_block_t begin, dm_block_t end, dm_block_t *b) 394 + dm_block_t begin, dm_block_t end, dm_block_t *b) 395 395 { 396 396 int r; 397 397 uint32_t count;
+1 -1
drivers/md/persistent-data/dm-space-map-common.h
··· 121 121 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, 122 122 dm_block_t end, dm_block_t *result); 123 123 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, 124 - dm_block_t begin, dm_block_t end, dm_block_t *result); 124 + dm_block_t begin, dm_block_t end, dm_block_t *result); 125 125 126 126 /* 127 127 * The next three functions return (via nr_allocations) the net number of