dm space map metadata: fix refcount decrement below 0 which caused corruption

This has been a relatively long-standing issue that wasn't nailed down
until Teng-Feng Yang's meticulous bug report to dm-devel on 3/7/2014,
see: http://www.redhat.com/archives/dm-devel/2014-March/msg00021.html

From that report:
"When decreasing the reference count of a metadata block with its
reference count equals 3, we will call dm_btree_remove() to remove
this enrty from the B+tree which keeps the reference count info in
metadata device.

The B+tree will try to rebalance the entries of the child nodes in each
node it traverses, and the rebalance process consists of the following
steps:

(1) Find the corresponding children in the current node (shadow_current(s))
(2) Shadow the child blocks (issues BOP_INC)
(3) Redistribute keys among the children, and free children if necessary (issues BOP_DEC)

Since the update of a metadata block's reference count could be
recursive, we will stash these reference count update operations in
smm->uncommitted and then process them in a FILO (last-in, first-out)
fashion.

The problem is that step (3) could free the children which were created
in step (2), so the BOP_DEC issued in step (3) will be carried out
before the BOP_INC issued in step (2), since these BOPs are processed
in FILO fashion. Once the BOP_DEC from step (3) tries to decrease the
reference count of the newly shadowed block, it will report a failure
because the count is already 0 before decreasing. It looks like we can
solve this issue by processing these BOPs in a FIFO fashion instead of
FILO."
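
To make the ordering problem concrete, here is a minimal user-space
model (a hypothetical sketch, not the kernel code): it replays the same
INC/DEC pair queued by steps (2) and (3) in both orders, showing that
FILO replay tries to decrement the fresh block's zero refcount while
FIFO replay does not.

/* Hypothetical model of the op-ordering bug; not the kernel code. */
#include <stdio.h>

enum op { INC, DEC };

/*
 * Replay ops against a refcount that starts at 0 (a freshly shadowed
 * block); report the condition that commit 5b564d80 turned into a
 * hard error.
 */
static int replay(const enum op *ops, int n, int fifo)
{
	int rc = 0, i;

	for (i = 0; i < n; i++) {
		enum op o = fifo ? ops[i] : ops[n - 1 - i];

		if (o == INC) {
			rc++;
		} else if (rc == 0) {
			printf("unable to decrement a reference count below 0\n");
			return -1;
		} else {
			rc--;
		}
	}
	printf("final refcount: %d\n", rc);
	return 0;
}

int main(void)
{
	/* Step (2) queues BOP_INC, then step (3) queues BOP_DEC. */
	enum op ops[] = { INC, DEC };

	replay(ops, 2, 0);	/* FILO: DEC replayed first -> error */
	replay(ops, 2, 1);	/* FIFO: INC then DEC -> refcount 0  */
	return 0;
}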

Commit 5b564d80 ("dm space map: disallow decrementing a reference count
below zero") changed the code to report an error for this temporary
refcount decrement below zero. So what was previously a harmless
invalid refcount became a hard failure due to the new error path:

device-mapper: space map common: unable to decrement a reference count below 0
device-mapper: thin: 253:6: dm_thin_insert_block() failed: error = -22
device-mapper: thin: 253:6: switching pool to read-only mode

This bug is in dm persistent-data code that is common to the DM thin and
cache targets. So any users of those targets should apply this fix.

Fix this by applying recursive space map operations in FIFO order rather
than FILO.

Resolves: https://bugzilla.kernel.org/show_bug.cgi?id=68801

Reported-by: Apollon Oikonomopoulos <apoikos@debian.org>
Reported-by: edwillam1007@gmail.com
Reported-by: Teng-Feng Yang <shinrairis@gmail.com>
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # 3.13+

Authored by Joe Thornber and committed by Mike Snitzer (cebc2de4, f6d16d32)

drivers/md/persistent-data/dm-space-map-metadata.c (+92 -21)
···
 	dm_block_t block;
 };
 
+struct bop_ring_buffer {
+	unsigned begin;
+	unsigned end;
+	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+	brb->begin = 0;
+	brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+	return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+	unsigned r = old + 1;
+	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+		    enum block_op_type type, dm_block_t b)
+{
+	struct block_op *bop;
+	unsigned next = brb_next(brb, brb->end);
+
+	/*
+	 * We don't allow the last bop to be filled, this way we can
+	 * differentiate between full and empty.
+	 */
+	if (next == brb->begin)
+		return -ENOMEM;
+
+	bop = brb->bops + brb->end;
+	bop->type = type;
+	bop->block = b;
+
+	brb->end = next;
+
+	return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+	struct block_op *bop;
+
+	if (brb_empty(brb))
+		return -ENODATA;
+
+	bop = brb->bops + brb->begin;
+	result->type = bop->type;
+	result->block = bop->block;
+
+	brb->begin = brb_next(brb, brb->begin);
+
+	return 0;
+}
+
+/*----------------------------------------------------------------*/
+
 struct sm_metadata {
 	struct dm_space_map sm;
···
 	unsigned recursion_count;
 	unsigned allocated_this_transaction;
-	unsigned nr_uncommitted;
-	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+	struct bop_ring_buffer uncommitted;
 
 	struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
 {
-	struct block_op *op;
+	int r = brb_push(&smm->uncommitted, type, b);
 
-	if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+	if (r) {
 		DMERR("too many recursive allocations");
 		return -ENOMEM;
 	}
-
-	op = smm->uncommitted + smm->nr_uncommitted++;
-	op->type = type;
-	op->block = b;
 
 	return 0;
 }
···
 		return -ENOMEM;
 	}
 
-	if (smm->recursion_count == 1 && smm->nr_uncommitted) {
-		while (smm->nr_uncommitted && !r) {
-			smm->nr_uncommitted--;
-			r = commit_bop(smm, smm->uncommitted +
-				       smm->nr_uncommitted);
+	if (smm->recursion_count == 1) {
+		while (!brb_empty(&smm->uncommitted)) {
+			struct block_op bop;
+
+			r = brb_pop(&smm->uncommitted, &bop);
+			if (r) {
+				DMERR("bug in bop ring buffer");
+				break;
+			}
+
+			r = commit_bop(smm, &bop);
 			if (r)
 				break;
 		}
···
 static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
 				 uint32_t *result)
 {
-	int r, i;
+	int r;
+	unsigned i;
 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 	unsigned adjustment = 0;
···
 	 * We may have some uncommitted adjustments to add. This list
 	 * should always be really short.
 	 */
-	for (i = 0; i < smm->nr_uncommitted; i++) {
-		struct block_op *op = smm->uncommitted + i;
+	for (i = smm->uncommitted.begin;
+	     i != smm->uncommitted.end;
+	     i = brb_next(&smm->uncommitted, i)) {
+		struct block_op *op = smm->uncommitted.bops + i;
 
 		if (op->block != b)
 			continue;
···
 static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
 					      dm_block_t b, int *result)
 {
-	int r, i, adjustment = 0;
+	int r, adjustment = 0;
+	unsigned i;
 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 	uint32_t rc;
···
 	 * We may have some uncommitted adjustments to add. This list
 	 * should always be really short.
 	 */
-	for (i = 0; i < smm->nr_uncommitted; i++) {
-		struct block_op *op = smm->uncommitted + i;
+	for (i = smm->uncommitted.begin;
+	     i != smm->uncommitted.end;
+	     i = brb_next(&smm->uncommitted, i)) {
+
+		struct block_op *op = smm->uncommitted.bops + i;
 
 		if (op->block != b)
 			continue;
···
 	smm->begin = superblock + 1;
 	smm->recursion_count = 0;
 	smm->allocated_this_transaction = 0;
-	smm->nr_uncommitted = 0;
+	brb_init(&smm->uncommitted);
 	threshold_init(&smm->threshold);
 
 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
···
 	smm->begin = 0;
 	smm->recursion_count = 0;
 	smm->allocated_this_transaction = 0;
-	smm->nr_uncommitted = 0;
+	brb_init(&smm->uncommitted);
 	threshold_init(&smm->threshold);
 
 	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
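
As a usage illustration, the new brb_* helpers can be exercised from
user space. The sketch below copies them from the patch and substitutes
stand-ins for the kernel definitions not shown here (the
MAX_RECURSIVE_ALLOCATIONS value of 16, the dm_block_t typedef, and the
block_op definitions are assumptions of this harness). Pops come back
in insertion order, so the BOP_INC queued by shadowing is committed
before the BOP_DEC queued by the free:

/*
 * User-space harness for the brb_* helpers in the patch above.
 * The helpers are copied verbatim; the kernel types and macros are
 * replaced with minimal stand-ins (an assumption of this sketch).
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RECURSIVE_ALLOCATIONS 16		/* stand-in value */
typedef uint64_t dm_block_t;			/* stand-in type  */
enum block_op_type { BOP_INC, BOP_DEC };

struct block_op {
	enum block_op_type type;
	dm_block_t block;
};

struct bop_ring_buffer {
	unsigned begin;
	unsigned end;
	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};

static void brb_init(struct bop_ring_buffer *brb)
{
	brb->begin = 0;
	brb->end = 0;
}

static bool brb_empty(struct bop_ring_buffer *brb)
{
	return brb->begin == brb->end;
}

static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
	unsigned r = old + 1;
	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
}

static int brb_push(struct bop_ring_buffer *brb,
		    enum block_op_type type, dm_block_t b)
{
	struct block_op *bop;
	unsigned next = brb_next(brb, brb->end);

	/* One slot stays empty so full and empty are distinguishable. */
	if (next == brb->begin)
		return -ENOMEM;

	bop = brb->bops + brb->end;
	bop->type = type;
	bop->block = b;
	brb->end = next;
	return 0;
}

static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
{
	struct block_op *bop;

	if (brb_empty(brb))
		return -ENODATA;

	bop = brb->bops + brb->begin;
	result->type = bop->type;
	result->block = bop->block;
	brb->begin = brb_next(brb, brb->begin);
	return 0;
}

int main(void)
{
	struct bop_ring_buffer brb;
	struct block_op bop;

	brb_init(&brb);
	brb_push(&brb, BOP_INC, 42);	/* queued by shadowing, step (2) */
	brb_push(&brb, BOP_DEC, 42);	/* queued by the free, step (3)  */

	/* FIFO: pops come back in insertion order, INC before DEC. */
	while (!brb_pop(&brb, &bop))
		printf("%s block %llu\n",
		       bop.type == BOP_INC ? "inc" : "dec",
		       (unsigned long long)bop.block);
	return 0;
}

Note the extra array slot (MAX_RECURSIVE_ALLOCATIONS + 1): it is what
lets brb_push distinguish a full ring (next == begin) from an empty one
(begin == end) without a separate element counter.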