Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcache: Convert bch_btree_insert() to bch_btree_map_leaf_nodes()

Last of the btree_map() conversions. The main visible effect is that
bch_btree_insert() no longer takes a struct btree_op argument -
there's no fancy state machine stuff going on; it's just a normal
function.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>

+45 -54
+38 -43
drivers/md/bcache/btree.c
··· 2174 2174 return ret; 2175 2175 } 2176 2176 2177 - static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, 2178 - struct keylist *keys, atomic_t *journal_ref, 2179 - struct bkey *replace_key) 2177 + struct btree_insert_op { 2178 + struct btree_op op; 2179 + struct keylist *keys; 2180 + atomic_t *journal_ref; 2181 + struct bkey *replace_key; 2182 + }; 2183 + 2184 + int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2180 2185 { 2181 - if (bch_keylist_empty(keys)) 2182 - return 0; 2186 + struct btree_insert_op *op = container_of(b_op, 2187 + struct btree_insert_op, op); 2183 2188 2184 - if (b->level) { 2185 - struct bkey *k; 2186 - 2187 - k = bch_next_recurse_key(b, &START_KEY(keys->keys)); 2188 - if (!k) { 2189 - btree_bug(b, "no key to recurse on at level %i/%i", 2190 - b->level, b->c->root->level); 2191 - 2192 - bch_keylist_reset(keys); 2193 - return -EIO; 2194 - } 2195 - 2196 - return btree(insert_recurse, k, b, op, keys, 2197 - journal_ref, replace_key); 2198 - } else { 2199 - return bch_btree_insert_node(b, op, keys, 2200 - journal_ref, replace_key); 2201 - } 2189 + int ret = bch_btree_insert_node(b, &op->op, op->keys, 2190 + op->journal_ref, op->replace_key); 2191 + if (ret && !bch_keylist_empty(op->keys)) 2192 + return ret; 2193 + else 2194 + return MAP_DONE; 2202 2195 } 2203 2196 2204 - int bch_btree_insert(struct btree_op *op, struct cache_set *c, 2205 - struct keylist *keys, atomic_t *journal_ref, 2206 - struct bkey *replace_key) 2197 + int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2198 + atomic_t *journal_ref, struct bkey *replace_key) 2207 2199 { 2200 + struct btree_insert_op op; 2208 2201 int ret = 0; 2209 2202 2203 + BUG_ON(current->bio_list); 2210 2204 BUG_ON(bch_keylist_empty(keys)); 2211 2205 2212 - while (!bch_keylist_empty(keys)) { 2213 - op->lock = 0; 2214 - ret = btree_root(insert_recurse, c, op, keys, 2215 - journal_ref, replace_key); 2206 + bch_btree_op_init(&op.op, 0); 2207 + op.keys = keys; 
2208 + op.journal_ref = journal_ref; 2209 + op.replace_key = replace_key; 2216 2210 2217 - if (ret == -EAGAIN) { 2218 - BUG(); 2219 - ret = 0; 2220 - } else if (ret) { 2221 - struct bkey *k; 2222 - 2223 - pr_err("error %i", ret); 2224 - 2225 - while ((k = bch_keylist_pop(keys))) 2226 - bkey_put(c, k, 0); 2227 - } 2211 + while (!ret && !bch_keylist_empty(keys)) { 2212 + op.op.lock = 0; 2213 + ret = bch_btree_map_leaf_nodes(&op.op, c, 2214 + &START_KEY(keys->keys), 2215 + btree_insert_fn); 2228 2216 } 2229 2217 2230 - if (op->insert_collision) 2231 - return -ESRCH; 2218 + if (ret) { 2219 + struct bkey *k; 2220 + 2221 + pr_err("error %i", ret); 2222 + 2223 + while ((k = bch_keylist_pop(keys))) 2224 + bkey_put(c, k, 0); 2225 + } else if (op.op.insert_collision) 2226 + ret = -ESRCH; 2232 2227 2233 2228 return ret; 2234 2229 }
+2 -2
drivers/md/bcache/btree.h
··· 281 281 282 282 int bch_btree_insert_check_key(struct btree *, struct btree_op *, 283 283 struct bkey *); 284 - int bch_btree_insert(struct btree_op *, struct cache_set *, 285 - struct keylist *, atomic_t *, struct bkey *); 284 + int bch_btree_insert(struct cache_set *, struct keylist *, 285 + atomic_t *, struct bkey *); 286 286 287 287 int bch_gc_thread_start(struct cache_set *); 288 288 size_t bch_btree_gc_finish(struct cache_set *);
+1 -3
drivers/md/bcache/journal.c
··· 302 302 303 303 uint64_t start = i->j.last_seq, end = i->j.seq, n = start; 304 304 struct keylist keylist; 305 - struct btree_op op; 306 305 307 306 bch_keylist_init(&keylist); 308 - bch_btree_op_init(&op, SHRT_MAX); 309 307 310 308 list_for_each_entry(i, list, list) { 311 309 BUG_ON(i->pin && atomic_read(i->pin) != 1); ··· 320 322 bkey_copy(keylist.top, k); 321 323 bch_keylist_push(&keylist); 322 324 323 - ret = bch_btree_insert(&op, s, &keylist, i->pin, NULL); 325 + ret = bch_btree_insert(s, &keylist, i->pin, NULL); 324 326 if (ret) 325 327 goto err; 326 328
+1 -1
drivers/md/bcache/request.c
··· 237 237 s->flush_journal 238 238 ? &s->cl : NULL); 239 239 240 - ret = bch_btree_insert(&s->op, s->c, &s->insert_keys, 240 + ret = bch_btree_insert(s->c, &s->insert_keys, 241 241 journal_ref, replace_key); 242 242 if (ret == -ESRCH) { 243 243 s->insert_collision = true;
+3 -5
drivers/md/bcache/writeback.c
··· 139 139 140 140 /* This is kind of a dumb way of signalling errors. */ 141 141 if (KEY_DIRTY(&w->key)) { 142 - unsigned i; 143 - struct btree_op op; 144 - struct keylist keys; 145 142 int ret; 143 + unsigned i; 144 + struct keylist keys; 146 145 147 - bch_btree_op_init(&op, -1); 148 146 bch_keylist_init(&keys); 149 147 150 148 bkey_copy(keys.top, &w->key); ··· 152 154 for (i = 0; i < KEY_PTRS(&w->key); i++) 153 155 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); 154 156 155 - ret = bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key); 157 + ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); 156 158 157 159 if (ret) 158 160 trace_bcache_writeback_collision(&w->key);