Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nilfs2: Convert to XArray

This is close to a 1:1 replacement of radix tree APIs with their XArray
equivalents. It would be possible to optimise nilfs_copy_back_pages(),
but that doesn't seem to be in the performance path. Also, I think
it has a pre-existing bug, and I've added a note to that effect in the
source code.

Signed-off-by: Matthew Wilcox <willy@infradead.org>

+22 -33
+9 -17
fs/nilfs2/btnode.c
···
 	ctxt->newbh = NULL;

 	if (inode->i_blkbits == PAGE_SHIFT) {
-		lock_page(obh->b_page);
-		/*
-		 * We cannot call radix_tree_preload for the kernels older
-		 * than 2.6.23, because it is not exported for modules.
-		 */
+		struct page *opage = obh->b_page;
+		lock_page(opage);
 retry:
-		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
-		if (err)
-			goto failed_unlock;
 		/* BUG_ON(oldkey != obh->b_page->index); */
-		if (unlikely(oldkey != obh->b_page->index))
-			NILFS_PAGE_BUG(obh->b_page,
+		if (unlikely(oldkey != opage->index))
+			NILFS_PAGE_BUG(opage,
 				       "invalid oldkey %lld (newkey=%lld)",
 				       (unsigned long long)oldkey,
 				       (unsigned long long)newkey);

 		xa_lock_irq(&btnc->i_pages);
-		err = radix_tree_insert(&btnc->i_pages, newkey, obh->b_page);
+		err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
 		xa_unlock_irq(&btnc->i_pages);
 		/*
 		 * Note: page->index will not change to newkey until
···
 		 * To protect the page in intermediate state, the page lock
 		 * is held.
 		 */
-		radix_tree_preload_end();
 		if (!err)
 			return 0;
 		else if (err != -EEXIST)
···
 		if (!err)
 			goto retry;
 		/* fallback to copy mode */
-		unlock_page(obh->b_page);
+		unlock_page(opage);
 	}

 	nbh = nilfs_btnode_create_block(btnc, newkey);
···
 	mark_buffer_dirty(obh);

 	xa_lock_irq(&btnc->i_pages);
-	radix_tree_delete(&btnc->i_pages, oldkey);
-	radix_tree_tag_set(&btnc->i_pages, newkey,
-			   PAGECACHE_TAG_DIRTY);
+	__xa_erase(&btnc->i_pages, oldkey);
+	__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
 	xa_unlock_irq(&btnc->i_pages);

 	opage->index = obh->b_blocknr = newkey;
···
 	if (nbh == NULL) {	/* blocksize == pagesize */
 		xa_lock_irq(&btnc->i_pages);
-		radix_tree_delete(&btnc->i_pages, newkey);
+		__xa_erase(&btnc->i_pages, newkey);
 		xa_unlock_irq(&btnc->i_pages);
 		unlock_page(ctxt->bh->b_page);
 	} else
+13 -16
fs/nilfs2/page.c
···
  * @dmap: destination page cache
  * @smap: source page cache
  *
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
  * This must be ensured by the caller.
  */
 void nilfs_copy_back_pages(struct address_space *dmap,
···
 	struct pagevec pvec;
 	unsigned int i, n;
 	pgoff_t index = 0;
-	int err;

 	pagevec_init(&pvec);
 repeat:
···
 		lock_page(page);
 		dpage = find_lock_page(dmap, offset);
 		if (dpage) {
-			/* override existing page on the destination cache */
+			/* overwrite existing page in the destination cache */
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
 			put_page(dpage);
+			/* Do we not need to remove page from smap here? */
 		} else {
-			struct page *page2;
+			struct page *p;

 			/* move the page to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			page2 = radix_tree_delete(&smap->i_pages, offset);
-			WARN_ON(page2 != page);
-
+			p = __xa_erase(&smap->i_pages, offset);
+			WARN_ON(page != p);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);

 			xa_lock_irq(&dmap->i_pages);
-			err = radix_tree_insert(&dmap->i_pages, offset, page);
-			if (unlikely(err < 0)) {
-				WARN_ON(err == -EEXIST);
+			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
+			if (unlikely(p)) {
+				/* Probably -ENOMEM */
 				page->mapping = NULL;
-				put_page(page); /* for cache */
+				put_page(page);
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
 				if (PageDirty(page))
-					radix_tree_tag_set(&dmap->i_pages,
-							   offset,
-							   PAGECACHE_TAG_DIRTY);
+					__xa_set_mark(&dmap->i_pages, offset,
+						      PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
···
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->i_pages,
-					     page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);