Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
"I fixed up a regression from 4.0 where conversion between different
raid levels would sometimes bail out without converting.

Filipe tracked down a race where it was possible to double allocate
chunks on the drive.

Mark has a fix for fiemap. All three will get bundled off for stable
as well"

* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
Btrfs: fix regression in raid level conversion
Btrfs: fix racy system chunk allocation when setting block group ro
btrfs: clear 'ret' in btrfs_check_shared() loop

Changed files (+38 lines)
fs/btrfs/backref.c (+17)
···
  * indirect refs to their parent bytenr.
  * When roots are found, they're added to the roots list
  *
+ * NOTE: This can return values > 0
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
···
 	return ret;
 }
 
+/**
+ * btrfs_check_shared - tell us whether an extent is shared
+ *
+ * @trans: optional trans handle
+ *
+ * btrfs_check_shared uses the backref walking code but will short
+ * circuit as soon as it finds a root or inode that doesn't match the
+ * one passed in. This provides a significant performance benefit for
+ * callers (such as fiemap) which want to know whether the extent is
+ * shared but do not need a ref count.
+ *
+ * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
+ */
 int btrfs_check_shared(struct btrfs_trans_handle *trans,
 		       struct btrfs_fs_info *fs_info, u64 root_objectid,
 		       u64 inum, u64 bytenr)
···
 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
 					roots, NULL, root_objectid, inum);
 		if (ret == BACKREF_FOUND_SHARED) {
+			/* this is the only condition under which we return 1 */
 			ret = 1;
 			break;
 		}
 		if (ret < 0 && ret != -ENOENT)
 			break;
+		ret = 0;
 		node = ulist_next(tmp, &uiter);
 		if (!node)
 			break;
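For readers who do not have the btrfs sources at hand, the shape of the bug fixed by the last hunk can be shown with a short, self-contained sketch in plain C. The names here (lookup_refs, check_shared) are invented for illustration and are not btrfs functions; the point is only that a positive, purely informational return value from a helper can leak out of a loop when the result variable is not cleared before walking on.

#include <stdio.h>

/*
 * Stand-in for find_parent_nodes(): returns < 0 on error, 0 normally,
 * and occasionally a positive value that is informational, not an
 * error and not "shared".  Purely illustrative, not a btrfs function.
 */
static int lookup_refs(int node)
{
	return (node == 1) ? 2 : 0;	/* positive value on the last node */
}

/*
 * Should return 1 only if something shared is found, 0 otherwise.
 * Without the "ret = 0" reset, the positive value returned for the
 * last node would survive the final break and be reported to the
 * caller as if the extent were shared, which is the bug shape fixed
 * in btrfs_check_shared().
 */
static int check_shared(void)
{
	int nodes_left = 3;
	int ret = 0;

	while (1) {
		ret = lookup_refs(nodes_left);
		if (ret < 0)
			break;		/* real error */
		ret = 0;		/* the fix: clear before walking on */
		if (--nodes_left == 0)
			break;		/* nothing left to walk */
	}
	return ret;
}

int main(void)
{
	printf("check_shared() = %d\n", check_shared());	/* prints 0 */
	return 0;
}

Compiled and run, this prints 0; dropping the "ret = 0" line makes it print 2, the analogue of btrfs_check_shared() reporting an extent as shared when it is not.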
fs/btrfs/extent-tree.c (+20)
···
 		goto again;
 	}
 
+	/*
+	 * if we are changing raid levels, try to allocate a corresponding
+	 * block group with the new raid level.
+	 */
+	alloc_flags = update_block_group_flags(root, cache->flags);
+	if (alloc_flags != cache->flags) {
+		ret = do_chunk_alloc(trans, root, alloc_flags,
+				     CHUNK_ALLOC_FORCE);
+		/*
+		 * ENOSPC is allowed here, we may have enough space
+		 * already allocated at the new raid level to
+		 * carry on
+		 */
+		if (ret == -ENOSPC)
+			ret = 0;
+		if (ret < 0)
+			goto out;
+	}
 
 	ret = set_block_group_ro(cache, 0);
 	if (!ret)
···
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(root, cache->flags);
+		lock_chunks(root->fs_info->chunk_root);
 		check_system_chunk(trans, root, alloc_flags);
+		unlock_chunks(root->fs_info->chunk_root);
 	}
 	mutex_unlock(&root->fs_info->ro_block_group_mutex);
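The lock_chunks()/unlock_chunks() pair closes a check-then-allocate race around system chunks. Below is a hedged userspace sketch of that race pattern (pthreads, invented names, nothing in it is a btrfs API): if the "do we need a system chunk?" check and the allocation are not covered by one lock, two concurrent callers can both see "needed" and both allocate.

#include <pthread.h>
#include <stdio.h>

/* Illustrative state only; not btrfs structures. */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int system_chunks;		/* chunks allocated so far */

/*
 * Allocate a system chunk only if none exists yet.  If the check and
 * the increment ran without chunk_mutex held, two threads could both
 * observe system_chunks == 0 and both allocate, i.e. the double
 * allocation the extent-tree.c hunk above guards against.
 */
static void check_system_chunk_sketch(void)
{
	if (system_chunks == 0)
		system_chunks++;	/* "allocate" one */
}

static void *set_block_group_ro_sketch(void *arg)
{
	(void)arg;
	/* the fix in miniature: hold the lock across check + allocate */
	pthread_mutex_lock(&chunk_mutex);
	check_system_chunk_sketch();
	pthread_mutex_unlock(&chunk_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, set_block_group_ro_sketch, NULL);
	pthread_create(&b, NULL, set_block_group_ro_sketch, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* always 1 with the lock held; could be 2 without it */
	printf("system chunks allocated: %d\n", system_chunks);
	return 0;
}

Built with cc -pthread, this always prints 1; removing the lock/unlock pair reopens the window in which both threads can observe system_chunks == 0 and both allocate.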
fs/btrfs/volumes.c (+1)
···
 {
 	u64 chunk_offset;
 
+	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
 	chunk_offset = find_next_chunk(extent_root->fs_info);
 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 }
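The single added line asserts the locking contract of btrfs_alloc_chunk(): the caller must already hold the chunk mutex. As a rough userspace illustration of the same convention, here is a sketch assuming an invented checked_mutex wrapper, since plain pthreads has no equivalent of mutex_is_locked():

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/*
 * Tiny lock wrapper that records whether it is held, so callees can
 * assert the caller's locking contract much like the ASSERT() above.
 * Illustrative only; not kernel code.
 */
struct checked_mutex {
	pthread_mutex_t lock;
	int held;
};

static struct checked_mutex chunk_mutex = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static void checked_lock(struct checked_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->held = 1;
}

static void checked_unlock(struct checked_mutex *m)
{
	m->held = 0;
	pthread_mutex_unlock(&m->lock);
}

/* Caller must hold chunk_mutex; the assert makes that contract loud. */
static void alloc_chunk_sketch(void)
{
	assert(chunk_mutex.held);
	printf("allocating a chunk with the lock held\n");
}

int main(void)
{
	checked_lock(&chunk_mutex);
	alloc_chunk_sketch();
	checked_unlock(&chunk_mutex);
	return 0;
}

The assert is compiled out with -DNDEBUG, loosely mirroring how such debug-only assertions cost nothing in a production build.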