Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-pull

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-pull: (64 commits)
[XFS] Remove vn_revalidate calls in xfs.
[XFS] Now that xfs_setattr is only used for attributes set from ->setattr
[XFS] xfs_setattr currently doesn't just handle the attributes set through
[XFS] fix use after free with external logs or real-time devices
[XFS] A bug was found in xfs_bmap_add_extent_unwritten_real(). In a
[XFS] fix compilation without CONFIG_PROC_FS
[XFS] s/XFS_PURGE_INODE/IRELE/g s/VN_HOLD(XFS_ITOV())/IHOLD()/
[XFS] fix mount option parsing in remount
[XFS] Disable queue flag test in barrier check.
[XFS] streamline init/exit path
[XFS] Fix up problem when CONFIG_XFS_POSIX_ACL is not set and yet we still
[XFS] Don't assert if trying to mount with blocksize > pagesize
[XFS] Don't update mtime on rename source
[XFS] Allow xfs_bmbt_split() to fallback to the lowspace allocator
[XFS] Restore the lowspace extent allocator algorithm
[XFS] use minleft when allocating in xfs_bmbt_split()
[XFS] attrmulti cleanup
[XFS] Check for invalid flags in xfs_attrlist_by_handle.
[XFS] Fix CI lookup in leaf-form directories
[XFS] Use the generic xattr methods.
...

+3113 -3106
+102
fs/dcache.c
··· 1220 1220 return new; 1221 1221 } 1222 1222 1223 + /** 1224 + * d_add_ci - lookup or allocate new dentry with case-exact name 1225 + * @inode: the inode case-insensitive lookup has found 1226 + * @dentry: the negative dentry that was passed to the parent's lookup func 1227 + * @name: the case-exact name to be associated with the returned dentry 1228 + * 1229 + * This is to avoid filling the dcache with case-insensitive names to the 1230 + * same inode, only the actual correct case is stored in the dcache for 1231 + * case-insensitive filesystems. 1232 + * 1233 + * For a case-insensitive lookup match and if the the case-exact dentry 1234 + * already exists in in the dcache, use it and return it. 1235 + * 1236 + * If no entry exists with the exact case name, allocate new dentry with 1237 + * the exact case, and return the spliced entry. 1238 + */ 1239 + struct dentry *d_add_ci(struct inode *inode, struct dentry *dentry, 1240 + struct qstr *name) 1241 + { 1242 + int error; 1243 + struct dentry *found; 1244 + struct dentry *new; 1245 + 1246 + /* Does a dentry matching the name exist already? */ 1247 + found = d_hash_and_lookup(dentry->d_parent, name); 1248 + /* If not, create it now and return */ 1249 + if (!found) { 1250 + new = d_alloc(dentry->d_parent, name); 1251 + if (!new) { 1252 + error = -ENOMEM; 1253 + goto err_out; 1254 + } 1255 + found = d_splice_alias(inode, new); 1256 + if (found) { 1257 + dput(new); 1258 + return found; 1259 + } 1260 + return new; 1261 + } 1262 + /* Matching dentry exists, check if it is negative. */ 1263 + if (found->d_inode) { 1264 + if (unlikely(found->d_inode != inode)) { 1265 + /* This can't happen because bad inodes are unhashed. */ 1266 + BUG_ON(!is_bad_inode(inode)); 1267 + BUG_ON(!is_bad_inode(found->d_inode)); 1268 + } 1269 + /* 1270 + * Already have the inode and the dentry attached, decrement 1271 + * the reference count to balance the iget() done 1272 + * earlier on. 
We found the dentry using d_lookup() so it 1273 + * cannot be disconnected and thus we do not need to worry 1274 + * about any NFS/disconnectedness issues here. 1275 + */ 1276 + iput(inode); 1277 + return found; 1278 + } 1279 + /* 1280 + * Negative dentry: instantiate it unless the inode is a directory and 1281 + * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED), 1282 + * in which case d_move() that in place of the found dentry. 1283 + */ 1284 + if (!S_ISDIR(inode->i_mode)) { 1285 + /* Not a directory; everything is easy. */ 1286 + d_instantiate(found, inode); 1287 + return found; 1288 + } 1289 + spin_lock(&dcache_lock); 1290 + if (list_empty(&inode->i_dentry)) { 1291 + /* 1292 + * Directory without a 'disconnected' dentry; we need to do 1293 + * d_instantiate() by hand because it takes dcache_lock which 1294 + * we already hold. 1295 + */ 1296 + list_add(&found->d_alias, &inode->i_dentry); 1297 + found->d_inode = inode; 1298 + spin_unlock(&dcache_lock); 1299 + security_d_instantiate(found, inode); 1300 + return found; 1301 + } 1302 + /* 1303 + * Directory with a 'disconnected' dentry; get a reference to the 1304 + * 'disconnected' dentry. 1305 + */ 1306 + new = list_entry(inode->i_dentry.next, struct dentry, d_alias); 1307 + dget_locked(new); 1308 + spin_unlock(&dcache_lock); 1309 + /* Do security voodoo. */ 1310 + security_d_instantiate(found, inode); 1311 + /* Move new in place of found. */ 1312 + d_move(new, found); 1313 + /* Balance the iget() we did above. */ 1314 + iput(inode); 1315 + /* Throw away found. */ 1316 + dput(found); 1317 + /* Use new as the actual dentry. 
*/ 1318 + return new; 1319 + 1320 + err_out: 1321 + iput(inode); 1322 + return ERR_PTR(error); 1323 + } 1223 1324 1224 1325 /** 1225 1326 * d_lookup - search for a dentry ··· 2355 2254 EXPORT_SYMBOL(d_prune_aliases); 2356 2255 EXPORT_SYMBOL(d_rehash); 2357 2256 EXPORT_SYMBOL(d_splice_alias); 2257 + EXPORT_SYMBOL(d_add_ci); 2358 2258 EXPORT_SYMBOL(d_validate); 2359 2259 EXPORT_SYMBOL(dget_locked); 2360 2260 EXPORT_SYMBOL(dput);
+2 -1
fs/xfs/Makefile
··· 106 106 xfs_iops.o \ 107 107 xfs_lrw.o \ 108 108 xfs_super.o \ 109 - xfs_vnode.o) 109 + xfs_vnode.o \ 110 + xfs_xattr.o) 110 111 111 112 # Objects in support/ 112 113 xfs-y += $(addprefix support/, \
+3 -3
fs/xfs/linux-2.6/kmem.c
··· 90 90 } 91 91 92 92 void 93 - kmem_free(void *ptr, size_t size) 93 + kmem_free(const void *ptr) 94 94 { 95 95 if (!is_vmalloc_addr(ptr)) { 96 96 kfree(ptr); ··· 100 100 } 101 101 102 102 void * 103 - kmem_realloc(void *ptr, size_t newsize, size_t oldsize, 103 + kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, 104 104 unsigned int __nocast flags) 105 105 { 106 106 void *new; ··· 110 110 if (new) 111 111 memcpy(new, ptr, 112 112 ((oldsize < newsize) ? oldsize : newsize)); 113 - kmem_free(ptr, oldsize); 113 + kmem_free(ptr); 114 114 } 115 115 return new; 116 116 }
+2 -2
fs/xfs/linux-2.6/kmem.h
··· 57 57 extern void *kmem_alloc(size_t, unsigned int __nocast); 58 58 extern void *kmem_zalloc(size_t, unsigned int __nocast); 59 59 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast); 60 - extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); 61 - extern void kmem_free(void *, size_t); 60 + extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); 61 + extern void kmem_free(const void *); 62 62 63 63 /* 64 64 * Zone interfaces
+2 -3
fs/xfs/linux-2.6/xfs_aops.c
··· 409 409 STATIC void 410 410 xfs_start_page_writeback( 411 411 struct page *page, 412 - struct writeback_control *wbc, 413 412 int clear_dirty, 414 413 int buffers) 415 414 { ··· 857 858 done = 1; 858 859 } 859 860 } 860 - xfs_start_page_writeback(page, wbc, !page_dirty, count); 861 + xfs_start_page_writeback(page, !page_dirty, count); 861 862 } 862 863 863 864 return done; ··· 1129 1130 SetPageUptodate(page); 1130 1131 1131 1132 if (startio) 1132 - xfs_start_page_writeback(page, wbc, 1, count); 1133 + xfs_start_page_writeback(page, 1, count); 1133 1134 1134 1135 if (ioend && iomap_valid) { 1135 1136 offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
+5 -9
fs/xfs/linux-2.6/xfs_buf.c
··· 310 310 xfs_buf_t *bp) 311 311 { 312 312 if (bp->b_pages != bp->b_page_array) { 313 - kmem_free(bp->b_pages, 314 - bp->b_page_count * sizeof(struct page *)); 313 + kmem_free(bp->b_pages); 315 314 } 316 315 } 317 316 ··· 1397 1398 xfs_free_bufhash( 1398 1399 xfs_buftarg_t *btp) 1399 1400 { 1400 - kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t)); 1401 + kmem_free(btp->bt_hash); 1401 1402 btp->bt_hash = NULL; 1402 1403 } 1403 1404 ··· 1427 1428 1428 1429 void 1429 1430 xfs_free_buftarg( 1430 - xfs_buftarg_t *btp, 1431 - int external) 1431 + xfs_buftarg_t *btp) 1432 1432 { 1433 1433 xfs_flush_buftarg(btp, 1); 1434 1434 xfs_blkdev_issue_flush(btp); 1435 - if (external) 1436 - xfs_blkdev_put(btp->bt_bdev); 1437 1435 xfs_free_bufhash(btp); 1438 1436 iput(btp->bt_mapping->host); 1439 1437 ··· 1440 1444 xfs_unregister_buftarg(btp); 1441 1445 kthread_stop(btp->bt_task); 1442 1446 1443 - kmem_free(btp, sizeof(*btp)); 1447 + kmem_free(btp); 1444 1448 } 1445 1449 1446 1450 STATIC int ··· 1571 1575 return btp; 1572 1576 1573 1577 error: 1574 - kmem_free(btp, sizeof(*btp)); 1578 + kmem_free(btp); 1575 1579 return NULL; 1576 1580 } 1577 1581
+1 -1
fs/xfs/linux-2.6/xfs_buf.h
··· 429 429 * Handling of buftargs. 430 430 */ 431 431 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int); 432 - extern void xfs_free_buftarg(xfs_buftarg_t *, int); 432 + extern void xfs_free_buftarg(xfs_buftarg_t *); 433 433 extern void xfs_wait_buftarg(xfs_buftarg_t *); 434 434 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); 435 435 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+1 -1
fs/xfs/linux-2.6/xfs_export.c
··· 215 215 struct xfs_inode *cip; 216 216 struct dentry *parent; 217 217 218 - error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip); 218 + error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); 219 219 if (unlikely(error)) 220 220 return ERR_PTR(-error); 221 221
+337 -39
fs/xfs/linux-2.6/xfs_ioctl.c
··· 48 48 #include "xfs_dfrag.h" 49 49 #include "xfs_fsops.h" 50 50 #include "xfs_vnodeops.h" 51 + #include "xfs_quota.h" 52 + #include "xfs_inode_item.h" 51 53 52 54 #include <linux/capability.h> 53 55 #include <linux/dcache.h> ··· 470 468 if (al_hreq.buflen > XATTR_LIST_MAX) 471 469 return -XFS_ERROR(EINVAL); 472 470 471 + /* 472 + * Reject flags, only allow namespaces. 473 + */ 474 + if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) 475 + return -XFS_ERROR(EINVAL); 476 + 473 477 error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode); 474 478 if (error) 475 479 goto out; ··· 595 587 goto out; 596 588 597 589 error = E2BIG; 598 - size = am_hreq.opcount * sizeof(attr_multiop_t); 590 + size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); 599 591 if (!size || size > 16 * PAGE_SIZE) 600 592 goto out_vn_rele; 601 593 ··· 688 680 return -XFS_ERROR(EFAULT); 689 681 690 682 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 691 - attr_flags |= ATTR_NONBLOCK; 683 + attr_flags |= XFS_ATTR_NONBLOCK; 692 684 if (ioflags & IO_INVIS) 693 - attr_flags |= ATTR_DMI; 685 + attr_flags |= XFS_ATTR_DMI; 694 686 695 687 error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, 696 688 NULL, attr_flags); ··· 881 873 return 0; 882 874 } 883 875 876 + STATIC void 877 + xfs_set_diflags( 878 + struct xfs_inode *ip, 879 + unsigned int xflags) 880 + { 881 + unsigned int di_flags; 882 + 883 + /* can't set PREALLOC this way, just preserve it */ 884 + di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); 885 + if (xflags & XFS_XFLAG_IMMUTABLE) 886 + di_flags |= XFS_DIFLAG_IMMUTABLE; 887 + if (xflags & XFS_XFLAG_APPEND) 888 + di_flags |= XFS_DIFLAG_APPEND; 889 + if (xflags & XFS_XFLAG_SYNC) 890 + di_flags |= XFS_DIFLAG_SYNC; 891 + if (xflags & XFS_XFLAG_NOATIME) 892 + di_flags |= XFS_DIFLAG_NOATIME; 893 + if (xflags & XFS_XFLAG_NODUMP) 894 + di_flags |= XFS_DIFLAG_NODUMP; 895 + if (xflags & XFS_XFLAG_PROJINHERIT) 896 + di_flags |= XFS_DIFLAG_PROJINHERIT; 897 + if (xflags & XFS_XFLAG_NODEFRAG) 
898 + di_flags |= XFS_DIFLAG_NODEFRAG; 899 + if (xflags & XFS_XFLAG_FILESTREAM) 900 + di_flags |= XFS_DIFLAG_FILESTREAM; 901 + if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 902 + if (xflags & XFS_XFLAG_RTINHERIT) 903 + di_flags |= XFS_DIFLAG_RTINHERIT; 904 + if (xflags & XFS_XFLAG_NOSYMLINKS) 905 + di_flags |= XFS_DIFLAG_NOSYMLINKS; 906 + if (xflags & XFS_XFLAG_EXTSZINHERIT) 907 + di_flags |= XFS_DIFLAG_EXTSZINHERIT; 908 + } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 909 + if (xflags & XFS_XFLAG_REALTIME) 910 + di_flags |= XFS_DIFLAG_REALTIME; 911 + if (xflags & XFS_XFLAG_EXTSIZE) 912 + di_flags |= XFS_DIFLAG_EXTSIZE; 913 + } 914 + 915 + ip->i_d.di_flags = di_flags; 916 + } 917 + 918 + STATIC void 919 + xfs_diflags_to_linux( 920 + struct xfs_inode *ip) 921 + { 922 + struct inode *inode = XFS_ITOV(ip); 923 + unsigned int xflags = xfs_ip2xflags(ip); 924 + 925 + if (xflags & XFS_XFLAG_IMMUTABLE) 926 + inode->i_flags |= S_IMMUTABLE; 927 + else 928 + inode->i_flags &= ~S_IMMUTABLE; 929 + if (xflags & XFS_XFLAG_APPEND) 930 + inode->i_flags |= S_APPEND; 931 + else 932 + inode->i_flags &= ~S_APPEND; 933 + if (xflags & XFS_XFLAG_SYNC) 934 + inode->i_flags |= S_SYNC; 935 + else 936 + inode->i_flags &= ~S_SYNC; 937 + if (xflags & XFS_XFLAG_NOATIME) 938 + inode->i_flags |= S_NOATIME; 939 + else 940 + inode->i_flags &= ~S_NOATIME; 941 + } 942 + 943 + #define FSX_PROJID 1 944 + #define FSX_EXTSIZE 2 945 + #define FSX_XFLAGS 4 946 + #define FSX_NONBLOCK 8 947 + 948 + STATIC int 949 + xfs_ioctl_setattr( 950 + xfs_inode_t *ip, 951 + struct fsxattr *fa, 952 + int mask) 953 + { 954 + struct xfs_mount *mp = ip->i_mount; 955 + struct xfs_trans *tp; 956 + unsigned int lock_flags = 0; 957 + struct xfs_dquot *udqp = NULL, *gdqp = NULL; 958 + struct xfs_dquot *olddquot = NULL; 959 + int code; 960 + 961 + xfs_itrace_entry(ip); 962 + 963 + if (mp->m_flags & XFS_MOUNT_RDONLY) 964 + return XFS_ERROR(EROFS); 965 + if (XFS_FORCED_SHUTDOWN(mp)) 966 + return XFS_ERROR(EIO); 967 + 968 + /* 
969 + * If disk quotas is on, we make sure that the dquots do exist on disk, 970 + * before we start any other transactions. Trying to do this later 971 + * is messy. We don't care to take a readlock to look at the ids 972 + * in inode here, because we can't hold it across the trans_reserve. 973 + * If the IDs do change before we take the ilock, we're covered 974 + * because the i_*dquot fields will get updated anyway. 975 + */ 976 + if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { 977 + code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid, 978 + ip->i_d.di_gid, fa->fsx_projid, 979 + XFS_QMOPT_PQUOTA, &udqp, &gdqp); 980 + if (code) 981 + return code; 982 + } 983 + 984 + /* 985 + * For the other attributes, we acquire the inode lock and 986 + * first do an error checking pass. 987 + */ 988 + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); 989 + code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); 990 + if (code) 991 + goto error_return; 992 + 993 + lock_flags = XFS_ILOCK_EXCL; 994 + xfs_ilock(ip, lock_flags); 995 + 996 + /* 997 + * CAP_FOWNER overrides the following restrictions: 998 + * 999 + * The user ID of the calling process must be equal 1000 + * to the file owner ID, except in cases where the 1001 + * CAP_FSETID capability is applicable. 1002 + */ 1003 + if (current->fsuid != ip->i_d.di_uid && !capable(CAP_FOWNER)) { 1004 + code = XFS_ERROR(EPERM); 1005 + goto error_return; 1006 + } 1007 + 1008 + /* 1009 + * Do a quota reservation only if projid is actually going to change. 1010 + */ 1011 + if (mask & FSX_PROJID) { 1012 + if (XFS_IS_PQUOTA_ON(mp) && 1013 + ip->i_d.di_projid != fa->fsx_projid) { 1014 + ASSERT(tp); 1015 + code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, 1016 + capable(CAP_FOWNER) ? 1017 + XFS_QMOPT_FORCE_RES : 0); 1018 + if (code) /* out of quota */ 1019 + goto error_return; 1020 + } 1021 + } 1022 + 1023 + if (mask & FSX_EXTSIZE) { 1024 + /* 1025 + * Can't change extent size if any extents are allocated. 
1026 + */ 1027 + if (ip->i_d.di_nextents && 1028 + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != 1029 + fa->fsx_extsize)) { 1030 + code = XFS_ERROR(EINVAL); /* EFBIG? */ 1031 + goto error_return; 1032 + } 1033 + 1034 + /* 1035 + * Extent size must be a multiple of the appropriate block 1036 + * size, if set at all. 1037 + */ 1038 + if (fa->fsx_extsize != 0) { 1039 + xfs_extlen_t size; 1040 + 1041 + if (XFS_IS_REALTIME_INODE(ip) || 1042 + ((mask & FSX_XFLAGS) && 1043 + (fa->fsx_xflags & XFS_XFLAG_REALTIME))) { 1044 + size = mp->m_sb.sb_rextsize << 1045 + mp->m_sb.sb_blocklog; 1046 + } else { 1047 + size = mp->m_sb.sb_blocksize; 1048 + } 1049 + 1050 + if (fa->fsx_extsize % size) { 1051 + code = XFS_ERROR(EINVAL); 1052 + goto error_return; 1053 + } 1054 + } 1055 + } 1056 + 1057 + 1058 + if (mask & FSX_XFLAGS) { 1059 + /* 1060 + * Can't change realtime flag if any extents are allocated. 1061 + */ 1062 + if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 1063 + (XFS_IS_REALTIME_INODE(ip)) != 1064 + (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { 1065 + code = XFS_ERROR(EINVAL); /* EFBIG? */ 1066 + goto error_return; 1067 + } 1068 + 1069 + /* 1070 + * If realtime flag is set then must have realtime data. 1071 + */ 1072 + if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) { 1073 + if ((mp->m_sb.sb_rblocks == 0) || 1074 + (mp->m_sb.sb_rextsize == 0) || 1075 + (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { 1076 + code = XFS_ERROR(EINVAL); 1077 + goto error_return; 1078 + } 1079 + } 1080 + 1081 + /* 1082 + * Can't modify an immutable/append-only file unless 1083 + * we have appropriate permission. 
1084 + */ 1085 + if ((ip->i_d.di_flags & 1086 + (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) || 1087 + (fa->fsx_xflags & 1088 + (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && 1089 + !capable(CAP_LINUX_IMMUTABLE)) { 1090 + code = XFS_ERROR(EPERM); 1091 + goto error_return; 1092 + } 1093 + } 1094 + 1095 + xfs_trans_ijoin(tp, ip, lock_flags); 1096 + xfs_trans_ihold(tp, ip); 1097 + 1098 + /* 1099 + * Change file ownership. Must be the owner or privileged. 1100 + * If the system was configured with the "restricted_chown" 1101 + * option, the owner is not permitted to give away the file, 1102 + * and can change the group id only to a group of which he 1103 + * or she is a member. 1104 + */ 1105 + if (mask & FSX_PROJID) { 1106 + /* 1107 + * CAP_FSETID overrides the following restrictions: 1108 + * 1109 + * The set-user-ID and set-group-ID bits of a file will be 1110 + * cleared upon successful return from chown() 1111 + */ 1112 + if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && 1113 + !capable(CAP_FSETID)) 1114 + ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); 1115 + 1116 + /* 1117 + * Change the ownerships and register quota modifications 1118 + * in the transaction. 1119 + */ 1120 + if (ip->i_d.di_projid != fa->fsx_projid) { 1121 + if (XFS_IS_PQUOTA_ON(mp)) { 1122 + olddquot = XFS_QM_DQVOPCHOWN(mp, tp, ip, 1123 + &ip->i_gdquot, gdqp); 1124 + } 1125 + ip->i_d.di_projid = fa->fsx_projid; 1126 + 1127 + /* 1128 + * We may have to rev the inode as well as 1129 + * the superblock version number since projids didn't 1130 + * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. 
1131 + */ 1132 + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) 1133 + xfs_bump_ino_vers2(tp, ip); 1134 + } 1135 + 1136 + } 1137 + 1138 + if (mask & FSX_EXTSIZE) 1139 + ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog; 1140 + if (mask & FSX_XFLAGS) { 1141 + xfs_set_diflags(ip, fa->fsx_xflags); 1142 + xfs_diflags_to_linux(ip); 1143 + } 1144 + 1145 + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1146 + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 1147 + 1148 + XFS_STATS_INC(xs_ig_attrchg); 1149 + 1150 + /* 1151 + * If this is a synchronous mount, make sure that the 1152 + * transaction goes to disk before returning to the user. 1153 + * This is slightly sub-optimal in that truncates require 1154 + * two sync transactions instead of one for wsync filesystems. 1155 + * One for the truncate and one for the timestamps since we 1156 + * don't want to change the timestamps unless we're sure the 1157 + * truncate worked. Truncates are less than 1% of the laddis 1158 + * mix so this probably isn't worth the trouble to optimize. 1159 + */ 1160 + if (mp->m_flags & XFS_MOUNT_WSYNC) 1161 + xfs_trans_set_sync(tp); 1162 + code = xfs_trans_commit(tp, 0); 1163 + xfs_iunlock(ip, lock_flags); 1164 + 1165 + /* 1166 + * Release any dquot(s) the inode had kept before chown. 1167 + */ 1168 + XFS_QM_DQRELE(mp, olddquot); 1169 + XFS_QM_DQRELE(mp, udqp); 1170 + XFS_QM_DQRELE(mp, gdqp); 1171 + 1172 + if (code) 1173 + return code; 1174 + 1175 + if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) { 1176 + XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, 1177 + NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0, 1178 + (mask & FSX_NONBLOCK) ? 
DM_FLAGS_NDELAY : 0); 1179 + } 1180 + 1181 + return 0; 1182 + 1183 + error_return: 1184 + XFS_QM_DQRELE(mp, udqp); 1185 + XFS_QM_DQRELE(mp, gdqp); 1186 + xfs_trans_cancel(tp, 0); 1187 + if (lock_flags) 1188 + xfs_iunlock(ip, lock_flags); 1189 + return code; 1190 + } 1191 + 884 1192 STATIC int 885 1193 xfs_ioc_fssetxattr( 886 1194 xfs_inode_t *ip, ··· 1204 880 void __user *arg) 1205 881 { 1206 882 struct fsxattr fa; 1207 - struct bhv_vattr *vattr; 1208 - int error; 1209 - int attr_flags; 883 + unsigned int mask; 1210 884 1211 885 if (copy_from_user(&fa, arg, sizeof(fa))) 1212 886 return -EFAULT; 1213 887 1214 - vattr = kmalloc(sizeof(*vattr), GFP_KERNEL); 1215 - if (unlikely(!vattr)) 1216 - return -ENOMEM; 1217 - 1218 - attr_flags = 0; 888 + mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID; 1219 889 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 1220 - attr_flags |= ATTR_NONBLOCK; 890 + mask |= FSX_NONBLOCK; 1221 891 1222 - vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; 1223 - vattr->va_xflags = fa.fsx_xflags; 1224 - vattr->va_extsize = fa.fsx_extsize; 1225 - vattr->va_projid = fa.fsx_projid; 1226 - 1227 - error = -xfs_setattr(ip, vattr, attr_flags, NULL); 1228 - if (!error) 1229 - vn_revalidate(XFS_ITOV(ip)); /* update flags */ 1230 - kfree(vattr); 1231 - return 0; 892 + return -xfs_ioctl_setattr(ip, &fa, mask); 1232 893 } 1233 894 1234 895 STATIC int ··· 1235 926 struct file *filp, 1236 927 void __user *arg) 1237 928 { 1238 - struct bhv_vattr *vattr; 929 + struct fsxattr fa; 1239 930 unsigned int flags; 1240 - int attr_flags; 1241 - int error; 931 + unsigned int mask; 1242 932 1243 933 if (copy_from_user(&flags, arg, sizeof(flags))) 1244 934 return -EFAULT; ··· 1247 939 FS_SYNC_FL)) 1248 940 return -EOPNOTSUPP; 1249 941 1250 - vattr = kmalloc(sizeof(*vattr), GFP_KERNEL); 1251 - if (unlikely(!vattr)) 1252 - return -ENOMEM; 1253 - 1254 - attr_flags = 0; 942 + mask = FSX_XFLAGS; 1255 943 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 1256 - attr_flags |= 
ATTR_NONBLOCK; 944 + mask |= FSX_NONBLOCK; 945 + fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); 1257 946 1258 - vattr->va_mask = XFS_AT_XFLAGS; 1259 - vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); 1260 - 1261 - error = -xfs_setattr(ip, vattr, attr_flags, NULL); 1262 - if (likely(!error)) 1263 - vn_revalidate(XFS_ITOV(ip)); /* update flags */ 1264 - kfree(vattr); 1265 - return error; 947 + return -xfs_ioctl_setattr(ip, &fa, mask); 1266 948 } 1267 949 1268 950 STATIC int
+107 -238
fs/xfs/linux-2.6/xfs_iops.c
··· 181 181 mark_inode_dirty_sync(inode); 182 182 } 183 183 184 - 185 - /* 186 - * Pull the link count and size up from the xfs inode to the linux inode 187 - */ 188 - STATIC void 189 - xfs_validate_fields( 190 - struct inode *inode) 191 - { 192 - struct xfs_inode *ip = XFS_I(inode); 193 - loff_t size; 194 - 195 - /* we're under i_sem so i_size can't change under us */ 196 - size = XFS_ISIZE(ip); 197 - if (i_size_read(inode) != size) 198 - i_size_write(inode, size); 199 - } 200 - 201 184 /* 202 185 * Hook in SELinux. This is not quite correct yet, what we really need 203 186 * here (as we do for default ACLs) is a mechanism by which creation of ··· 228 245 xfs_cleanup_inode( 229 246 struct inode *dir, 230 247 struct inode *inode, 231 - struct dentry *dentry, 232 - int mode) 248 + struct dentry *dentry) 233 249 { 234 250 struct xfs_name teardown; 235 251 ··· 239 257 */ 240 258 xfs_dentry_to_name(&teardown, dentry); 241 259 242 - if (S_ISDIR(mode)) 243 - xfs_rmdir(XFS_I(dir), &teardown, XFS_I(inode)); 244 - else 245 - xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 260 + xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 246 261 iput(inode); 247 262 } 248 263 ··· 254 275 struct xfs_inode *ip = NULL; 255 276 xfs_acl_t *default_acl = NULL; 256 277 struct xfs_name name; 257 - attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS; 278 + int (*test_default_acl)(struct inode *) = _ACL_DEFAULT_EXISTS; 258 279 int error; 259 280 260 281 /* ··· 314 335 } 315 336 316 337 317 - if (S_ISDIR(mode)) 318 - xfs_validate_fields(inode); 319 338 d_instantiate(dentry, inode); 320 - xfs_validate_fields(dir); 321 339 return -error; 322 340 323 341 out_cleanup_inode: 324 - xfs_cleanup_inode(dir, inode, dentry, mode); 342 + xfs_cleanup_inode(dir, inode, dentry); 325 343 out_free_acl: 326 344 if (default_acl) 327 345 _ACL_FREE(default_acl); ··· 358 382 return ERR_PTR(-ENAMETOOLONG); 359 383 360 384 xfs_dentry_to_name(&name, dentry); 361 - error = xfs_lookup(XFS_I(dir), &name, &cip); 385 + error 
= xfs_lookup(XFS_I(dir), &name, &cip, NULL); 362 386 if (unlikely(error)) { 363 387 if (unlikely(error != ENOENT)) 364 388 return ERR_PTR(-error); ··· 367 391 } 368 392 369 393 return d_splice_alias(cip->i_vnode, dentry); 394 + } 395 + 396 + STATIC struct dentry * 397 + xfs_vn_ci_lookup( 398 + struct inode *dir, 399 + struct dentry *dentry, 400 + struct nameidata *nd) 401 + { 402 + struct xfs_inode *ip; 403 + struct xfs_name xname; 404 + struct xfs_name ci_name; 405 + struct qstr dname; 406 + int error; 407 + 408 + if (dentry->d_name.len >= MAXNAMELEN) 409 + return ERR_PTR(-ENAMETOOLONG); 410 + 411 + xfs_dentry_to_name(&xname, dentry); 412 + error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); 413 + if (unlikely(error)) { 414 + if (unlikely(error != ENOENT)) 415 + return ERR_PTR(-error); 416 + /* 417 + * call d_add(dentry, NULL) here when d_drop_negative_children 418 + * is called in xfs_vn_mknod (ie. allow negative dentries 419 + * with CI filesystems). 420 + */ 421 + return NULL; 422 + } 423 + 424 + /* if exact match, just splice and exit */ 425 + if (!ci_name.name) 426 + return d_splice_alias(ip->i_vnode, dentry); 427 + 428 + /* else case-insensitive match... 
*/ 429 + dname.name = ci_name.name; 430 + dname.len = ci_name.len; 431 + dentry = d_add_ci(ip->i_vnode, dentry, &dname); 432 + kmem_free(ci_name.name); 433 + return dentry; 370 434 } 371 435 372 436 STATIC int ··· 430 414 } 431 415 432 416 xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED); 433 - xfs_validate_fields(inode); 434 417 d_instantiate(dentry, inode); 435 418 return 0; 436 419 } ··· 439 424 struct inode *dir, 440 425 struct dentry *dentry) 441 426 { 442 - struct inode *inode; 443 427 struct xfs_name name; 444 428 int error; 445 429 446 - inode = dentry->d_inode; 447 430 xfs_dentry_to_name(&name, dentry); 448 431 449 - error = xfs_remove(XFS_I(dir), &name, XFS_I(inode)); 450 - if (likely(!error)) { 451 - xfs_validate_fields(dir); /* size needs update */ 452 - xfs_validate_fields(inode); 453 - } 454 - return -error; 432 + error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); 433 + if (error) 434 + return error; 435 + 436 + /* 437 + * With unlink, the VFS makes the dentry "negative": no inode, 438 + * but still hashed. This is incompatible with case-insensitive 439 + * mode, so invalidate (unhash) the dentry in CI-mode. 
440 + */ 441 + if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) 442 + d_invalidate(dentry); 443 + return 0; 455 444 } 456 445 457 446 STATIC int ··· 485 466 goto out_cleanup_inode; 486 467 487 468 d_instantiate(dentry, inode); 488 - xfs_validate_fields(dir); 489 - xfs_validate_fields(inode); 490 469 return 0; 491 470 492 471 out_cleanup_inode: 493 - xfs_cleanup_inode(dir, inode, dentry, 0); 472 + xfs_cleanup_inode(dir, inode, dentry); 494 473 out: 495 - return -error; 496 - } 497 - 498 - STATIC int 499 - xfs_vn_rmdir( 500 - struct inode *dir, 501 - struct dentry *dentry) 502 - { 503 - struct inode *inode = dentry->d_inode; 504 - struct xfs_name name; 505 - int error; 506 - 507 - xfs_dentry_to_name(&name, dentry); 508 - 509 - error = xfs_rmdir(XFS_I(dir), &name, XFS_I(inode)); 510 - if (likely(!error)) { 511 - xfs_validate_fields(inode); 512 - xfs_validate_fields(dir); 513 - } 514 474 return -error; 515 475 } 516 476 ··· 503 505 struct inode *new_inode = ndentry->d_inode; 504 506 struct xfs_name oname; 505 507 struct xfs_name nname; 506 - int error; 507 508 508 509 xfs_dentry_to_name(&oname, odentry); 509 510 xfs_dentry_to_name(&nname, ndentry); 510 511 511 - error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), 512 + return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), 512 513 XFS_I(ndir), &nname, new_inode ? 
513 514 XFS_I(new_inode) : NULL); 514 - if (likely(!error)) { 515 - if (new_inode) 516 - xfs_validate_fields(new_inode); 517 - xfs_validate_fields(odir); 518 - if (ndir != odir) 519 - xfs_validate_fields(ndir); 520 - } 521 - return -error; 522 515 } 523 516 524 517 /* ··· 648 659 STATIC int 649 660 xfs_vn_setattr( 650 661 struct dentry *dentry, 651 - struct iattr *attr) 662 + struct iattr *iattr) 652 663 { 653 - struct inode *inode = dentry->d_inode; 654 - unsigned int ia_valid = attr->ia_valid; 655 - bhv_vattr_t vattr = { 0 }; 656 - int flags = 0; 657 - int error; 658 - 659 - if (ia_valid & ATTR_UID) { 660 - vattr.va_mask |= XFS_AT_UID; 661 - vattr.va_uid = attr->ia_uid; 662 - } 663 - if (ia_valid & ATTR_GID) { 664 - vattr.va_mask |= XFS_AT_GID; 665 - vattr.va_gid = attr->ia_gid; 666 - } 667 - if (ia_valid & ATTR_SIZE) { 668 - vattr.va_mask |= XFS_AT_SIZE; 669 - vattr.va_size = attr->ia_size; 670 - } 671 - if (ia_valid & ATTR_ATIME) { 672 - vattr.va_mask |= XFS_AT_ATIME; 673 - vattr.va_atime = attr->ia_atime; 674 - inode->i_atime = attr->ia_atime; 675 - } 676 - if (ia_valid & ATTR_MTIME) { 677 - vattr.va_mask |= XFS_AT_MTIME; 678 - vattr.va_mtime = attr->ia_mtime; 679 - } 680 - if (ia_valid & ATTR_CTIME) { 681 - vattr.va_mask |= XFS_AT_CTIME; 682 - vattr.va_ctime = attr->ia_ctime; 683 - } 684 - if (ia_valid & ATTR_MODE) { 685 - vattr.va_mask |= XFS_AT_MODE; 686 - vattr.va_mode = attr->ia_mode; 687 - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) 688 - inode->i_mode &= ~S_ISGID; 689 - } 690 - 691 - if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) 692 - flags |= ATTR_UTIME; 693 - #ifdef ATTR_NO_BLOCK 694 - if ((ia_valid & ATTR_NO_BLOCK)) 695 - flags |= ATTR_NONBLOCK; 696 - #endif 697 - 698 - error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL); 699 - if (likely(!error)) 700 - vn_revalidate(vn_from_inode(inode)); 701 - return -error; 664 + return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0, NULL); 702 665 } 703 666 704 667 /* ··· 666 725 error = 
block_truncate_page(inode->i_mapping, inode->i_size, 667 726 xfs_get_blocks); 668 727 WARN_ON(error); 669 - } 670 - 671 - STATIC int 672 - xfs_vn_setxattr( 673 - struct dentry *dentry, 674 - const char *name, 675 - const void *data, 676 - size_t size, 677 - int flags) 678 - { 679 - bhv_vnode_t *vp = vn_from_inode(dentry->d_inode); 680 - char *attr = (char *)name; 681 - attrnames_t *namesp; 682 - int xflags = 0; 683 - int error; 684 - 685 - namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT); 686 - if (!namesp) 687 - return -EOPNOTSUPP; 688 - attr += namesp->attr_namelen; 689 - error = namesp->attr_capable(vp, NULL); 690 - if (error) 691 - return error; 692 - 693 - /* Convert Linux syscall to XFS internal ATTR flags */ 694 - if (flags & XATTR_CREATE) 695 - xflags |= ATTR_CREATE; 696 - if (flags & XATTR_REPLACE) 697 - xflags |= ATTR_REPLACE; 698 - xflags |= namesp->attr_flag; 699 - return namesp->attr_set(vp, attr, (void *)data, size, xflags); 700 - } 701 - 702 - STATIC ssize_t 703 - xfs_vn_getxattr( 704 - struct dentry *dentry, 705 - const char *name, 706 - void *data, 707 - size_t size) 708 - { 709 - bhv_vnode_t *vp = vn_from_inode(dentry->d_inode); 710 - char *attr = (char *)name; 711 - attrnames_t *namesp; 712 - int xflags = 0; 713 - ssize_t error; 714 - 715 - namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT); 716 - if (!namesp) 717 - return -EOPNOTSUPP; 718 - attr += namesp->attr_namelen; 719 - error = namesp->attr_capable(vp, NULL); 720 - if (error) 721 - return error; 722 - 723 - /* Convert Linux syscall to XFS internal ATTR flags */ 724 - if (!size) { 725 - xflags |= ATTR_KERNOVAL; 726 - data = NULL; 727 - } 728 - xflags |= namesp->attr_flag; 729 - return namesp->attr_get(vp, attr, (void *)data, size, xflags); 730 - } 731 - 732 - STATIC ssize_t 733 - xfs_vn_listxattr( 734 - struct dentry *dentry, 735 - char *data, 736 - size_t size) 737 - { 738 - bhv_vnode_t *vp = vn_from_inode(dentry->d_inode); 739 - int error, xflags = 
ATTR_KERNAMELS; 740 - ssize_t result; 741 - 742 - if (!size) 743 - xflags |= ATTR_KERNOVAL; 744 - xflags |= capable(CAP_SYS_ADMIN) ? ATTR_KERNFULLS : ATTR_KERNORMALS; 745 - 746 - error = attr_generic_list(vp, data, size, xflags, &result); 747 - if (error < 0) 748 - return error; 749 - return result; 750 - } 751 - 752 - STATIC int 753 - xfs_vn_removexattr( 754 - struct dentry *dentry, 755 - const char *name) 756 - { 757 - bhv_vnode_t *vp = vn_from_inode(dentry->d_inode); 758 - char *attr = (char *)name; 759 - attrnames_t *namesp; 760 - int xflags = 0; 761 - int error; 762 - 763 - namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT); 764 - if (!namesp) 765 - return -EOPNOTSUPP; 766 - attr += namesp->attr_namelen; 767 - error = namesp->attr_capable(vp, NULL); 768 - if (error) 769 - return error; 770 - xflags |= namesp->attr_flag; 771 - return namesp->attr_remove(vp, attr, xflags); 772 728 } 773 729 774 730 STATIC long ··· 691 853 692 854 xfs_ilock(ip, XFS_IOLOCK_EXCL); 693 855 error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, 694 - 0, NULL, ATTR_NOLOCK); 856 + 0, NULL, XFS_ATTR_NOLOCK); 695 857 if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && 696 858 offset + len > i_size_read(inode)) 697 859 new_size = offset + len; 698 860 699 861 /* Change file size if needed */ 700 862 if (new_size) { 701 - bhv_vattr_t va; 863 + struct iattr iattr; 702 864 703 - va.va_mask = XFS_AT_SIZE; 704 - va.va_size = new_size; 705 - error = xfs_setattr(ip, &va, ATTR_NOLOCK, NULL); 865 + iattr.ia_valid = ATTR_SIZE; 866 + iattr.ia_size = new_size; 867 + error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK, NULL); 706 868 } 707 869 708 870 xfs_iunlock(ip, XFS_IOLOCK_EXCL); ··· 715 877 .truncate = xfs_vn_truncate, 716 878 .getattr = xfs_vn_getattr, 717 879 .setattr = xfs_vn_setattr, 718 - .setxattr = xfs_vn_setxattr, 719 - .getxattr = xfs_vn_getxattr, 880 + .setxattr = generic_setxattr, 881 + .getxattr = generic_getxattr, 882 + .removexattr = generic_removexattr, 720 883 
.listxattr = xfs_vn_listxattr, 721 - .removexattr = xfs_vn_removexattr, 722 884 .fallocate = xfs_vn_fallocate, 723 885 }; 724 886 ··· 729 891 .unlink = xfs_vn_unlink, 730 892 .symlink = xfs_vn_symlink, 731 893 .mkdir = xfs_vn_mkdir, 732 - .rmdir = xfs_vn_rmdir, 894 + /* 895 + * Yes, XFS uses the same method for rmdir and unlink. 896 + * 897 + * There are some subtile differences deeper in the code, 898 + * but we use S_ISDIR to check for those. 899 + */ 900 + .rmdir = xfs_vn_unlink, 733 901 .mknod = xfs_vn_mknod, 734 902 .rename = xfs_vn_rename, 735 903 .permission = xfs_vn_permission, 736 904 .getattr = xfs_vn_getattr, 737 905 .setattr = xfs_vn_setattr, 738 - .setxattr = xfs_vn_setxattr, 739 - .getxattr = xfs_vn_getxattr, 906 + .setxattr = generic_setxattr, 907 + .getxattr = generic_getxattr, 908 + .removexattr = generic_removexattr, 740 909 .listxattr = xfs_vn_listxattr, 741 - .removexattr = xfs_vn_removexattr, 910 + }; 911 + 912 + const struct inode_operations xfs_dir_ci_inode_operations = { 913 + .create = xfs_vn_create, 914 + .lookup = xfs_vn_ci_lookup, 915 + .link = xfs_vn_link, 916 + .unlink = xfs_vn_unlink, 917 + .symlink = xfs_vn_symlink, 918 + .mkdir = xfs_vn_mkdir, 919 + /* 920 + * Yes, XFS uses the same method for rmdir and unlink. 921 + * 922 + * There are some subtile differences deeper in the code, 923 + * but we use S_ISDIR to check for those. 
924 + */ 925 + .rmdir = xfs_vn_unlink, 926 + .mknod = xfs_vn_mknod, 927 + .rename = xfs_vn_rename, 928 + .permission = xfs_vn_permission, 929 + .getattr = xfs_vn_getattr, 930 + .setattr = xfs_vn_setattr, 931 + .setxattr = generic_setxattr, 932 + .getxattr = generic_getxattr, 933 + .removexattr = generic_removexattr, 934 + .listxattr = xfs_vn_listxattr, 742 935 }; 743 936 744 937 const struct inode_operations xfs_symlink_inode_operations = { ··· 779 910 .permission = xfs_vn_permission, 780 911 .getattr = xfs_vn_getattr, 781 912 .setattr = xfs_vn_setattr, 782 - .setxattr = xfs_vn_setxattr, 783 - .getxattr = xfs_vn_getxattr, 913 + .setxattr = generic_setxattr, 914 + .getxattr = generic_getxattr, 915 + .removexattr = generic_removexattr, 784 916 .listxattr = xfs_vn_listxattr, 785 - .removexattr = xfs_vn_removexattr, 786 917 };
+2
fs/xfs/linux-2.6/xfs_iops.h
··· 20 20 21 21 extern const struct inode_operations xfs_inode_operations; 22 22 extern const struct inode_operations xfs_dir_inode_operations; 23 + extern const struct inode_operations xfs_dir_ci_inode_operations; 23 24 extern const struct inode_operations xfs_symlink_inode_operations; 24 25 25 26 extern const struct file_operations xfs_file_operations; 26 27 extern const struct file_operations xfs_dir_file_operations; 27 28 extern const struct file_operations xfs_invis_file_operations; 28 29 30 + extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); 29 31 30 32 struct xfs_inode; 31 33 extern void xfs_ichgtime(struct xfs_inode *, int);
+8
fs/xfs/linux-2.6/xfs_linux.h
··· 76 76 #include <linux/log2.h> 77 77 #include <linux/spinlock.h> 78 78 #include <linux/random.h> 79 + #include <linux/ctype.h> 79 80 80 81 #include <asm/page.h> 81 82 #include <asm/div64.h> ··· 299 298 do_div(x, y); 300 299 return x; 301 300 } 301 + 302 + /* ARM old ABI has some weird alignment/padding */ 303 + #if defined(__arm__) && !defined(__ARM_EABI__) 304 + #define __arch_pack __attribute__((packed)) 305 + #else 306 + #define __arch_pack 307 + #endif 302 308 303 309 #endif /* __XFS_LINUX__ */
+12 -3
fs/xfs/linux-2.6/xfs_stats.c
··· 98 98 return len; 99 99 } 100 100 101 - void 101 + int 102 102 xfs_init_procfs(void) 103 103 { 104 104 if (!proc_mkdir("fs/xfs", NULL)) 105 - return; 106 - create_proc_read_entry("fs/xfs/stat", 0, NULL, xfs_read_xfsstats, NULL); 105 + goto out; 106 + 107 + if (!create_proc_read_entry("fs/xfs/stat", 0, NULL, 108 + xfs_read_xfsstats, NULL)) 109 + goto out_remove_entry; 110 + return 0; 111 + 112 + out_remove_entry: 113 + remove_proc_entry("fs/xfs", NULL); 114 + out: 115 + return -ENOMEM; 107 116 } 108 117 109 118 void
+9 -3
fs/xfs/linux-2.6/xfs_stats.h
··· 134 134 #define XFS_STATS_DEC(v) (per_cpu(xfsstats, current_cpu()).v--) 135 135 #define XFS_STATS_ADD(v, inc) (per_cpu(xfsstats, current_cpu()).v += (inc)) 136 136 137 - extern void xfs_init_procfs(void); 137 + extern int xfs_init_procfs(void); 138 138 extern void xfs_cleanup_procfs(void); 139 139 140 140 ··· 144 144 # define XFS_STATS_DEC(count) 145 145 # define XFS_STATS_ADD(count, inc) 146 146 147 - static inline void xfs_init_procfs(void) { }; 148 - static inline void xfs_cleanup_procfs(void) { }; 147 + static inline int xfs_init_procfs(void) 148 + { 149 + return 0; 150 + } 151 + 152 + static inline void xfs_cleanup_procfs(void) 153 + { 154 + } 149 155 150 156 #endif /* !CONFIG_PROC_FS */ 151 157
+852 -86
fs/xfs/linux-2.6/xfs_super.c
··· 52 52 #include "xfs_version.h" 53 53 #include "xfs_log_priv.h" 54 54 #include "xfs_trans_priv.h" 55 + #include "xfs_filestream.h" 56 + #include "xfs_da_btree.h" 57 + #include "xfs_dir2_trace.h" 58 + #include "xfs_extfree_item.h" 59 + #include "xfs_mru_cache.h" 60 + #include "xfs_inode_item.h" 55 61 56 62 #include <linux/namei.h> 57 63 #include <linux/init.h> ··· 66 60 #include <linux/writeback.h> 67 61 #include <linux/kthread.h> 68 62 #include <linux/freezer.h> 63 + #include <linux/parser.h> 69 64 70 65 static struct quotactl_ops xfs_quotactl_operations; 71 66 static struct super_operations xfs_super_operations; ··· 81 74 { 82 75 struct xfs_mount_args *args; 83 76 84 - args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP); 77 + args = kzalloc(sizeof(struct xfs_mount_args), GFP_KERNEL); 78 + if (!args) 79 + return NULL; 80 + 85 81 args->logbufs = args->logbufsize = -1; 86 82 strncpy(args->fsname, sb->s_id, MAXNAMELEN); 87 83 ··· 147 137 #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ 148 138 #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ 149 139 #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ 140 + 141 + /* 142 + * Table driven mount option parser. 143 + * 144 + * Currently only used for remount, but it will be used for mount 145 + * in the future, too. 
146 + */ 147 + enum { 148 + Opt_barrier, Opt_nobarrier, Opt_err 149 + }; 150 + 151 + static match_table_t tokens = { 152 + {Opt_barrier, "barrier"}, 153 + {Opt_nobarrier, "nobarrier"}, 154 + {Opt_err, NULL} 155 + }; 156 + 150 157 151 158 STATIC unsigned long 152 159 suffix_strtoul(char *s, char **endp, unsigned int base) ··· 341 314 args->flags |= XFSMNT_ATTR2; 342 315 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 343 316 args->flags &= ~XFSMNT_ATTR2; 317 + args->flags |= XFSMNT_NOATTR2; 344 318 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { 345 319 args->flags2 |= XFSMNT2_FILESTREAMS; 346 320 } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { ··· 592 564 inode->i_mapping->a_ops = &xfs_address_space_operations; 593 565 break; 594 566 case S_IFDIR: 595 - inode->i_op = &xfs_dir_inode_operations; 567 + if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) 568 + inode->i_op = &xfs_dir_ci_inode_operations; 569 + else 570 + inode->i_op = &xfs_dir_inode_operations; 596 571 inode->i_fop = &xfs_dir_file_operations; 597 572 break; 598 573 case S_IFLNK: ··· 764 733 return; 765 734 } 766 735 767 - if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered == 768 - QUEUE_ORDERED_NONE) { 769 - xfs_fs_cmn_err(CE_NOTE, mp, 770 - "Disabling barriers, not supported by the underlying device"); 771 - mp->m_flags &= ~XFS_MOUNT_BARRIER; 772 - return; 773 - } 774 - 775 736 if (xfs_readonly_buftarg(mp->m_ddev_targp)) { 776 737 xfs_fs_cmn_err(CE_NOTE, mp, 777 738 "Disabling barriers, underlying device is readonly"); ··· 785 762 xfs_buftarg_t *buftarg) 786 763 { 787 764 blkdev_issue_flush(buftarg->bt_bdev, NULL); 765 + } 766 + 767 + STATIC void 768 + xfs_close_devices( 769 + struct xfs_mount *mp) 770 + { 771 + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 772 + struct block_device *logdev = mp->m_logdev_targp->bt_bdev; 773 + xfs_free_buftarg(mp->m_logdev_targp); 774 + xfs_blkdev_put(logdev); 775 + } 776 + if (mp->m_rtdev_targp) { 777 + struct block_device *rtdev 
= mp->m_rtdev_targp->bt_bdev; 778 + xfs_free_buftarg(mp->m_rtdev_targp); 779 + xfs_blkdev_put(rtdev); 780 + } 781 + xfs_free_buftarg(mp->m_ddev_targp); 782 + } 783 + 784 + /* 785 + * The file system configurations are: 786 + * (1) device (partition) with data and internal log 787 + * (2) logical volume with data and log subvolumes. 788 + * (3) logical volume with data, log, and realtime subvolumes. 789 + * 790 + * We only have to handle opening the log and realtime volumes here if 791 + * they are present. The data subvolume has already been opened by 792 + * get_sb_bdev() and is stored in sb->s_bdev. 793 + */ 794 + STATIC int 795 + xfs_open_devices( 796 + struct xfs_mount *mp, 797 + struct xfs_mount_args *args) 798 + { 799 + struct block_device *ddev = mp->m_super->s_bdev; 800 + struct block_device *logdev = NULL, *rtdev = NULL; 801 + int error; 802 + 803 + /* 804 + * Open real time and log devices - order is important. 805 + */ 806 + if (args->logname[0]) { 807 + error = xfs_blkdev_get(mp, args->logname, &logdev); 808 + if (error) 809 + goto out; 810 + } 811 + 812 + if (args->rtname[0]) { 813 + error = xfs_blkdev_get(mp, args->rtname, &rtdev); 814 + if (error) 815 + goto out_close_logdev; 816 + 817 + if (rtdev == ddev || rtdev == logdev) { 818 + cmn_err(CE_WARN, 819 + "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev."); 820 + error = EINVAL; 821 + goto out_close_rtdev; 822 + } 823 + } 824 + 825 + /* 826 + * Setup xfs_mount buffer target pointers 827 + */ 828 + error = ENOMEM; 829 + mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0); 830 + if (!mp->m_ddev_targp) 831 + goto out_close_rtdev; 832 + 833 + if (rtdev) { 834 + mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1); 835 + if (!mp->m_rtdev_targp) 836 + goto out_free_ddev_targ; 837 + } 838 + 839 + if (logdev && logdev != ddev) { 840 + mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1); 841 + if (!mp->m_logdev_targp) 842 + goto out_free_rtdev_targ; 843 + } else { 844 + mp->m_logdev_targp = 
mp->m_ddev_targp; 845 + } 846 + 847 + return 0; 848 + 849 + out_free_rtdev_targ: 850 + if (mp->m_rtdev_targp) 851 + xfs_free_buftarg(mp->m_rtdev_targp); 852 + out_free_ddev_targ: 853 + xfs_free_buftarg(mp->m_ddev_targp); 854 + out_close_rtdev: 855 + if (rtdev) 856 + xfs_blkdev_put(rtdev); 857 + out_close_logdev: 858 + if (logdev && logdev != ddev) 859 + xfs_blkdev_put(logdev); 860 + out: 861 + return error; 862 + } 863 + 864 + /* 865 + * Setup xfs_mount buffer target pointers based on superblock 866 + */ 867 + STATIC int 868 + xfs_setup_devices( 869 + struct xfs_mount *mp) 870 + { 871 + int error; 872 + 873 + error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, 874 + mp->m_sb.sb_sectsize); 875 + if (error) 876 + return error; 877 + 878 + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 879 + unsigned int log_sector_size = BBSIZE; 880 + 881 + if (xfs_sb_version_hassector(&mp->m_sb)) 882 + log_sector_size = mp->m_sb.sb_logsectsize; 883 + error = xfs_setsize_buftarg(mp->m_logdev_targp, 884 + mp->m_sb.sb_blocksize, 885 + log_sector_size); 886 + if (error) 887 + return error; 888 + } 889 + if (mp->m_rtdev_targp) { 890 + error = xfs_setsize_buftarg(mp->m_rtdev_targp, 891 + mp->m_sb.sb_blocksize, 892 + mp->m_sb.sb_sectsize); 893 + if (error) 894 + return error; 895 + } 896 + 897 + return 0; 788 898 } 789 899 790 900 /* ··· 1002 846 void *vnode) 1003 847 { 1004 848 inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 1005 - } 1006 - 1007 - STATIC int __init 1008 - xfs_init_zones(void) 1009 - { 1010 - xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", 1011 - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | 1012 - KM_ZONE_SPREAD, 1013 - xfs_fs_inode_init_once); 1014 - if (!xfs_vnode_zone) 1015 - goto out; 1016 - 1017 - xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); 1018 - if (!xfs_ioend_zone) 1019 - goto out_destroy_vnode_zone; 1020 - 1021 - xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, 1022 - 
xfs_ioend_zone); 1023 - if (!xfs_ioend_pool) 1024 - goto out_free_ioend_zone; 1025 - return 0; 1026 - 1027 - out_free_ioend_zone: 1028 - kmem_zone_destroy(xfs_ioend_zone); 1029 - out_destroy_vnode_zone: 1030 - kmem_zone_destroy(xfs_vnode_zone); 1031 - out: 1032 - return -ENOMEM; 1033 - } 1034 - 1035 - STATIC void 1036 - xfs_destroy_zones(void) 1037 - { 1038 - mempool_destroy(xfs_ioend_pool); 1039 - kmem_zone_destroy(xfs_vnode_zone); 1040 - kmem_zone_destroy(xfs_ioend_zone); 1041 849 } 1042 850 1043 851 /* ··· 1193 1073 list_del(&work->w_list); 1194 1074 if (work == &mp->m_sync_work) 1195 1075 continue; 1196 - kmem_free(work, sizeof(struct bhv_vfs_sync_work)); 1076 + kmem_free(work); 1197 1077 } 1198 1078 } 1199 1079 ··· 1205 1085 struct super_block *sb) 1206 1086 { 1207 1087 struct xfs_mount *mp = XFS_M(sb); 1088 + struct xfs_inode *rip = mp->m_rootip; 1089 + int unmount_event_flags = 0; 1208 1090 int error; 1209 1091 1210 1092 kthread_stop(mp->m_sync_task); 1211 1093 1212 1094 xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI); 1213 - error = xfs_unmount(mp, 0, NULL); 1214 - if (error) 1215 - printk("XFS: unmount got error=%d\n", error); 1095 + 1096 + #ifdef HAVE_DMAPI 1097 + if (mp->m_flags & XFS_MOUNT_DMAPI) { 1098 + unmount_event_flags = 1099 + (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 1100 + 0 : DM_FLAGS_UNWANTED; 1101 + /* 1102 + * Ignore error from dmapi here, first unmount is not allowed 1103 + * to fail anyway, and second we wouldn't want to fail a 1104 + * unmount because of dmapi. 1105 + */ 1106 + XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL, 1107 + NULL, NULL, 0, 0, unmount_event_flags); 1108 + } 1109 + #endif 1110 + 1111 + /* 1112 + * Blow away any referenced inode in the filestreams cache. 1113 + * This can and will cause log traffic as inodes go inactive 1114 + * here. 
1115 + */ 1116 + xfs_filestream_unmount(mp); 1117 + 1118 + XFS_bflush(mp->m_ddev_targp); 1119 + error = xfs_unmount_flush(mp, 0); 1120 + WARN_ON(error); 1121 + 1122 + IRELE(rip); 1123 + 1124 + /* 1125 + * If we're forcing a shutdown, typically because of a media error, 1126 + * we want to make sure we invalidate dirty pages that belong to 1127 + * referenced vnodes as well. 1128 + */ 1129 + if (XFS_FORCED_SHUTDOWN(mp)) { 1130 + error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE); 1131 + ASSERT(error != EFSCORRUPTED); 1132 + } 1133 + 1134 + if (mp->m_flags & XFS_MOUNT_DMAPI) { 1135 + XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0, 1136 + unmount_event_flags); 1137 + } 1138 + 1139 + xfs_unmountfs(mp); 1140 + xfs_icsb_destroy_counters(mp); 1141 + xfs_close_devices(mp); 1142 + xfs_qmops_put(mp); 1143 + xfs_dmops_put(mp); 1144 + kfree(mp); 1216 1145 } 1217 1146 1218 1147 STATIC void ··· 1384 1215 char *options) 1385 1216 { 1386 1217 struct xfs_mount *mp = XFS_M(sb); 1387 - struct xfs_mount_args *args = xfs_args_allocate(sb, 0); 1388 - int error; 1218 + substring_t args[MAX_OPT_ARGS]; 1219 + char *p; 1389 1220 1390 - error = xfs_parseargs(mp, options, args, 1); 1391 - if (!error) 1392 - error = xfs_mntupdate(mp, flags, args); 1393 - kmem_free(args, sizeof(*args)); 1394 - return -error; 1221 + while ((p = strsep(&options, ",")) != NULL) { 1222 + int token; 1223 + 1224 + if (!*p) 1225 + continue; 1226 + 1227 + token = match_token(p, tokens, args); 1228 + switch (token) { 1229 + case Opt_barrier: 1230 + mp->m_flags |= XFS_MOUNT_BARRIER; 1231 + 1232 + /* 1233 + * Test if barriers are actually working if we can, 1234 + * else delay this check until the filesystem is 1235 + * marked writeable. 
1236 + */ 1237 + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) 1238 + xfs_mountfs_check_barriers(mp); 1239 + break; 1240 + case Opt_nobarrier: 1241 + mp->m_flags &= ~XFS_MOUNT_BARRIER; 1242 + break; 1243 + default: 1244 + printk(KERN_INFO 1245 + "XFS: mount option \"%s\" not supported for remount\n", p); 1246 + return -EINVAL; 1247 + } 1248 + } 1249 + 1250 + /* rw/ro -> rw */ 1251 + if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { 1252 + mp->m_flags &= ~XFS_MOUNT_RDONLY; 1253 + if (mp->m_flags & XFS_MOUNT_BARRIER) 1254 + xfs_mountfs_check_barriers(mp); 1255 + } 1256 + 1257 + /* rw -> ro */ 1258 + if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { 1259 + xfs_filestream_flush(mp); 1260 + xfs_sync(mp, SYNC_DATA_QUIESCE); 1261 + xfs_attr_quiesce(mp); 1262 + mp->m_flags |= XFS_MOUNT_RDONLY; 1263 + } 1264 + 1265 + return 0; 1395 1266 } 1396 1267 1397 1268 /* ··· 1508 1299 Q_XSETPQLIM), id, (caddr_t)fdq); 1509 1300 } 1510 1301 1302 + /* 1303 + * This function fills in xfs_mount_t fields based on mount args. 1304 + * Note: the superblock has _not_ yet been read in. 1305 + */ 1306 + STATIC int 1307 + xfs_start_flags( 1308 + struct xfs_mount_args *ap, 1309 + struct xfs_mount *mp) 1310 + { 1311 + /* Values are in BBs */ 1312 + if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { 1313 + /* 1314 + * At this point the superblock has not been read 1315 + * in, therefore we do not know the block size. 1316 + * Before the mount call ends we will convert 1317 + * these to FSBs. 
1318 + */ 1319 + mp->m_dalign = ap->sunit; 1320 + mp->m_swidth = ap->swidth; 1321 + } 1322 + 1323 + if (ap->logbufs != -1 && 1324 + ap->logbufs != 0 && 1325 + (ap->logbufs < XLOG_MIN_ICLOGS || 1326 + ap->logbufs > XLOG_MAX_ICLOGS)) { 1327 + cmn_err(CE_WARN, 1328 + "XFS: invalid logbufs value: %d [not %d-%d]", 1329 + ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 1330 + return XFS_ERROR(EINVAL); 1331 + } 1332 + mp->m_logbufs = ap->logbufs; 1333 + if (ap->logbufsize != -1 && 1334 + ap->logbufsize != 0 && 1335 + (ap->logbufsize < XLOG_MIN_RECORD_BSIZE || 1336 + ap->logbufsize > XLOG_MAX_RECORD_BSIZE || 1337 + !is_power_of_2(ap->logbufsize))) { 1338 + cmn_err(CE_WARN, 1339 + "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 1340 + ap->logbufsize); 1341 + return XFS_ERROR(EINVAL); 1342 + } 1343 + mp->m_logbsize = ap->logbufsize; 1344 + mp->m_fsname_len = strlen(ap->fsname) + 1; 1345 + mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); 1346 + strcpy(mp->m_fsname, ap->fsname); 1347 + if (ap->rtname[0]) { 1348 + mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP); 1349 + strcpy(mp->m_rtname, ap->rtname); 1350 + } 1351 + if (ap->logname[0]) { 1352 + mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP); 1353 + strcpy(mp->m_logname, ap->logname); 1354 + } 1355 + 1356 + if (ap->flags & XFSMNT_WSYNC) 1357 + mp->m_flags |= XFS_MOUNT_WSYNC; 1358 + #if XFS_BIG_INUMS 1359 + if (ap->flags & XFSMNT_INO64) { 1360 + mp->m_flags |= XFS_MOUNT_INO64; 1361 + mp->m_inoadd = XFS_INO64_OFFSET; 1362 + } 1363 + #endif 1364 + if (ap->flags & XFSMNT_RETERR) 1365 + mp->m_flags |= XFS_MOUNT_RETERR; 1366 + if (ap->flags & XFSMNT_NOALIGN) 1367 + mp->m_flags |= XFS_MOUNT_NOALIGN; 1368 + if (ap->flags & XFSMNT_SWALLOC) 1369 + mp->m_flags |= XFS_MOUNT_SWALLOC; 1370 + if (ap->flags & XFSMNT_OSYNCISOSYNC) 1371 + mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; 1372 + if (ap->flags & XFSMNT_32BITINODES) 1373 + mp->m_flags |= XFS_MOUNT_32BITINODES; 1374 + 1375 + if (ap->flags & 
XFSMNT_IOSIZE) { 1376 + if (ap->iosizelog > XFS_MAX_IO_LOG || 1377 + ap->iosizelog < XFS_MIN_IO_LOG) { 1378 + cmn_err(CE_WARN, 1379 + "XFS: invalid log iosize: %d [not %d-%d]", 1380 + ap->iosizelog, XFS_MIN_IO_LOG, 1381 + XFS_MAX_IO_LOG); 1382 + return XFS_ERROR(EINVAL); 1383 + } 1384 + 1385 + mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; 1386 + mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; 1387 + } 1388 + 1389 + if (ap->flags & XFSMNT_IKEEP) 1390 + mp->m_flags |= XFS_MOUNT_IKEEP; 1391 + if (ap->flags & XFSMNT_DIRSYNC) 1392 + mp->m_flags |= XFS_MOUNT_DIRSYNC; 1393 + if (ap->flags & XFSMNT_ATTR2) 1394 + mp->m_flags |= XFS_MOUNT_ATTR2; 1395 + if (ap->flags & XFSMNT_NOATTR2) 1396 + mp->m_flags |= XFS_MOUNT_NOATTR2; 1397 + 1398 + if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE) 1399 + mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 1400 + 1401 + /* 1402 + * no recovery flag requires a read-only mount 1403 + */ 1404 + if (ap->flags & XFSMNT_NORECOVERY) { 1405 + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 1406 + cmn_err(CE_WARN, 1407 + "XFS: tried to mount a FS read-write without recovery!"); 1408 + return XFS_ERROR(EINVAL); 1409 + } 1410 + mp->m_flags |= XFS_MOUNT_NORECOVERY; 1411 + } 1412 + 1413 + if (ap->flags & XFSMNT_NOUUID) 1414 + mp->m_flags |= XFS_MOUNT_NOUUID; 1415 + if (ap->flags & XFSMNT_BARRIER) 1416 + mp->m_flags |= XFS_MOUNT_BARRIER; 1417 + else 1418 + mp->m_flags &= ~XFS_MOUNT_BARRIER; 1419 + 1420 + if (ap->flags2 & XFSMNT2_FILESTREAMS) 1421 + mp->m_flags |= XFS_MOUNT_FILESTREAMS; 1422 + 1423 + if (ap->flags & XFSMNT_DMAPI) 1424 + mp->m_flags |= XFS_MOUNT_DMAPI; 1425 + return 0; 1426 + } 1427 + 1428 + /* 1429 + * This function fills in xfs_mount_t fields based on mount args. 1430 + * Note: the superblock _has_ now been read in. 
1431 + */ 1432 + STATIC int 1433 + xfs_finish_flags( 1434 + struct xfs_mount_args *ap, 1435 + struct xfs_mount *mp) 1436 + { 1437 + int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); 1438 + 1439 + /* Fail a mount where the logbuf is smaller then the log stripe */ 1440 + if (xfs_sb_version_haslogv2(&mp->m_sb)) { 1441 + if ((ap->logbufsize <= 0) && 1442 + (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) { 1443 + mp->m_logbsize = mp->m_sb.sb_logsunit; 1444 + } else if (ap->logbufsize > 0 && 1445 + ap->logbufsize < mp->m_sb.sb_logsunit) { 1446 + cmn_err(CE_WARN, 1447 + "XFS: logbuf size must be greater than or equal to log stripe size"); 1448 + return XFS_ERROR(EINVAL); 1449 + } 1450 + } else { 1451 + /* Fail a mount if the logbuf is larger than 32K */ 1452 + if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) { 1453 + cmn_err(CE_WARN, 1454 + "XFS: logbuf size for version 1 logs must be 16K or 32K"); 1455 + return XFS_ERROR(EINVAL); 1456 + } 1457 + } 1458 + 1459 + /* 1460 + * mkfs'ed attr2 will turn on attr2 mount unless explicitly 1461 + * told by noattr2 to turn it off 1462 + */ 1463 + if (xfs_sb_version_hasattr2(&mp->m_sb) && 1464 + !(ap->flags & XFSMNT_NOATTR2)) 1465 + mp->m_flags |= XFS_MOUNT_ATTR2; 1466 + 1467 + /* 1468 + * prohibit r/w mounts of read-only filesystems 1469 + */ 1470 + if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 1471 + cmn_err(CE_WARN, 1472 + "XFS: cannot mount a read-only filesystem as read-write"); 1473 + return XFS_ERROR(EROFS); 1474 + } 1475 + 1476 + /* 1477 + * check for shared mount. 1478 + */ 1479 + if (ap->flags & XFSMNT_SHARED) { 1480 + if (!xfs_sb_version_hasshared(&mp->m_sb)) 1481 + return XFS_ERROR(EINVAL); 1482 + 1483 + /* 1484 + * For IRIX 6.5, shared mounts must have the shared 1485 + * version bit set, have the persistent readonly 1486 + * field set, must be version 0 and can only be mounted 1487 + * read-only. 
1488 + */ 1489 + if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) || 1490 + (mp->m_sb.sb_shared_vn != 0)) 1491 + return XFS_ERROR(EINVAL); 1492 + 1493 + mp->m_flags |= XFS_MOUNT_SHARED; 1494 + 1495 + /* 1496 + * Shared XFS V0 can't deal with DMI. Return EINVAL. 1497 + */ 1498 + if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) 1499 + return XFS_ERROR(EINVAL); 1500 + } 1501 + 1502 + if (ap->flags & XFSMNT_UQUOTA) { 1503 + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); 1504 + if (ap->flags & XFSMNT_UQUOTAENF) 1505 + mp->m_qflags |= XFS_UQUOTA_ENFD; 1506 + } 1507 + 1508 + if (ap->flags & XFSMNT_GQUOTA) { 1509 + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); 1510 + if (ap->flags & XFSMNT_GQUOTAENF) 1511 + mp->m_qflags |= XFS_OQUOTA_ENFD; 1512 + } else if (ap->flags & XFSMNT_PQUOTA) { 1513 + mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); 1514 + if (ap->flags & XFSMNT_PQUOTAENF) 1515 + mp->m_qflags |= XFS_OQUOTA_ENFD; 1516 + } 1517 + 1518 + return 0; 1519 + } 1520 + 1511 1521 STATIC int 1512 1522 xfs_fs_fill_super( 1513 1523 struct super_block *sb, ··· 1735 1307 { 1736 1308 struct inode *root; 1737 1309 struct xfs_mount *mp = NULL; 1738 - struct xfs_mount_args *args = xfs_args_allocate(sb, silent); 1739 - int error; 1310 + struct xfs_mount_args *args; 1311 + int flags = 0, error = ENOMEM; 1740 1312 1741 - mp = xfs_mount_init(); 1313 + args = xfs_args_allocate(sb, silent); 1314 + if (!args) 1315 + return -ENOMEM; 1742 1316 1317 + mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); 1318 + if (!mp) 1319 + goto out_free_args; 1320 + 1321 + spin_lock_init(&mp->m_sb_lock); 1322 + mutex_init(&mp->m_ilock); 1323 + mutex_init(&mp->m_growlock); 1324 + atomic_set(&mp->m_active_trans, 0); 1743 1325 INIT_LIST_HEAD(&mp->m_sync_list); 1744 1326 spin_lock_init(&mp->m_sync_lock); 1745 1327 init_waitqueue_head(&mp->m_wait_single_sync_task); ··· 1762 1324 1763 1325 error = xfs_parseargs(mp, (char *)data, args, 0); 1764 1326 if (error) 1765 - goto 
fail_vfsop; 1327 + goto out_free_mp; 1766 1328 1767 1329 sb_min_blocksize(sb, BBSIZE); 1330 + sb->s_xattr = xfs_xattr_handlers; 1768 1331 sb->s_export_op = &xfs_export_operations; 1769 1332 sb->s_qcop = &xfs_quotactl_operations; 1770 1333 sb->s_op = &xfs_super_operations; 1771 1334 1772 - error = xfs_mount(mp, args, NULL); 1335 + error = xfs_dmops_get(mp, args); 1773 1336 if (error) 1774 - goto fail_vfsop; 1337 + goto out_free_mp; 1338 + error = xfs_qmops_get(mp, args); 1339 + if (error) 1340 + goto out_put_dmops; 1341 + 1342 + if (args->flags & XFSMNT_QUIET) 1343 + flags |= XFS_MFSI_QUIET; 1344 + 1345 + error = xfs_open_devices(mp, args); 1346 + if (error) 1347 + goto out_put_qmops; 1348 + 1349 + if (xfs_icsb_init_counters(mp)) 1350 + mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 1351 + 1352 + /* 1353 + * Setup flags based on mount(2) options and then the superblock 1354 + */ 1355 + error = xfs_start_flags(args, mp); 1356 + if (error) 1357 + goto out_destroy_counters; 1358 + error = xfs_readsb(mp, flags); 1359 + if (error) 1360 + goto out_destroy_counters; 1361 + error = xfs_finish_flags(args, mp); 1362 + if (error) 1363 + goto out_free_sb; 1364 + 1365 + error = xfs_setup_devices(mp); 1366 + if (error) 1367 + goto out_free_sb; 1368 + 1369 + if (mp->m_flags & XFS_MOUNT_BARRIER) 1370 + xfs_mountfs_check_barriers(mp); 1371 + 1372 + error = xfs_filestream_mount(mp); 1373 + if (error) 1374 + goto out_free_sb; 1375 + 1376 + error = xfs_mountfs(mp, flags); 1377 + if (error) 1378 + goto out_filestream_unmount; 1379 + 1380 + XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname); 1775 1381 1776 1382 sb->s_dirt = 1; 1777 1383 sb->s_magic = XFS_SB_MAGIC; ··· 1850 1368 1851 1369 xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); 1852 1370 1853 - kmem_free(args, sizeof(*args)); 1371 + kfree(args); 1854 1372 return 0; 1855 1373 1856 - fail_vnrele: 1374 + out_filestream_unmount: 1375 + xfs_filestream_unmount(mp); 1376 + out_free_sb: 1377 + xfs_freesb(mp); 1378 + out_destroy_counters: 
1379 + xfs_icsb_destroy_counters(mp); 1380 + xfs_close_devices(mp); 1381 + out_put_qmops: 1382 + xfs_qmops_put(mp); 1383 + out_put_dmops: 1384 + xfs_dmops_put(mp); 1385 + out_free_mp: 1386 + kfree(mp); 1387 + out_free_args: 1388 + kfree(args); 1389 + return -error; 1390 + 1391 + fail_vnrele: 1857 1392 if (sb->s_root) { 1858 1393 dput(sb->s_root); 1859 1394 sb->s_root = NULL; ··· 1878 1379 iput(root); 1879 1380 } 1880 1381 1881 - fail_unmount: 1882 - xfs_unmount(mp, 0, NULL); 1382 + fail_unmount: 1383 + /* 1384 + * Blow away any referenced inode in the filestreams cache. 1385 + * This can and will cause log traffic as inodes go inactive 1386 + * here. 1387 + */ 1388 + xfs_filestream_unmount(mp); 1883 1389 1884 - fail_vfsop: 1885 - kmem_free(args, sizeof(*args)); 1886 - return -error; 1390 + XFS_bflush(mp->m_ddev_targp); 1391 + error = xfs_unmount_flush(mp, 0); 1392 + WARN_ON(error); 1393 + 1394 + IRELE(mp->m_rootip); 1395 + 1396 + xfs_unmountfs(mp); 1397 + goto out_destroy_counters; 1887 1398 } 1888 1399 1889 1400 STATIC int ··· 1938 1429 .fs_flags = FS_REQUIRES_DEV, 1939 1430 }; 1940 1431 1432 + STATIC int __init 1433 + xfs_alloc_trace_bufs(void) 1434 + { 1435 + #ifdef XFS_ALLOC_TRACE 1436 + xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL); 1437 + if (!xfs_alloc_trace_buf) 1438 + goto out; 1439 + #endif 1440 + #ifdef XFS_BMAP_TRACE 1441 + xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL); 1442 + if (!xfs_bmap_trace_buf) 1443 + goto out_free_alloc_trace; 1444 + #endif 1445 + #ifdef XFS_BMBT_TRACE 1446 + xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL); 1447 + if (!xfs_bmbt_trace_buf) 1448 + goto out_free_bmap_trace; 1449 + #endif 1450 + #ifdef XFS_ATTR_TRACE 1451 + xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL); 1452 + if (!xfs_attr_trace_buf) 1453 + goto out_free_bmbt_trace; 1454 + #endif 1455 + #ifdef XFS_DIR2_TRACE 1456 + xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL); 
1457 + if (!xfs_dir2_trace_buf) 1458 + goto out_free_attr_trace; 1459 + #endif 1460 + 1461 + return 0; 1462 + 1463 + #ifdef XFS_DIR2_TRACE 1464 + out_free_attr_trace: 1465 + #endif 1466 + #ifdef XFS_ATTR_TRACE 1467 + ktrace_free(xfs_attr_trace_buf); 1468 + out_free_bmbt_trace: 1469 + #endif 1470 + #ifdef XFS_BMBT_TRACE 1471 + ktrace_free(xfs_bmbt_trace_buf); 1472 + out_free_bmap_trace: 1473 + #endif 1474 + #ifdef XFS_BMAP_TRACE 1475 + ktrace_free(xfs_bmap_trace_buf); 1476 + out_free_alloc_trace: 1477 + #endif 1478 + #ifdef XFS_ALLOC_TRACE 1479 + ktrace_free(xfs_alloc_trace_buf); 1480 + out: 1481 + #endif 1482 + return -ENOMEM; 1483 + } 1484 + 1485 + STATIC void 1486 + xfs_free_trace_bufs(void) 1487 + { 1488 + #ifdef XFS_DIR2_TRACE 1489 + ktrace_free(xfs_dir2_trace_buf); 1490 + #endif 1491 + #ifdef XFS_ATTR_TRACE 1492 + ktrace_free(xfs_attr_trace_buf); 1493 + #endif 1494 + #ifdef XFS_BMBT_TRACE 1495 + ktrace_free(xfs_bmbt_trace_buf); 1496 + #endif 1497 + #ifdef XFS_BMAP_TRACE 1498 + ktrace_free(xfs_bmap_trace_buf); 1499 + #endif 1500 + #ifdef XFS_ALLOC_TRACE 1501 + ktrace_free(xfs_alloc_trace_buf); 1502 + #endif 1503 + } 1941 1504 1942 1505 STATIC int __init 1943 - init_xfs_fs( void ) 1506 + xfs_init_zones(void) 1507 + { 1508 + xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", 1509 + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | 1510 + KM_ZONE_SPREAD, 1511 + xfs_fs_inode_init_once); 1512 + if (!xfs_vnode_zone) 1513 + goto out; 1514 + 1515 + xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); 1516 + if (!xfs_ioend_zone) 1517 + goto out_destroy_vnode_zone; 1518 + 1519 + xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, 1520 + xfs_ioend_zone); 1521 + if (!xfs_ioend_pool) 1522 + goto out_destroy_ioend_zone; 1523 + 1524 + xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), 1525 + "xfs_log_ticket"); 1526 + if (!xfs_log_ticket_zone) 1527 + goto out_destroy_ioend_pool; 1528 + 1529 + xfs_bmap_free_item_zone = 
kmem_zone_init(sizeof(xfs_bmap_free_item_t), 1530 + "xfs_bmap_free_item"); 1531 + if (!xfs_bmap_free_item_zone) 1532 + goto out_destroy_log_ticket_zone; 1533 + xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), 1534 + "xfs_btree_cur"); 1535 + if (!xfs_btree_cur_zone) 1536 + goto out_destroy_bmap_free_item_zone; 1537 + 1538 + xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), 1539 + "xfs_da_state"); 1540 + if (!xfs_da_state_zone) 1541 + goto out_destroy_btree_cur_zone; 1542 + 1543 + xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); 1544 + if (!xfs_dabuf_zone) 1545 + goto out_destroy_da_state_zone; 1546 + 1547 + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 1548 + if (!xfs_ifork_zone) 1549 + goto out_destroy_dabuf_zone; 1550 + 1551 + xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); 1552 + if (!xfs_trans_zone) 1553 + goto out_destroy_ifork_zone; 1554 + 1555 + /* 1556 + * The size of the zone allocated buf log item is the maximum 1557 + * size possible under XFS. This wastes a little bit of memory, 1558 + * but it is much faster. 
1559 + */ 1560 + xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) + 1561 + (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / 1562 + NBWORD) * sizeof(int))), "xfs_buf_item"); 1563 + if (!xfs_buf_item_zone) 1564 + goto out_destroy_trans_zone; 1565 + 1566 + xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + 1567 + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * 1568 + sizeof(xfs_extent_t))), "xfs_efd_item"); 1569 + if (!xfs_efd_zone) 1570 + goto out_destroy_buf_item_zone; 1571 + 1572 + xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + 1573 + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * 1574 + sizeof(xfs_extent_t))), "xfs_efi_item"); 1575 + if (!xfs_efi_zone) 1576 + goto out_destroy_efd_zone; 1577 + 1578 + xfs_inode_zone = 1579 + kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", 1580 + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | 1581 + KM_ZONE_SPREAD, NULL); 1582 + if (!xfs_inode_zone) 1583 + goto out_destroy_efi_zone; 1584 + 1585 + xfs_ili_zone = 1586 + kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", 1587 + KM_ZONE_SPREAD, NULL); 1588 + if (!xfs_ili_zone) 1589 + goto out_destroy_inode_zone; 1590 + 1591 + #ifdef CONFIG_XFS_POSIX_ACL 1592 + xfs_acl_zone = kmem_zone_init(sizeof(xfs_acl_t), "xfs_acl"); 1593 + if (!xfs_acl_zone) 1594 + goto out_destroy_ili_zone; 1595 + #endif 1596 + 1597 + return 0; 1598 + 1599 + #ifdef CONFIG_XFS_POSIX_ACL 1600 + out_destroy_ili_zone: 1601 + #endif 1602 + kmem_zone_destroy(xfs_ili_zone); 1603 + out_destroy_inode_zone: 1604 + kmem_zone_destroy(xfs_inode_zone); 1605 + out_destroy_efi_zone: 1606 + kmem_zone_destroy(xfs_efi_zone); 1607 + out_destroy_efd_zone: 1608 + kmem_zone_destroy(xfs_efd_zone); 1609 + out_destroy_buf_item_zone: 1610 + kmem_zone_destroy(xfs_buf_item_zone); 1611 + out_destroy_trans_zone: 1612 + kmem_zone_destroy(xfs_trans_zone); 1613 + out_destroy_ifork_zone: 1614 + kmem_zone_destroy(xfs_ifork_zone); 1615 + out_destroy_dabuf_zone: 1616 + kmem_zone_destroy(xfs_dabuf_zone); 1617 + out_destroy_da_state_zone: 1618 + 
kmem_zone_destroy(xfs_da_state_zone); 1619 + out_destroy_btree_cur_zone: 1620 + kmem_zone_destroy(xfs_btree_cur_zone); 1621 + out_destroy_bmap_free_item_zone: 1622 + kmem_zone_destroy(xfs_bmap_free_item_zone); 1623 + out_destroy_log_ticket_zone: 1624 + kmem_zone_destroy(xfs_log_ticket_zone); 1625 + out_destroy_ioend_pool: 1626 + mempool_destroy(xfs_ioend_pool); 1627 + out_destroy_ioend_zone: 1628 + kmem_zone_destroy(xfs_ioend_zone); 1629 + out_destroy_vnode_zone: 1630 + kmem_zone_destroy(xfs_vnode_zone); 1631 + out: 1632 + return -ENOMEM; 1633 + } 1634 + 1635 + STATIC void 1636 + xfs_destroy_zones(void) 1637 + { 1638 + #ifdef CONFIG_XFS_POSIX_ACL 1639 + kmem_zone_destroy(xfs_acl_zone); 1640 + #endif 1641 + kmem_zone_destroy(xfs_ili_zone); 1642 + kmem_zone_destroy(xfs_inode_zone); 1643 + kmem_zone_destroy(xfs_efi_zone); 1644 + kmem_zone_destroy(xfs_efd_zone); 1645 + kmem_zone_destroy(xfs_buf_item_zone); 1646 + kmem_zone_destroy(xfs_trans_zone); 1647 + kmem_zone_destroy(xfs_ifork_zone); 1648 + kmem_zone_destroy(xfs_dabuf_zone); 1649 + kmem_zone_destroy(xfs_da_state_zone); 1650 + kmem_zone_destroy(xfs_btree_cur_zone); 1651 + kmem_zone_destroy(xfs_bmap_free_item_zone); 1652 + kmem_zone_destroy(xfs_log_ticket_zone); 1653 + mempool_destroy(xfs_ioend_pool); 1654 + kmem_zone_destroy(xfs_ioend_zone); 1655 + kmem_zone_destroy(xfs_vnode_zone); 1656 + 1657 + } 1658 + 1659 + STATIC int __init 1660 + init_xfs_fs(void) 1944 1661 { 1945 1662 int error; 1946 1663 static char message[] __initdata = KERN_INFO \ ··· 2175 1440 printk(message); 2176 1441 2177 1442 ktrace_init(64); 1443 + vn_init(); 1444 + xfs_dir_startup(); 2178 1445 2179 1446 error = xfs_init_zones(); 2180 - if (error < 0) 2181 - goto undo_zones; 1447 + if (error) 1448 + goto out; 1449 + 1450 + error = xfs_alloc_trace_bufs(); 1451 + if (error) 1452 + goto out_destroy_zones; 1453 + 1454 + error = xfs_mru_cache_init(); 1455 + if (error) 1456 + goto out_free_trace_buffers; 1457 + 1458 + error = xfs_filestream_init(); 1459 
+ if (error) 1460 + goto out_mru_cache_uninit; 2182 1461 2183 1462 error = xfs_buf_init(); 2184 - if (error < 0) 2185 - goto undo_buffers; 1463 + if (error) 1464 + goto out_filestream_uninit; 2186 1465 2187 - vn_init(); 2188 - xfs_init(); 2189 - uuid_init(); 1466 + error = xfs_init_procfs(); 1467 + if (error) 1468 + goto out_buf_terminate; 1469 + 1470 + error = xfs_sysctl_register(); 1471 + if (error) 1472 + goto out_cleanup_procfs; 1473 + 2190 1474 vfs_initquota(); 2191 1475 2192 1476 error = register_filesystem(&xfs_fs_type); 2193 1477 if (error) 2194 - goto undo_register; 1478 + goto out_sysctl_unregister; 2195 1479 return 0; 2196 1480 2197 - undo_register: 1481 + out_sysctl_unregister: 1482 + xfs_sysctl_unregister(); 1483 + out_cleanup_procfs: 1484 + xfs_cleanup_procfs(); 1485 + out_buf_terminate: 2198 1486 xfs_buf_terminate(); 2199 - 2200 - undo_buffers: 1487 + out_filestream_uninit: 1488 + xfs_filestream_uninit(); 1489 + out_mru_cache_uninit: 1490 + xfs_mru_cache_uninit(); 1491 + out_free_trace_buffers: 1492 + xfs_free_trace_bufs(); 1493 + out_destroy_zones: 2201 1494 xfs_destroy_zones(); 2202 - 2203 - undo_zones: 1495 + out: 2204 1496 return error; 2205 1497 } 2206 1498 2207 1499 STATIC void __exit 2208 - exit_xfs_fs( void ) 1500 + exit_xfs_fs(void) 2209 1501 { 2210 1502 vfs_exitquota(); 2211 1503 unregister_filesystem(&xfs_fs_type); 2212 - xfs_cleanup(); 1504 + xfs_sysctl_unregister(); 1505 + xfs_cleanup_procfs(); 2213 1506 xfs_buf_terminate(); 1507 + xfs_filestream_uninit(); 1508 + xfs_mru_cache_uninit(); 1509 + xfs_free_trace_bufs(); 2214 1510 xfs_destroy_zones(); 2215 1511 ktrace_uninit(); 2216 1512 }
+1 -3
fs/xfs/linux-2.6/xfs_super.h
··· 107 107 extern void xfs_flush_inode(struct xfs_inode *); 108 108 extern void xfs_flush_device(struct xfs_inode *); 109 109 110 - extern int xfs_blkdev_get(struct xfs_mount *, const char *, 111 - struct block_device **); 112 - extern void xfs_blkdev_put(struct block_device *); 113 110 extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); 114 111 115 112 extern const struct export_operations xfs_export_operations; 113 + extern struct xattr_handler *xfs_xattr_handlers[]; 116 114 117 115 #define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) 118 116
+5 -3
fs/xfs/linux-2.6/xfs_sysctl.c
··· 259 259 {} 260 260 }; 261 261 262 - void 262 + int 263 263 xfs_sysctl_register(void) 264 264 { 265 265 xfs_table_header = register_sysctl_table(xfs_root_table); 266 + if (!xfs_table_header) 267 + return -ENOMEM; 268 + return 0; 266 269 } 267 270 268 271 void 269 272 xfs_sysctl_unregister(void) 270 273 { 271 - if (xfs_table_header) 272 - unregister_sysctl_table(xfs_table_header); 274 + unregister_sysctl_table(xfs_table_header); 273 275 }
+2 -2
fs/xfs/linux-2.6/xfs_sysctl.h
··· 93 93 extern xfs_param_t xfs_params; 94 94 95 95 #ifdef CONFIG_SYSCTL 96 - extern void xfs_sysctl_register(void); 96 + extern int xfs_sysctl_register(void); 97 97 extern void xfs_sysctl_unregister(void); 98 98 #else 99 - # define xfs_sysctl_register() do { } while (0) 99 + # define xfs_sysctl_register() (0) 100 100 # define xfs_sysctl_unregister() do { } while (0) 101 101 #endif /* CONFIG_SYSCTL */ 102 102
-50
fs/xfs/linux-2.6/xfs_vnode.c
··· 82 82 xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); 83 83 } 84 84 85 - /* 86 - * Revalidate the Linux inode from the XFS inode. 87 - * Note: i_size _not_ updated; we must hold the inode 88 - * semaphore when doing that - callers responsibility. 89 - */ 90 - int 91 - vn_revalidate( 92 - bhv_vnode_t *vp) 93 - { 94 - struct inode *inode = vn_to_inode(vp); 95 - struct xfs_inode *ip = XFS_I(inode); 96 - struct xfs_mount *mp = ip->i_mount; 97 - unsigned long xflags; 98 - 99 - xfs_itrace_entry(ip); 100 - 101 - if (XFS_FORCED_SHUTDOWN(mp)) 102 - return -EIO; 103 - 104 - xfs_ilock(ip, XFS_ILOCK_SHARED); 105 - inode->i_mode = ip->i_d.di_mode; 106 - inode->i_uid = ip->i_d.di_uid; 107 - inode->i_gid = ip->i_d.di_gid; 108 - inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; 109 - inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 110 - inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; 111 - inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; 112 - 113 - xflags = xfs_ip2xflags(ip); 114 - if (xflags & XFS_XFLAG_IMMUTABLE) 115 - inode->i_flags |= S_IMMUTABLE; 116 - else 117 - inode->i_flags &= ~S_IMMUTABLE; 118 - if (xflags & XFS_XFLAG_APPEND) 119 - inode->i_flags |= S_APPEND; 120 - else 121 - inode->i_flags &= ~S_APPEND; 122 - if (xflags & XFS_XFLAG_SYNC) 123 - inode->i_flags |= S_SYNC; 124 - else 125 - inode->i_flags &= ~S_SYNC; 126 - if (xflags & XFS_XFLAG_NOATIME) 127 - inode->i_flags |= S_NOATIME; 128 - else 129 - inode->i_flags &= ~S_NOATIME; 130 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 131 - 132 - xfs_iflags_clear(ip, XFS_IMODIFIED); 133 - return 0; 134 - } 135 85 136 86 /* 137 87 * Add a reference to a referenced vnode.
-89
fs/xfs/linux-2.6/xfs_vnode.h
··· 19 19 #define __XFS_VNODE_H__ 20 20 21 21 struct file; 22 - struct bhv_vattr; 23 22 struct xfs_iomap; 24 23 struct attrlist_cursor_kern; 25 24 ··· 65 66 Prevent VM access to the pages until 66 67 the operation completes. */ 67 68 68 - /* 69 - * Vnode attributes. va_mask indicates those attributes the caller 70 - * wants to set or extract. 71 - */ 72 - typedef struct bhv_vattr { 73 - int va_mask; /* bit-mask of attributes present */ 74 - mode_t va_mode; /* file access mode and type */ 75 - xfs_nlink_t va_nlink; /* number of references to file */ 76 - uid_t va_uid; /* owner user id */ 77 - gid_t va_gid; /* owner group id */ 78 - xfs_ino_t va_nodeid; /* file id */ 79 - xfs_off_t va_size; /* file size in bytes */ 80 - u_long va_blocksize; /* blocksize preferred for i/o */ 81 - struct timespec va_atime; /* time of last access */ 82 - struct timespec va_mtime; /* time of last modification */ 83 - struct timespec va_ctime; /* time file changed */ 84 - u_int va_gen; /* generation number of file */ 85 - xfs_dev_t va_rdev; /* device the special file represents */ 86 - __int64_t va_nblocks; /* number of blocks allocated */ 87 - u_long va_xflags; /* random extended file flags */ 88 - u_long va_extsize; /* file extent size */ 89 - u_long va_nextents; /* number of extents in file */ 90 - u_long va_anextents; /* number of attr extents in file */ 91 - prid_t va_projid; /* project id */ 92 - } bhv_vattr_t; 93 - 94 - /* 95 - * setattr or getattr attributes 96 - */ 97 - #define XFS_AT_TYPE 0x00000001 98 - #define XFS_AT_MODE 0x00000002 99 - #define XFS_AT_UID 0x00000004 100 - #define XFS_AT_GID 0x00000008 101 - #define XFS_AT_FSID 0x00000010 102 - #define XFS_AT_NODEID 0x00000020 103 - #define XFS_AT_NLINK 0x00000040 104 - #define XFS_AT_SIZE 0x00000080 105 - #define XFS_AT_ATIME 0x00000100 106 - #define XFS_AT_MTIME 0x00000200 107 - #define XFS_AT_CTIME 0x00000400 108 - #define XFS_AT_RDEV 0x00000800 109 - #define XFS_AT_BLKSIZE 0x00001000 110 - #define XFS_AT_NBLOCKS 0x00002000 
111 - #define XFS_AT_VCODE 0x00004000 112 - #define XFS_AT_MAC 0x00008000 113 - #define XFS_AT_UPDATIME 0x00010000 114 - #define XFS_AT_UPDMTIME 0x00020000 115 - #define XFS_AT_UPDCTIME 0x00040000 116 - #define XFS_AT_ACL 0x00080000 117 - #define XFS_AT_CAP 0x00100000 118 - #define XFS_AT_INF 0x00200000 119 - #define XFS_AT_XFLAGS 0x00400000 120 - #define XFS_AT_EXTSIZE 0x00800000 121 - #define XFS_AT_NEXTENTS 0x01000000 122 - #define XFS_AT_ANEXTENTS 0x02000000 123 - #define XFS_AT_PROJID 0x04000000 124 - #define XFS_AT_SIZE_NOPERM 0x08000000 125 - #define XFS_AT_GENCOUNT 0x10000000 126 - 127 - #define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ 128 - XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ 129 - XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ 130 - XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\ 131 - XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\ 132 - XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT) 133 - 134 - #define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ 135 - XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ 136 - XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ 137 - XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID) 138 - 139 - #define XFS_AT_TIMES (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME) 140 - 141 - #define XFS_AT_UPDTIMES (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME) 142 - 143 - #define XFS_AT_NOSET (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\ 144 - XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\ 145 - XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT) 146 69 147 70 extern void vn_init(void); 148 - extern int vn_revalidate(bhv_vnode_t *); 149 71 150 72 /* 151 73 * Yeah, these don't take vnode anymore at all, all this should be ··· 139 219 #define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ 140 220 PAGECACHE_TAG_DIRTY) 141 221 142 - /* 143 - * Flags to vop_setattr/getattr. 
144 - */ 145 - #define ATTR_UTIME 0x01 /* non-default utime(2) request */ 146 - #define ATTR_DMI 0x08 /* invocation from a DMI function */ 147 - #define ATTR_LAZY 0x80 /* set/get attributes lazily */ 148 - #define ATTR_NONBLOCK 0x100 /* return EAGAIN if operation would block */ 149 - #define ATTR_NOLOCK 0x200 /* Don't grab any conflicting locks */ 150 - #define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */ 151 222 152 223 /* 153 224 * Tracking vnode activity.
+330
fs/xfs/linux-2.6/xfs_xattr.c
··· 1 + /* 2 + * Copyright (C) 2008 Christoph Hellwig. 3 + * Portions Copyright (C) 2000-2008 Silicon Graphics, Inc. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + 19 + #include "xfs.h" 20 + #include "xfs_da_btree.h" 21 + #include "xfs_bmap_btree.h" 22 + #include "xfs_inode.h" 23 + #include "xfs_attr.h" 24 + #include "xfs_attr_leaf.h" 25 + #include "xfs_acl.h" 26 + #include "xfs_vnodeops.h" 27 + 28 + #include <linux/posix_acl_xattr.h> 29 + #include <linux/xattr.h> 30 + 31 + 32 + /* 33 + * ACL handling. Should eventually be moved into xfs_acl.c 34 + */ 35 + 36 + static int 37 + xfs_decode_acl(const char *name) 38 + { 39 + if (strcmp(name, "posix_acl_access") == 0) 40 + return _ACL_TYPE_ACCESS; 41 + else if (strcmp(name, "posix_acl_default") == 0) 42 + return _ACL_TYPE_DEFAULT; 43 + return -EINVAL; 44 + } 45 + 46 + /* 47 + * Get system extended attributes which at the moment only 48 + * includes Posix ACLs. 
49 + */ 50 + static int 51 + xfs_xattr_system_get(struct inode *inode, const char *name, 52 + void *buffer, size_t size) 53 + { 54 + int acl; 55 + 56 + acl = xfs_decode_acl(name); 57 + if (acl < 0) 58 + return acl; 59 + 60 + return xfs_acl_vget(inode, buffer, size, acl); 61 + } 62 + 63 + static int 64 + xfs_xattr_system_set(struct inode *inode, const char *name, 65 + const void *value, size_t size, int flags) 66 + { 67 + int acl; 68 + 69 + acl = xfs_decode_acl(name); 70 + if (acl < 0) 71 + return acl; 72 + if (flags & XATTR_CREATE) 73 + return -EINVAL; 74 + 75 + if (!value) 76 + return xfs_acl_vremove(inode, acl); 77 + 78 + return xfs_acl_vset(inode, (void *)value, size, acl); 79 + } 80 + 81 + static struct xattr_handler xfs_xattr_system_handler = { 82 + .prefix = XATTR_SYSTEM_PREFIX, 83 + .get = xfs_xattr_system_get, 84 + .set = xfs_xattr_system_set, 85 + }; 86 + 87 + 88 + /* 89 + * Real xattr handling. The only difference between the namespaces is 90 + * a flag passed to the low-level attr code. 
91 + */ 92 + 93 + static int 94 + __xfs_xattr_get(struct inode *inode, const char *name, 95 + void *value, size_t size, int xflags) 96 + { 97 + struct xfs_inode *ip = XFS_I(inode); 98 + int error, asize = size; 99 + 100 + if (strcmp(name, "") == 0) 101 + return -EINVAL; 102 + 103 + /* Convert Linux syscall to XFS internal ATTR flags */ 104 + if (!size) { 105 + xflags |= ATTR_KERNOVAL; 106 + value = NULL; 107 + } 108 + 109 + error = -xfs_attr_get(ip, name, value, &asize, xflags); 110 + if (error) 111 + return error; 112 + return asize; 113 + } 114 + 115 + static int 116 + __xfs_xattr_set(struct inode *inode, const char *name, const void *value, 117 + size_t size, int flags, int xflags) 118 + { 119 + struct xfs_inode *ip = XFS_I(inode); 120 + 121 + if (strcmp(name, "") == 0) 122 + return -EINVAL; 123 + 124 + /* Convert Linux syscall to XFS internal ATTR flags */ 125 + if (flags & XATTR_CREATE) 126 + xflags |= ATTR_CREATE; 127 + if (flags & XATTR_REPLACE) 128 + xflags |= ATTR_REPLACE; 129 + 130 + if (!value) 131 + return -xfs_attr_remove(ip, name, xflags); 132 + return -xfs_attr_set(ip, name, (void *)value, size, xflags); 133 + } 134 + 135 + static int 136 + xfs_xattr_user_get(struct inode *inode, const char *name, 137 + void *value, size_t size) 138 + { 139 + return __xfs_xattr_get(inode, name, value, size, 0); 140 + } 141 + 142 + static int 143 + xfs_xattr_user_set(struct inode *inode, const char *name, 144 + const void *value, size_t size, int flags) 145 + { 146 + return __xfs_xattr_set(inode, name, value, size, flags, 0); 147 + } 148 + 149 + static struct xattr_handler xfs_xattr_user_handler = { 150 + .prefix = XATTR_USER_PREFIX, 151 + .get = xfs_xattr_user_get, 152 + .set = xfs_xattr_user_set, 153 + }; 154 + 155 + 156 + static int 157 + xfs_xattr_trusted_get(struct inode *inode, const char *name, 158 + void *value, size_t size) 159 + { 160 + return __xfs_xattr_get(inode, name, value, size, ATTR_ROOT); 161 + } 162 + 163 + static int 164 + 
xfs_xattr_trusted_set(struct inode *inode, const char *name, 165 + const void *value, size_t size, int flags) 166 + { 167 + return __xfs_xattr_set(inode, name, value, size, flags, ATTR_ROOT); 168 + } 169 + 170 + static struct xattr_handler xfs_xattr_trusted_handler = { 171 + .prefix = XATTR_TRUSTED_PREFIX, 172 + .get = xfs_xattr_trusted_get, 173 + .set = xfs_xattr_trusted_set, 174 + }; 175 + 176 + 177 + static int 178 + xfs_xattr_secure_get(struct inode *inode, const char *name, 179 + void *value, size_t size) 180 + { 181 + return __xfs_xattr_get(inode, name, value, size, ATTR_SECURE); 182 + } 183 + 184 + static int 185 + xfs_xattr_secure_set(struct inode *inode, const char *name, 186 + const void *value, size_t size, int flags) 187 + { 188 + return __xfs_xattr_set(inode, name, value, size, flags, ATTR_SECURE); 189 + } 190 + 191 + static struct xattr_handler xfs_xattr_security_handler = { 192 + .prefix = XATTR_SECURITY_PREFIX, 193 + .get = xfs_xattr_secure_get, 194 + .set = xfs_xattr_secure_set, 195 + }; 196 + 197 + 198 + struct xattr_handler *xfs_xattr_handlers[] = { 199 + &xfs_xattr_user_handler, 200 + &xfs_xattr_trusted_handler, 201 + &xfs_xattr_security_handler, 202 + &xfs_xattr_system_handler, 203 + NULL 204 + }; 205 + 206 + static unsigned int xfs_xattr_prefix_len(int flags) 207 + { 208 + if (flags & XFS_ATTR_SECURE) 209 + return sizeof("security"); 210 + else if (flags & XFS_ATTR_ROOT) 211 + return sizeof("trusted"); 212 + else 213 + return sizeof("user"); 214 + } 215 + 216 + static const char *xfs_xattr_prefix(int flags) 217 + { 218 + if (flags & XFS_ATTR_SECURE) 219 + return xfs_xattr_security_handler.prefix; 220 + else if (flags & XFS_ATTR_ROOT) 221 + return xfs_xattr_trusted_handler.prefix; 222 + else 223 + return xfs_xattr_user_handler.prefix; 224 + } 225 + 226 + static int 227 + xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags, 228 + char *name, int namelen, int valuelen, char *value) 229 + { 230 + unsigned int prefix_len = 
xfs_xattr_prefix_len(flags); 231 + char *offset; 232 + int arraytop; 233 + 234 + ASSERT(context->count >= 0); 235 + 236 + /* 237 + * Only show root namespace entries if we are actually allowed to 238 + * see them. 239 + */ 240 + if ((flags & XFS_ATTR_ROOT) && !capable(CAP_SYS_ADMIN)) 241 + return 0; 242 + 243 + arraytop = context->count + prefix_len + namelen + 1; 244 + if (arraytop > context->firstu) { 245 + context->count = -1; /* insufficient space */ 246 + return 1; 247 + } 248 + offset = (char *)context->alist + context->count; 249 + strncpy(offset, xfs_xattr_prefix(flags), prefix_len); 250 + offset += prefix_len; 251 + strncpy(offset, name, namelen); /* real name */ 252 + offset += namelen; 253 + *offset = '\0'; 254 + context->count += prefix_len + namelen + 1; 255 + return 0; 256 + } 257 + 258 + static int 259 + xfs_xattr_put_listent_sizes(struct xfs_attr_list_context *context, int flags, 260 + char *name, int namelen, int valuelen, char *value) 261 + { 262 + context->count += xfs_xattr_prefix_len(flags) + namelen + 1; 263 + return 0; 264 + } 265 + 266 + static int 267 + list_one_attr(const char *name, const size_t len, void *data, 268 + size_t size, ssize_t *result) 269 + { 270 + char *p = data + *result; 271 + 272 + *result += len; 273 + if (!size) 274 + return 0; 275 + if (*result > size) 276 + return -ERANGE; 277 + 278 + strcpy(p, name); 279 + return 0; 280 + } 281 + 282 + ssize_t 283 + xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) 284 + { 285 + struct xfs_attr_list_context context; 286 + struct attrlist_cursor_kern cursor = { 0 }; 287 + struct inode *inode = dentry->d_inode; 288 + int error; 289 + 290 + /* 291 + * First read the regular on-disk attributes. 
292 + */ 293 + memset(&context, 0, sizeof(context)); 294 + context.dp = XFS_I(inode); 295 + context.cursor = &cursor; 296 + context.resynch = 1; 297 + context.alist = data; 298 + context.bufsize = size; 299 + context.firstu = context.bufsize; 300 + 301 + if (size) 302 + context.put_listent = xfs_xattr_put_listent; 303 + else 304 + context.put_listent = xfs_xattr_put_listent_sizes; 305 + 306 + xfs_attr_list_int(&context); 307 + if (context.count < 0) 308 + return -ERANGE; 309 + 310 + /* 311 + * Then add the two synthetic ACL attributes. 312 + */ 313 + if (xfs_acl_vhasacl_access(inode)) { 314 + error = list_one_attr(POSIX_ACL_XATTR_ACCESS, 315 + strlen(POSIX_ACL_XATTR_ACCESS) + 1, 316 + data, size, &context.count); 317 + if (error) 318 + return error; 319 + } 320 + 321 + if (xfs_acl_vhasacl_default(inode)) { 322 + error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, 323 + strlen(POSIX_ACL_XATTR_DEFAULT) + 1, 324 + data, size, &context.count); 325 + if (error) 326 + return error; 327 + } 328 + 329 + return context.count; 330 + }
+1 -2
fs/xfs/quota/xfs_dquot.c
··· 1435 1435 /* ARGSUSED */ 1436 1436 int 1437 1437 xfs_qm_dqpurge( 1438 - xfs_dquot_t *dqp, 1439 - uint flags) 1438 + xfs_dquot_t *dqp) 1440 1439 { 1441 1440 xfs_dqhash_t *thishash; 1442 1441 xfs_mount_t *mp = dqp->q_mount;
+1 -1
fs/xfs/quota/xfs_dquot.h
··· 164 164 165 165 extern void xfs_qm_dqdestroy(xfs_dquot_t *); 166 166 extern int xfs_qm_dqflush(xfs_dquot_t *, uint); 167 - extern int xfs_qm_dqpurge(xfs_dquot_t *, uint); 167 + extern int xfs_qm_dqpurge(xfs_dquot_t *); 168 168 extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); 169 169 extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); 170 170 extern int xfs_qm_dqflock_nowait(xfs_dquot_t *);
+2 -2
fs/xfs/quota/xfs_dquot_item.c
··· 576 576 * xfs_trans_delete_ail() drops the AIL lock. 577 577 */ 578 578 xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs); 579 - kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); 580 - kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); 579 + kmem_free(qfs); 580 + kmem_free(qfe); 581 581 return (xfs_lsn_t)-1; 582 582 } 583 583
+12 -12
fs/xfs/quota/xfs_qm.c
··· 192 192 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); 193 193 xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); 194 194 } 195 - kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t)); 196 - kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t)); 195 + kmem_free(xqm->qm_usr_dqhtable); 196 + kmem_free(xqm->qm_grp_dqhtable); 197 197 xqm->qm_usr_dqhtable = NULL; 198 198 xqm->qm_grp_dqhtable = NULL; 199 199 xqm->qm_dqhashmask = 0; ··· 201 201 #ifdef DEBUG 202 202 mutex_destroy(&qcheck_lock); 203 203 #endif 204 - kmem_free(xqm, sizeof(xfs_qm_t)); 204 + kmem_free(xqm); 205 205 } 206 206 207 207 /* ··· 445 445 } 446 446 } 447 447 if (uqp) { 448 - XFS_PURGE_INODE(uqp); 448 + IRELE(uqp); 449 449 mp->m_quotainfo->qi_uquotaip = NULL; 450 450 } 451 451 if (gqp) { 452 - XFS_PURGE_INODE(gqp); 452 + IRELE(gqp); 453 453 mp->m_quotainfo->qi_gquotaip = NULL; 454 454 } 455 455 out: ··· 631 631 * freelist in INACTIVE state. 632 632 */ 633 633 nextdqp = dqp->MPL_NEXT; 634 - nmisses += xfs_qm_dqpurge(dqp, flags); 634 + nmisses += xfs_qm_dqpurge(dqp); 635 635 dqp = nextdqp; 636 636 } 637 637 xfs_qm_mplist_unlock(mp); ··· 1134 1134 * and change the superblock accordingly. 
1135 1135 */ 1136 1136 if ((error = xfs_qm_init_quotainos(mp))) { 1137 - kmem_free(qinf, sizeof(xfs_quotainfo_t)); 1137 + kmem_free(qinf); 1138 1138 mp->m_quotainfo = NULL; 1139 1139 return error; 1140 1140 } ··· 1240 1240 xfs_qm_list_destroy(&qi->qi_dqlist); 1241 1241 1242 1242 if (qi->qi_uquotaip) { 1243 - XFS_PURGE_INODE(qi->qi_uquotaip); 1243 + IRELE(qi->qi_uquotaip); 1244 1244 qi->qi_uquotaip = NULL; /* paranoia */ 1245 1245 } 1246 1246 if (qi->qi_gquotaip) { 1247 - XFS_PURGE_INODE(qi->qi_gquotaip); 1247 + IRELE(qi->qi_gquotaip); 1248 1248 qi->qi_gquotaip = NULL; 1249 1249 } 1250 1250 mutex_destroy(&qi->qi_quotaofflock); 1251 - kmem_free(qi, sizeof(xfs_quotainfo_t)); 1251 + kmem_free(qi); 1252 1252 mp->m_quotainfo = NULL; 1253 1253 } 1254 1254 ··· 1394 1394 * locked exclusively and joined to the transaction already. 1395 1395 */ 1396 1396 ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL)); 1397 - VN_HOLD(XFS_ITOV((*ip))); 1397 + IHOLD(*ip); 1398 1398 1399 1399 /* 1400 1400 * Make the changes in the superblock, and log those too. ··· 1623 1623 break; 1624 1624 } while (nmaps > 0); 1625 1625 1626 - kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map)); 1626 + kmem_free(map); 1627 1627 1628 1628 return error; 1629 1629 }
+6 -6
fs/xfs/quota/xfs_qm_syscalls.c
··· 362 362 * if we don't need them anymore. 363 363 */ 364 364 if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) { 365 - XFS_PURGE_INODE(XFS_QI_UQIP(mp)); 365 + IRELE(XFS_QI_UQIP(mp)); 366 366 XFS_QI_UQIP(mp) = NULL; 367 367 } 368 368 if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) { 369 - XFS_PURGE_INODE(XFS_QI_GQIP(mp)); 369 + IRELE(XFS_QI_GQIP(mp)); 370 370 XFS_QI_GQIP(mp) = NULL; 371 371 } 372 372 out_error: ··· 1449 1449 for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { 1450 1450 xfs_dqtest_cmp(d); 1451 1451 e = (xfs_dqtest_t *) d->HL_NEXT; 1452 - kmem_free(d, sizeof(xfs_dqtest_t)); 1452 + kmem_free(d); 1453 1453 d = e; 1454 1454 } 1455 1455 h1 = &qmtest_gdqtab[i]; 1456 1456 for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { 1457 1457 xfs_dqtest_cmp(d); 1458 1458 e = (xfs_dqtest_t *) d->HL_NEXT; 1459 - kmem_free(d, sizeof(xfs_dqtest_t)); 1459 + kmem_free(d); 1460 1460 d = e; 1461 1461 } 1462 1462 } ··· 1467 1467 } else { 1468 1468 cmn_err(CE_DEBUG, "******** quotacheck successful! ********"); 1469 1469 } 1470 - kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); 1471 - kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); 1470 + kmem_free(qmtest_udqtab); 1471 + kmem_free(qmtest_gdqtab); 1472 1472 mutex_unlock(&qcheck_lock); 1473 1473 return (qmtest_nfails); 1474 1474 }
-3
fs/xfs/quota/xfs_quota_priv.h
··· 158 158 #define XFS_IS_SUSER_DQUOT(dqp) \ 159 159 (!((dqp)->q_core.d_id)) 160 160 161 - #define XFS_PURGE_INODE(ip) \ 162 - IRELE(ip); 163 - 164 161 #define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ 165 162 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \ 166 163 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
+2 -2
fs/xfs/support/ktrace.c
··· 89 89 if (sleep & KM_SLEEP) 90 90 panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); 91 91 92 - kmem_free(ktp, sizeof(*ktp)); 92 + kmem_free(ktp); 93 93 94 94 return NULL; 95 95 } ··· 126 126 } else { 127 127 entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t)); 128 128 129 - kmem_free(ktp->kt_entries, entries_size); 129 + kmem_free(ktp->kt_entries); 130 130 } 131 131 132 132 kmem_zone_free(ktrace_hdr_zone, ktp);
+1 -7
fs/xfs/support/uuid.c
··· 17 17 */ 18 18 #include <xfs.h> 19 19 20 - static mutex_t uuid_monitor; 20 + static DEFINE_MUTEX(uuid_monitor); 21 21 static int uuid_table_size; 22 22 static uuid_t *uuid_table; 23 23 ··· 131 131 } 132 132 ASSERT(i < uuid_table_size); 133 133 mutex_unlock(&uuid_monitor); 134 - } 135 - 136 - void __init 137 - uuid_init(void) 138 - { 139 - mutex_init(&uuid_monitor); 140 134 }
-1
fs/xfs/support/uuid.h
··· 22 22 unsigned char __u_bits[16]; 23 23 } uuid_t; 24 24 25 - extern void uuid_init(void); 26 25 extern void uuid_create_nil(uuid_t *uuid); 27 26 extern int uuid_is_nil(uuid_t *uuid); 28 27 extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
+10 -11
fs/xfs/xfs_acl.c
··· 341 341 342 342 /* If the file has no ACL return -1. */ 343 343 rval = sizeof(xfs_acl_t); 344 - if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, 345 - ATTR_ROOT | ATTR_KERNACCESS)) { 344 + if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, ATTR_ROOT)) { 346 345 _ACL_FREE(acl); 347 346 return -1; 348 347 } ··· 719 720 xfs_acl_t *acl, 720 721 int *basicperms) 721 722 { 722 - bhv_vattr_t va; 723 + struct iattr iattr; 723 724 xfs_acl_entry_t *ap; 724 725 xfs_acl_entry_t *gap = NULL; 725 726 int i, nomask = 1; ··· 733 734 * Copy the u::, g::, o::, and m:: bits from the ACL into the 734 735 * mode. The m:: bits take precedence over the g:: bits. 735 736 */ 736 - va.va_mask = XFS_AT_MODE; 737 - va.va_mode = xfs_vtoi(vp)->i_d.di_mode; 738 - va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); 737 + iattr.ia_valid = ATTR_MODE; 738 + iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode; 739 + iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); 739 740 ap = acl->acl_entry; 740 741 for (i = 0; i < acl->acl_cnt; ++i) { 741 742 switch (ap->ae_tag) { 742 743 case ACL_USER_OBJ: 743 - va.va_mode |= ap->ae_perm << 6; 744 + iattr.ia_mode |= ap->ae_perm << 6; 744 745 break; 745 746 case ACL_GROUP_OBJ: 746 747 gap = ap; 747 748 break; 748 749 case ACL_MASK: /* more than just standard modes */ 749 750 nomask = 0; 750 - va.va_mode |= ap->ae_perm << 3; 751 + iattr.ia_mode |= ap->ae_perm << 3; 751 752 *basicperms = 0; 752 753 break; 753 754 case ACL_OTHER: 754 - va.va_mode |= ap->ae_perm; 755 + iattr.ia_mode |= ap->ae_perm; 755 756 break; 756 757 default: /* more than just standard modes */ 757 758 *basicperms = 0; ··· 762 763 763 764 /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */ 764 765 if (gap && nomask) 765 - va.va_mode |= gap->ae_perm << 3; 766 + iattr.ia_mode |= gap->ae_perm << 3; 766 767 767 - return xfs_setattr(xfs_vtoi(vp), &va, 0, sys_cred); 768 + return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred); 768 769 } 769 770 770 771 /*
+2 -2
fs/xfs/xfs_acl.h
··· 46 46 #define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) 47 47 #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) 48 48 49 + #define _ACL_TYPE_ACCESS 1 50 + #define _ACL_TYPE_DEFAULT 2 49 51 50 52 #ifdef CONFIG_XFS_POSIX_ACL 51 53 ··· 68 66 extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int); 69 67 extern int xfs_acl_vremove(bhv_vnode_t *, int); 70 68 71 - #define _ACL_TYPE_ACCESS 1 72 - #define _ACL_TYPE_DEFAULT 2 73 69 #define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) 74 70 75 71 #define _ACL_INHERIT(c,m,d) (xfs_acl_inherit(c,m,d))
+92 -516
fs/xfs/xfs_attr.c
··· 16 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 17 */ 18 18 19 - #include <linux/capability.h> 20 - 21 19 #include "xfs.h" 22 20 #include "xfs_fs.h" 23 21 #include "xfs_types.h" ··· 54 56 * 55 57 * Provide the external interfaces to manage attribute lists. 56 58 */ 57 - 58 - #define ATTR_SYSCOUNT 2 59 - static struct attrnames posix_acl_access; 60 - static struct attrnames posix_acl_default; 61 - static struct attrnames *attr_system_names[ATTR_SYSCOUNT]; 62 59 63 60 /*======================================================================== 64 61 * Function prototypes for the kernel. ··· 109 116 return 0; 110 117 } 111 118 119 + STATIC int 120 + xfs_inode_hasattr( 121 + struct xfs_inode *ip) 122 + { 123 + if (!XFS_IFORK_Q(ip) || 124 + (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 125 + ip->i_d.di_anextents == 0)) 126 + return 0; 127 + return 1; 128 + } 129 + 112 130 /*======================================================================== 113 131 * Overall external interface routines. 114 132 *========================================================================*/ ··· 131 127 xfs_da_args_t args; 132 128 int error; 133 129 134 - if ((XFS_IFORK_Q(ip) == 0) || 135 - (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 136 - ip->i_d.di_anextents == 0)) 137 - return(ENOATTR); 130 + if (!xfs_inode_hasattr(ip)) 131 + return ENOATTR; 138 132 139 133 /* 140 134 * Fill in the arg structure for this request. ··· 150 148 /* 151 149 * Decide on what work routines to call based on the inode size. 
152 150 */ 153 - if (XFS_IFORK_Q(ip) == 0 || 154 - (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 155 - ip->i_d.di_anextents == 0)) { 156 - error = XFS_ERROR(ENOATTR); 157 - } else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 151 + if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 158 152 error = xfs_attr_shortform_getvalue(&args); 159 153 } else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) { 160 154 error = xfs_attr_leaf_get(&args); ··· 239 241 args.firstblock = &firstblock; 240 242 args.flist = &flist; 241 243 args.whichfork = XFS_ATTR_FORK; 242 - args.addname = 1; 243 - args.oknoent = 1; 244 + args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; 244 245 245 246 /* 246 247 * Determine space new attribute will use, and if it would be ··· 526 529 /* 527 530 * Decide on what work routines to call based on the inode size. 528 531 */ 529 - if (XFS_IFORK_Q(dp) == 0 || 530 - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 531 - dp->i_d.di_anextents == 0)) { 532 + if (!xfs_inode_hasattr(dp)) { 532 533 error = XFS_ERROR(ENOATTR); 533 534 goto out; 534 535 } ··· 596 601 return error; 597 602 598 603 xfs_ilock(dp, XFS_ILOCK_SHARED); 599 - if (XFS_IFORK_Q(dp) == 0 || 600 - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 601 - dp->i_d.di_anextents == 0)) { 604 + if (!xfs_inode_hasattr(dp)) { 602 605 xfs_iunlock(dp, XFS_ILOCK_SHARED); 603 - return(XFS_ERROR(ENOATTR)); 606 + return XFS_ERROR(ENOATTR); 604 607 } 605 608 xfs_iunlock(dp, XFS_ILOCK_SHARED); 606 609 607 610 return xfs_attr_remove_int(dp, &xname, flags); 608 611 } 609 612 610 - STATIC int 613 + int 611 614 xfs_attr_list_int(xfs_attr_list_context_t *context) 612 615 { 613 616 int error; 614 617 xfs_inode_t *dp = context->dp; 615 618 619 + XFS_STATS_INC(xs_attr_list); 620 + 621 + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 622 + return EIO; 623 + 624 + xfs_ilock(dp, XFS_ILOCK_SHARED); 625 + xfs_attr_trace_l_c("syscall start", context); 626 + 616 627 /* 617 628 * Decide on what work routines to call based on the 
inode size. 618 629 */ 619 - if (XFS_IFORK_Q(dp) == 0 || 620 - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 621 - dp->i_d.di_anextents == 0)) { 630 + if (!xfs_inode_hasattr(dp)) { 622 631 error = 0; 623 632 } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 624 633 error = xfs_attr_shortform_list(context); ··· 631 632 } else { 632 633 error = xfs_attr_node_list(context); 633 634 } 635 + 636 + xfs_iunlock(dp, XFS_ILOCK_SHARED); 637 + xfs_attr_trace_l_c("syscall end", context); 638 + 634 639 return error; 635 640 } 636 641 ··· 651 648 */ 652 649 /*ARGSUSED*/ 653 650 STATIC int 654 - xfs_attr_put_listent(xfs_attr_list_context_t *context, attrnames_t *namesp, 651 + xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags, 655 652 char *name, int namelen, 656 653 int valuelen, char *value) 657 654 { 655 + struct attrlist *alist = (struct attrlist *)context->alist; 658 656 attrlist_ent_t *aep; 659 657 int arraytop; 660 658 661 659 ASSERT(!(context->flags & ATTR_KERNOVAL)); 662 660 ASSERT(context->count >= 0); 663 661 ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); 664 - ASSERT(context->firstu >= sizeof(*context->alist)); 662 + ASSERT(context->firstu >= sizeof(*alist)); 665 663 ASSERT(context->firstu <= context->bufsize); 666 664 667 - arraytop = sizeof(*context->alist) + 668 - context->count * sizeof(context->alist->al_offset[0]); 665 + /* 666 + * Only list entries in the right namespace. 
667 + */ 668 + if (((context->flags & ATTR_SECURE) == 0) != 669 + ((flags & XFS_ATTR_SECURE) == 0)) 670 + return 0; 671 + if (((context->flags & ATTR_ROOT) == 0) != 672 + ((flags & XFS_ATTR_ROOT) == 0)) 673 + return 0; 674 + 675 + arraytop = sizeof(*alist) + 676 + context->count * sizeof(alist->al_offset[0]); 669 677 context->firstu -= ATTR_ENTSIZE(namelen); 670 678 if (context->firstu < arraytop) { 671 679 xfs_attr_trace_l_c("buffer full", context); 672 - context->alist->al_more = 1; 680 + alist->al_more = 1; 673 681 context->seen_enough = 1; 674 682 return 1; 675 683 } 676 684 677 - aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]); 685 + aep = (attrlist_ent_t *)&context->alist[context->firstu]; 678 686 aep->a_valuelen = valuelen; 679 687 memcpy(aep->a_name, name, namelen); 680 - aep->a_name[ namelen ] = 0; 681 - context->alist->al_offset[ context->count++ ] = context->firstu; 682 - context->alist->al_count = context->count; 688 + aep->a_name[namelen] = 0; 689 + alist->al_offset[context->count++] = context->firstu; 690 + alist->al_count = context->count; 683 691 xfs_attr_trace_l_c("add", context); 684 - return 0; 685 - } 686 - 687 - STATIC int 688 - xfs_attr_kern_list(xfs_attr_list_context_t *context, attrnames_t *namesp, 689 - char *name, int namelen, 690 - int valuelen, char *value) 691 - { 692 - char *offset; 693 - int arraytop; 694 - 695 - ASSERT(context->count >= 0); 696 - 697 - arraytop = context->count + namesp->attr_namelen + namelen + 1; 698 - if (arraytop > context->firstu) { 699 - context->count = -1; /* insufficient space */ 700 - return 1; 701 - } 702 - offset = (char *)context->alist + context->count; 703 - strncpy(offset, namesp->attr_name, namesp->attr_namelen); 704 - offset += namesp->attr_namelen; 705 - strncpy(offset, name, namelen); /* real name */ 706 - offset += namelen; 707 - *offset = '\0'; 708 - context->count += namesp->attr_namelen + namelen + 1; 709 - return 0; 710 - } 711 - 712 - /*ARGSUSED*/ 713 - STATIC int 714 
- xfs_attr_kern_list_sizes(xfs_attr_list_context_t *context, attrnames_t *namesp, 715 - char *name, int namelen, 716 - int valuelen, char *value) 717 - { 718 - context->count += namesp->attr_namelen + namelen + 1; 719 692 return 0; 720 693 } 721 694 ··· 711 732 attrlist_cursor_kern_t *cursor) 712 733 { 713 734 xfs_attr_list_context_t context; 735 + struct attrlist *alist; 714 736 int error; 715 - 716 - XFS_STATS_INC(xs_attr_list); 717 737 718 738 /* 719 739 * Validate the cursor. ··· 734 756 /* 735 757 * Initialize the output buffer. 736 758 */ 759 + memset(&context, 0, sizeof(context)); 737 760 context.dp = dp; 738 761 context.cursor = cursor; 739 - context.count = 0; 740 - context.dupcnt = 0; 741 762 context.resynch = 1; 742 763 context.flags = flags; 743 - context.seen_enough = 0; 744 - context.alist = (attrlist_t *)buffer; 745 - context.put_value = 0; 764 + context.alist = buffer; 765 + context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ 766 + context.firstu = context.bufsize; 767 + context.put_listent = xfs_attr_put_listent; 746 768 747 - if (flags & ATTR_KERNAMELS) { 748 - context.bufsize = bufsize; 749 - context.firstu = context.bufsize; 750 - if (flags & ATTR_KERNOVAL) 751 - context.put_listent = xfs_attr_kern_list_sizes; 752 - else 753 - context.put_listent = xfs_attr_kern_list; 754 - } else { 755 - context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ 756 - context.firstu = context.bufsize; 757 - context.alist->al_count = 0; 758 - context.alist->al_more = 0; 759 - context.alist->al_offset[0] = context.bufsize; 760 - context.put_listent = xfs_attr_put_listent; 761 - } 762 - 763 - if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 764 - return EIO; 765 - 766 - xfs_ilock(dp, XFS_ILOCK_SHARED); 767 - xfs_attr_trace_l_c("syscall start", &context); 769 + alist = (struct attrlist *)context.alist; 770 + alist->al_count = 0; 771 + alist->al_more = 0; 772 + alist->al_offset[0] = context.bufsize; 768 773 769 774 error = xfs_attr_list_int(&context); 770 - 771 - 
xfs_iunlock(dp, XFS_ILOCK_SHARED); 772 - xfs_attr_trace_l_c("syscall end", &context); 773 - 774 - if (context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS)) { 775 - /* must return negated buffer size or the error */ 776 - if (context.count < 0) 777 - error = XFS_ERROR(ERANGE); 778 - else 779 - error = -context.count; 780 - } else 781 - ASSERT(error >= 0); 782 - 775 + ASSERT(error >= 0); 783 776 return error; 784 777 } 785 778 ··· 765 816 ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); 766 817 767 818 xfs_ilock(dp, XFS_ILOCK_SHARED); 768 - if ((XFS_IFORK_Q(dp) == 0) || 769 - (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || 770 - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 771 - dp->i_d.di_anextents == 0)) { 819 + if (!xfs_inode_hasattr(dp) || 820 + dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 772 821 xfs_iunlock(dp, XFS_ILOCK_SHARED); 773 - return(0); 822 + return 0; 774 823 } 775 824 xfs_iunlock(dp, XFS_ILOCK_SHARED); 776 825 ··· 801 854 /* 802 855 * Decide on what work routines to call based on the inode size. 803 856 */ 804 - if ((XFS_IFORK_Q(dp) == 0) || 805 - (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || 806 - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 807 - dp->i_d.di_anextents == 0)) { 857 + if (!xfs_inode_hasattr(dp) || 858 + dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 808 859 error = 0; 809 860 goto out; 810 861 } ··· 919 974 xfs_da_brelse(args->trans, bp); 920 975 return(retval); 921 976 } 922 - args->rename = 1; /* an atomic rename */ 977 + args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */ 923 978 args->blkno2 = args->blkno; /* set 2nd entry info*/ 924 979 args->index2 = args->index; 925 980 args->rmtblkno2 = args->rmtblkno; ··· 999 1054 * so that one disappears and one appears atomically. Then we 1000 1055 * must remove the "old" attribute/value pair. 
1001 1056 */ 1002 - if (args->rename) { 1057 + if (args->op_flags & XFS_DA_OP_RENAME) { 1003 1058 /* 1004 1059 * In a separate transaction, set the incomplete flag on the 1005 1060 * "old" attr and clear the incomplete flag on the "new" attr. ··· 1252 1307 } else if (retval == EEXIST) { 1253 1308 if (args->flags & ATTR_CREATE) 1254 1309 goto out; 1255 - args->rename = 1; /* atomic rename op */ 1310 + args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ 1256 1311 args->blkno2 = args->blkno; /* set 2nd entry info*/ 1257 1312 args->index2 = args->index; 1258 1313 args->rmtblkno2 = args->rmtblkno; ··· 1370 1425 * so that one disappears and one appears atomically. Then we 1371 1426 * must remove the "old" attribute/value pair. 1372 1427 */ 1373 - if (args->rename) { 1428 + if (args->op_flags & XFS_DA_OP_RENAME) { 1374 1429 /* 1375 1430 * In a separate transaction, set the incomplete flag on the 1376 1431 * "old" attr and clear the incomplete flag on the "new" attr. ··· 2245 2300 void 2246 2301 xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context) 2247 2302 { 2248 - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, 2249 - (__psunsigned_t)context->dp, 2250 - (__psunsigned_t)context->cursor->hashval, 2251 - (__psunsigned_t)context->cursor->blkno, 2252 - (__psunsigned_t)context->cursor->offset, 2253 - (__psunsigned_t)context->alist, 2254 - (__psunsigned_t)context->bufsize, 2255 - (__psunsigned_t)context->count, 2256 - (__psunsigned_t)context->firstu, 2257 - (__psunsigned_t) 2258 - ((context->count > 0) && 2259 - !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL))) 2260 - ? 
(ATTR_ENTRY(context->alist, 2261 - context->count-1)->a_valuelen) 2262 - : 0, 2263 - (__psunsigned_t)context->dupcnt, 2264 - (__psunsigned_t)context->flags, 2303 + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context, 2265 2304 (__psunsigned_t)NULL, 2266 2305 (__psunsigned_t)NULL, 2267 2306 (__psunsigned_t)NULL); ··· 2258 2329 xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, 2259 2330 struct xfs_da_intnode *node) 2260 2331 { 2261 - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, 2262 - (__psunsigned_t)context->dp, 2263 - (__psunsigned_t)context->cursor->hashval, 2264 - (__psunsigned_t)context->cursor->blkno, 2265 - (__psunsigned_t)context->cursor->offset, 2266 - (__psunsigned_t)context->alist, 2267 - (__psunsigned_t)context->bufsize, 2268 - (__psunsigned_t)context->count, 2269 - (__psunsigned_t)context->firstu, 2270 - (__psunsigned_t) 2271 - ((context->count > 0) && 2272 - !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL))) 2273 - ? (ATTR_ENTRY(context->alist, 2274 - context->count-1)->a_valuelen) 2275 - : 0, 2276 - (__psunsigned_t)context->dupcnt, 2277 - (__psunsigned_t)context->flags, 2332 + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context, 2278 2333 (__psunsigned_t)be16_to_cpu(node->hdr.count), 2279 2334 (__psunsigned_t)be32_to_cpu(node->btree[0].hashval), 2280 2335 (__psunsigned_t)be32_to_cpu(node->btree[ ··· 2272 2359 xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, 2273 2360 struct xfs_da_node_entry *btree) 2274 2361 { 2275 - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, 2276 - (__psunsigned_t)context->dp, 2277 - (__psunsigned_t)context->cursor->hashval, 2278 - (__psunsigned_t)context->cursor->blkno, 2279 - (__psunsigned_t)context->cursor->offset, 2280 - (__psunsigned_t)context->alist, 2281 - (__psunsigned_t)context->bufsize, 2282 - (__psunsigned_t)context->count, 2283 - (__psunsigned_t)context->firstu, 2284 - (__psunsigned_t) 2285 - ((context->count > 0) && 2286 - !(context->flags & 
(ATTR_KERNAMELS|ATTR_KERNOVAL))) 2287 - ? (ATTR_ENTRY(context->alist, 2288 - context->count-1)->a_valuelen) 2289 - : 0, 2290 - (__psunsigned_t)context->dupcnt, 2291 - (__psunsigned_t)context->flags, 2362 + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context, 2292 2363 (__psunsigned_t)be32_to_cpu(btree->hashval), 2293 2364 (__psunsigned_t)be32_to_cpu(btree->before), 2294 2365 (__psunsigned_t)NULL); ··· 2285 2388 xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, 2286 2389 struct xfs_attr_leafblock *leaf) 2287 2390 { 2288 - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, 2289 - (__psunsigned_t)context->dp, 2290 - (__psunsigned_t)context->cursor->hashval, 2291 - (__psunsigned_t)context->cursor->blkno, 2292 - (__psunsigned_t)context->cursor->offset, 2293 - (__psunsigned_t)context->alist, 2294 - (__psunsigned_t)context->bufsize, 2295 - (__psunsigned_t)context->count, 2296 - (__psunsigned_t)context->firstu, 2297 - (__psunsigned_t) 2298 - ((context->count > 0) && 2299 - !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL))) 2300 - ? 
(ATTR_ENTRY(context->alist, 2301 - context->count-1)->a_valuelen) 2302 - : 0, 2303 - (__psunsigned_t)context->dupcnt, 2304 - (__psunsigned_t)context->flags, 2391 + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context, 2305 2392 (__psunsigned_t)be16_to_cpu(leaf->hdr.count), 2306 2393 (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval), 2307 2394 (__psunsigned_t)be32_to_cpu(leaf->entries[ ··· 2298 2417 */ 2299 2418 void 2300 2419 xfs_attr_trace_enter(int type, char *where, 2301 - __psunsigned_t a2, __psunsigned_t a3, 2302 - __psunsigned_t a4, __psunsigned_t a5, 2303 - __psunsigned_t a6, __psunsigned_t a7, 2304 - __psunsigned_t a8, __psunsigned_t a9, 2305 - __psunsigned_t a10, __psunsigned_t a11, 2306 - __psunsigned_t a12, __psunsigned_t a13, 2307 - __psunsigned_t a14, __psunsigned_t a15) 2420 + struct xfs_attr_list_context *context, 2421 + __psunsigned_t a13, __psunsigned_t a14, 2422 + __psunsigned_t a15) 2308 2423 { 2309 2424 ASSERT(xfs_attr_trace_buf); 2310 2425 ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type), 2311 - (void *)where, 2312 - (void *)a2, (void *)a3, (void *)a4, 2313 - (void *)a5, (void *)a6, (void *)a7, 2314 - (void *)a8, (void *)a9, (void *)a10, 2315 - (void *)a11, (void *)a12, (void *)a13, 2316 - (void *)a14, (void *)a15); 2426 + (void *)((__psunsigned_t)where), 2427 + (void *)((__psunsigned_t)context->dp), 2428 + (void *)((__psunsigned_t)context->cursor->hashval), 2429 + (void *)((__psunsigned_t)context->cursor->blkno), 2430 + (void *)((__psunsigned_t)context->cursor->offset), 2431 + (void *)((__psunsigned_t)context->alist), 2432 + (void *)((__psunsigned_t)context->bufsize), 2433 + (void *)((__psunsigned_t)context->count), 2434 + (void *)((__psunsigned_t)context->firstu), 2435 + NULL, 2436 + (void *)((__psunsigned_t)context->dupcnt), 2437 + (void *)((__psunsigned_t)context->flags), 2438 + (void *)a13, (void *)a14, (void *)a15); 2317 2439 } 2318 2440 #endif /* XFS_ATTR_TRACE */ 2319 - 2320 - 2321 - 
/*======================================================================== 2322 - * System (pseudo) namespace attribute interface routines. 2323 - *========================================================================*/ 2324 - 2325 - STATIC int 2326 - posix_acl_access_set( 2327 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2328 - { 2329 - return xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS); 2330 - } 2331 - 2332 - STATIC int 2333 - posix_acl_access_remove( 2334 - bhv_vnode_t *vp, char *name, int xflags) 2335 - { 2336 - return xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); 2337 - } 2338 - 2339 - STATIC int 2340 - posix_acl_access_get( 2341 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2342 - { 2343 - return xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS); 2344 - } 2345 - 2346 - STATIC int 2347 - posix_acl_access_exists( 2348 - bhv_vnode_t *vp) 2349 - { 2350 - return xfs_acl_vhasacl_access(vp); 2351 - } 2352 - 2353 - STATIC int 2354 - posix_acl_default_set( 2355 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2356 - { 2357 - return xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT); 2358 - } 2359 - 2360 - STATIC int 2361 - posix_acl_default_get( 2362 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2363 - { 2364 - return xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT); 2365 - } 2366 - 2367 - STATIC int 2368 - posix_acl_default_remove( 2369 - bhv_vnode_t *vp, char *name, int xflags) 2370 - { 2371 - return xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT); 2372 - } 2373 - 2374 - STATIC int 2375 - posix_acl_default_exists( 2376 - bhv_vnode_t *vp) 2377 - { 2378 - return xfs_acl_vhasacl_default(vp); 2379 - } 2380 - 2381 - static struct attrnames posix_acl_access = { 2382 - .attr_name = "posix_acl_access", 2383 - .attr_namelen = sizeof("posix_acl_access") - 1, 2384 - .attr_get = posix_acl_access_get, 2385 - .attr_set = posix_acl_access_set, 2386 - .attr_remove = posix_acl_access_remove, 2387 - .attr_exists = 
posix_acl_access_exists, 2388 - }; 2389 - 2390 - static struct attrnames posix_acl_default = { 2391 - .attr_name = "posix_acl_default", 2392 - .attr_namelen = sizeof("posix_acl_default") - 1, 2393 - .attr_get = posix_acl_default_get, 2394 - .attr_set = posix_acl_default_set, 2395 - .attr_remove = posix_acl_default_remove, 2396 - .attr_exists = posix_acl_default_exists, 2397 - }; 2398 - 2399 - static struct attrnames *attr_system_names[] = 2400 - { &posix_acl_access, &posix_acl_default }; 2401 - 2402 - 2403 - /*======================================================================== 2404 - * Namespace-prefix-style attribute name interface routines. 2405 - *========================================================================*/ 2406 - 2407 - STATIC int 2408 - attr_generic_set( 2409 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2410 - { 2411 - return -xfs_attr_set(xfs_vtoi(vp), name, data, size, xflags); 2412 - } 2413 - 2414 - STATIC int 2415 - attr_generic_get( 2416 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2417 - { 2418 - int error, asize = size; 2419 - 2420 - error = xfs_attr_get(xfs_vtoi(vp), name, data, &asize, xflags); 2421 - if (!error) 2422 - return asize; 2423 - return -error; 2424 - } 2425 - 2426 - STATIC int 2427 - attr_generic_remove( 2428 - bhv_vnode_t *vp, char *name, int xflags) 2429 - { 2430 - return -xfs_attr_remove(xfs_vtoi(vp), name, xflags); 2431 - } 2432 - 2433 - STATIC int 2434 - attr_generic_listadd( 2435 - attrnames_t *prefix, 2436 - attrnames_t *namesp, 2437 - void *data, 2438 - size_t size, 2439 - ssize_t *result) 2440 - { 2441 - char *p = data + *result; 2442 - 2443 - *result += prefix->attr_namelen; 2444 - *result += namesp->attr_namelen + 1; 2445 - if (!size) 2446 - return 0; 2447 - if (*result > size) 2448 - return -ERANGE; 2449 - strcpy(p, prefix->attr_name); 2450 - p += prefix->attr_namelen; 2451 - strcpy(p, namesp->attr_name); 2452 - p += namesp->attr_namelen + 1; 2453 - return 0; 2454 
- } 2455 - 2456 - STATIC int 2457 - attr_system_list( 2458 - bhv_vnode_t *vp, 2459 - void *data, 2460 - size_t size, 2461 - ssize_t *result) 2462 - { 2463 - attrnames_t *namesp; 2464 - int i, error = 0; 2465 - 2466 - for (i = 0; i < ATTR_SYSCOUNT; i++) { 2467 - namesp = attr_system_names[i]; 2468 - if (!namesp->attr_exists || !namesp->attr_exists(vp)) 2469 - continue; 2470 - error = attr_generic_listadd(&attr_system, namesp, 2471 - data, size, result); 2472 - if (error) 2473 - break; 2474 - } 2475 - return error; 2476 - } 2477 - 2478 - int 2479 - attr_generic_list( 2480 - bhv_vnode_t *vp, void *data, size_t size, int xflags, ssize_t *result) 2481 - { 2482 - attrlist_cursor_kern_t cursor = { 0 }; 2483 - int error; 2484 - 2485 - error = xfs_attr_list(xfs_vtoi(vp), data, size, xflags, &cursor); 2486 - if (error > 0) 2487 - return -error; 2488 - *result = -error; 2489 - return attr_system_list(vp, data, size, result); 2490 - } 2491 - 2492 - attrnames_t * 2493 - attr_lookup_namespace( 2494 - char *name, 2495 - struct attrnames **names, 2496 - int nnames) 2497 - { 2498 - int i; 2499 - 2500 - for (i = 0; i < nnames; i++) 2501 - if (!strncmp(name, names[i]->attr_name, names[i]->attr_namelen)) 2502 - return names[i]; 2503 - return NULL; 2504 - } 2505 - 2506 - /* 2507 - * Some checks to prevent people abusing EAs to get over quota: 2508 - * - Don't allow modifying user EAs on devices/symlinks; 2509 - * - Don't allow modifying user EAs if sticky bit set; 2510 - */ 2511 - STATIC int 2512 - attr_user_capable( 2513 - bhv_vnode_t *vp, 2514 - cred_t *cred) 2515 - { 2516 - struct inode *inode = vn_to_inode(vp); 2517 - 2518 - if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 2519 - return -EPERM; 2520 - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && 2521 - !capable(CAP_SYS_ADMIN)) 2522 - return -EPERM; 2523 - if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) && 2524 - (current_fsuid(cred) != inode->i_uid) && !capable(CAP_FOWNER)) 2525 - return -EPERM; 2526 - return 0; 
2527 - } 2528 - 2529 - STATIC int 2530 - attr_trusted_capable( 2531 - bhv_vnode_t *vp, 2532 - cred_t *cred) 2533 - { 2534 - struct inode *inode = vn_to_inode(vp); 2535 - 2536 - if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 2537 - return -EPERM; 2538 - if (!capable(CAP_SYS_ADMIN)) 2539 - return -EPERM; 2540 - return 0; 2541 - } 2542 - 2543 - STATIC int 2544 - attr_system_set( 2545 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2546 - { 2547 - attrnames_t *namesp; 2548 - int error; 2549 - 2550 - if (xflags & ATTR_CREATE) 2551 - return -EINVAL; 2552 - 2553 - namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT); 2554 - if (!namesp) 2555 - return -EOPNOTSUPP; 2556 - error = namesp->attr_set(vp, name, data, size, xflags); 2557 - if (!error) 2558 - error = vn_revalidate(vp); 2559 - return error; 2560 - } 2561 - 2562 - STATIC int 2563 - attr_system_get( 2564 - bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) 2565 - { 2566 - attrnames_t *namesp; 2567 - 2568 - namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT); 2569 - if (!namesp) 2570 - return -EOPNOTSUPP; 2571 - return namesp->attr_get(vp, name, data, size, xflags); 2572 - } 2573 - 2574 - STATIC int 2575 - attr_system_remove( 2576 - bhv_vnode_t *vp, char *name, int xflags) 2577 - { 2578 - attrnames_t *namesp; 2579 - 2580 - namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT); 2581 - if (!namesp) 2582 - return -EOPNOTSUPP; 2583 - return namesp->attr_remove(vp, name, xflags); 2584 - } 2585 - 2586 - struct attrnames attr_system = { 2587 - .attr_name = "system.", 2588 - .attr_namelen = sizeof("system.") - 1, 2589 - .attr_flag = ATTR_SYSTEM, 2590 - .attr_get = attr_system_get, 2591 - .attr_set = attr_system_set, 2592 - .attr_remove = attr_system_remove, 2593 - .attr_capable = (attrcapable_t)fs_noerr, 2594 - }; 2595 - 2596 - struct attrnames attr_trusted = { 2597 - .attr_name = "trusted.", 2598 - .attr_namelen = sizeof("trusted.") - 1, 
2599 - .attr_flag = ATTR_ROOT, 2600 - .attr_get = attr_generic_get, 2601 - .attr_set = attr_generic_set, 2602 - .attr_remove = attr_generic_remove, 2603 - .attr_capable = attr_trusted_capable, 2604 - }; 2605 - 2606 - struct attrnames attr_secure = { 2607 - .attr_name = "security.", 2608 - .attr_namelen = sizeof("security.") - 1, 2609 - .attr_flag = ATTR_SECURE, 2610 - .attr_get = attr_generic_get, 2611 - .attr_set = attr_generic_set, 2612 - .attr_remove = attr_generic_remove, 2613 - .attr_capable = (attrcapable_t)fs_noerr, 2614 - }; 2615 - 2616 - struct attrnames attr_user = { 2617 - .attr_name = "user.", 2618 - .attr_namelen = sizeof("user.") - 1, 2619 - .attr_get = attr_generic_get, 2620 - .attr_set = attr_generic_set, 2621 - .attr_remove = attr_generic_remove, 2622 - .attr_capable = attr_user_capable, 2623 - }; 2624 - 2625 - struct attrnames *attr_namespaces[] = 2626 - { &attr_system, &attr_trusted, &attr_secure, &attr_user };
+30 -60
fs/xfs/xfs_attr.h
··· 18 18 #ifndef __XFS_ATTR_H__ 19 19 #define __XFS_ATTR_H__ 20 20 21 + struct xfs_inode; 22 + struct xfs_da_args; 23 + struct xfs_attr_list_context; 24 + 21 25 /* 22 - * xfs_attr.h 23 - * 24 26 * Large attribute lists are structured around Btrees where all the data 25 27 * elements are in the leaf nodes. Attribute names are hashed into an int, 26 28 * then that int is used as the index into the Btree. Since the hashval ··· 37 35 * External interfaces 38 36 *========================================================================*/ 39 37 40 - struct cred; 41 - struct xfs_attr_list_context; 42 - 43 - typedef int (*attrset_t)(bhv_vnode_t *, char *, void *, size_t, int); 44 - typedef int (*attrget_t)(bhv_vnode_t *, char *, void *, size_t, int); 45 - typedef int (*attrremove_t)(bhv_vnode_t *, char *, int); 46 - typedef int (*attrexists_t)(bhv_vnode_t *); 47 - typedef int (*attrcapable_t)(bhv_vnode_t *, struct cred *); 48 - 49 - typedef struct attrnames { 50 - char * attr_name; 51 - unsigned int attr_namelen; 52 - unsigned int attr_flag; 53 - attrget_t attr_get; 54 - attrset_t attr_set; 55 - attrremove_t attr_remove; 56 - attrexists_t attr_exists; 57 - attrcapable_t attr_capable; 58 - } attrnames_t; 59 - 60 - #define ATTR_NAMECOUNT 4 61 - extern struct attrnames attr_user; 62 - extern struct attrnames attr_secure; 63 - extern struct attrnames attr_system; 64 - extern struct attrnames attr_trusted; 65 - extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT]; 66 - 67 - extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int); 68 - extern int attr_generic_list(bhv_vnode_t *, void *, size_t, int, ssize_t *); 69 38 70 39 #define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */ 71 40 #define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */ ··· 44 71 #define ATTR_SECURE 0x0008 /* use attrs in security namespace */ 45 72 #define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */ 46 73 #define ATTR_REPLACE 0x0020 /* pure set: fail 
if attr does not exist */ 47 - #define ATTR_SYSTEM 0x0100 /* use attrs in system (pseudo) namespace */ 48 74 49 - #define ATTR_KERNACCESS 0x0400 /* [kernel] iaccess, inode held io-locked */ 50 75 #define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ 51 76 #define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ 52 - #define ATTR_KERNAMELS 0x4000 /* [kernel] list attr names (simple list) */ 53 - 54 - #define ATTR_KERNORMALS 0x0800 /* [kernel] normal attr list: user+secure */ 55 - #define ATTR_KERNROOTLS 0x8000 /* [kernel] include root in the attr list */ 56 - #define ATTR_KERNFULLS (ATTR_KERNORMALS|ATTR_KERNROOTLS) 57 77 58 78 /* 59 79 * The maximum size (into the kernel or returned from the kernel) of an ··· 85 119 &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ]) 86 120 87 121 /* 88 - * Multi-attribute operation vector. 89 - */ 90 - typedef struct attr_multiop { 91 - int am_opcode; /* operation to perform (ATTR_OP_GET, etc.) */ 92 - int am_error; /* [out arg] result of this sub-op (an errno) */ 93 - char *am_attrname; /* attribute name to work with */ 94 - char *am_attrvalue; /* [in/out arg] attribute value (raw bytes) */ 95 - int am_length; /* [in/out arg] length of value */ 96 - int am_flags; /* bitwise OR of attr API flags defined above */ 97 - } attr_multiop_t; 98 - 99 - #define ATTR_OP_GET 1 /* return the indicated attr's value */ 100 - #define ATTR_OP_SET 2 /* set/create the indicated attr/value pair */ 101 - #define ATTR_OP_REMOVE 3 /* remove the indicated attr */ 102 - 103 - /* 104 122 * Kernel-internal version of the attrlist cursor. 105 123 */ 106 124 typedef struct attrlist_cursor_kern { ··· 98 148 99 149 100 150 /*======================================================================== 101 - * Function prototypes for the kernel. 151 + * Structure used to pass context around among the routines. 
102 152 *========================================================================*/ 103 153 104 - struct xfs_inode; 105 - struct attrlist_cursor_kern; 106 - struct xfs_da_args; 154 + 155 + typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int, 156 + char *, int, int, char *); 157 + 158 + typedef struct xfs_attr_list_context { 159 + struct xfs_inode *dp; /* inode */ 160 + struct attrlist_cursor_kern *cursor; /* position in list */ 161 + char *alist; /* output buffer */ 162 + int seen_enough; /* T/F: seen enough of list? */ 163 + ssize_t count; /* num used entries */ 164 + int dupcnt; /* count dup hashvals seen */ 165 + int bufsize; /* total buffer size */ 166 + int firstu; /* first used byte in buffer */ 167 + int flags; /* from VOP call */ 168 + int resynch; /* T/F: resynch with cursor */ 169 + int put_value; /* T/F: need value for listent */ 170 + put_listent_func_t put_listent; /* list output fmt function */ 171 + int index; /* index into output buffer */ 172 + } xfs_attr_list_context_t; 173 + 174 + 175 + /*======================================================================== 176 + * Function prototypes for the kernel. 177 + *========================================================================*/ 107 178 108 179 /* 109 180 * Overall external interface routines. 110 181 */ 111 182 int xfs_attr_inactive(struct xfs_inode *dp); 112 - 113 - int xfs_attr_shortform_getvalue(struct xfs_da_args *); 114 183 int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); 115 184 int xfs_attr_rmtval_get(struct xfs_da_args *args); 185 + int xfs_attr_list_int(struct xfs_attr_list_context *); 116 186 117 187 #endif /* __XFS_ATTR_H__ */
+27 -72
fs/xfs/xfs_attr_leaf.c
··· 94 94 * Namespace helper routines 95 95 *========================================================================*/ 96 96 97 - STATIC_INLINE attrnames_t * 98 - xfs_attr_flags_namesp(int flags) 99 - { 100 - return ((flags & XFS_ATTR_SECURE) ? &attr_secure: 101 - ((flags & XFS_ATTR_ROOT) ? &attr_trusted : &attr_user)); 102 - } 103 - 104 97 /* 105 98 * If namespace bits don't match return 0. 106 99 * If all match then return 1. ··· 102 109 xfs_attr_namesp_match(int arg_flags, int ondisk_flags) 103 110 { 104 111 return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags); 105 - } 106 - 107 - /* 108 - * If namespace bits don't match and we don't have an override for it 109 - * then return 0. 110 - * If all match or are overridable then return 1. 111 - */ 112 - STATIC_INLINE int 113 - xfs_attr_namesp_match_overrides(int arg_flags, int ondisk_flags) 114 - { 115 - if (((arg_flags & ATTR_SECURE) == 0) != 116 - ((ondisk_flags & XFS_ATTR_SECURE) == 0) && 117 - !(arg_flags & ATTR_KERNORMALS)) 118 - return 0; 119 - if (((arg_flags & ATTR_ROOT) == 0) != 120 - ((ondisk_flags & XFS_ATTR_ROOT) == 0) && 121 - !(arg_flags & ATTR_KERNROOTLS)) 122 - return 0; 123 - return 1; 124 112 } 125 113 126 114 ··· 343 369 * Fix up the start offset of the attribute fork 344 370 */ 345 371 totsize -= size; 346 - if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname && 347 - (mp->m_flags & XFS_MOUNT_ATTR2) && 348 - (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) { 372 + if (totsize == sizeof(xfs_attr_sf_hdr_t) && 373 + !(args->op_flags & XFS_DA_OP_ADDNAME) && 374 + (mp->m_flags & XFS_MOUNT_ATTR2) && 375 + (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) { 349 376 /* 350 377 * Last attribute now removed, revert to original 351 378 * inode format making all literal area available ··· 364 389 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); 365 390 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); 366 391 ASSERT(dp->i_d.di_forkoff); 367 - ASSERT(totsize > 
sizeof(xfs_attr_sf_hdr_t) || args->addname || 368 - !(mp->m_flags & XFS_MOUNT_ATTR2) || 369 - dp->i_d.di_format == XFS_DINODE_FMT_BTREE); 392 + ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || 393 + (args->op_flags & XFS_DA_OP_ADDNAME) || 394 + !(mp->m_flags & XFS_MOUNT_ATTR2) || 395 + dp->i_d.di_format == XFS_DINODE_FMT_BTREE); 370 396 dp->i_afp->if_ext_max = 371 397 XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); 372 398 dp->i_df.if_ext_max = ··· 507 531 nargs.total = args->total; 508 532 nargs.whichfork = XFS_ATTR_FORK; 509 533 nargs.trans = args->trans; 510 - nargs.oknoent = 1; 534 + nargs.op_flags = XFS_DA_OP_OKNOENT; 511 535 512 536 sfe = &sf->list[0]; 513 537 for (i = 0; i < sf->hdr.count; i++) { ··· 531 555 out: 532 556 if(bp) 533 557 xfs_da_buf_done(bp); 534 - kmem_free(tmpbuffer, size); 558 + kmem_free(tmpbuffer); 535 559 return(error); 536 560 } 537 561 ··· 600 624 (XFS_ISRESET_CURSOR(cursor) && 601 625 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { 602 626 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { 603 - attrnames_t *namesp; 604 - 605 - if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) { 606 - sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 607 - continue; 608 - } 609 - namesp = xfs_attr_flags_namesp(sfe->flags); 610 627 error = context->put_listent(context, 611 - namesp, 628 + sfe->flags, 612 629 (char *)sfe->nameval, 613 630 (int)sfe->namelen, 614 631 (int)sfe->valuelen, ··· 645 676 XFS_ERRLEVEL_LOW, 646 677 context->dp->i_mount, sfe); 647 678 xfs_attr_trace_l_c("sf corrupted", context); 648 - kmem_free(sbuf, sbsize); 679 + kmem_free(sbuf); 649 680 return XFS_ERROR(EFSCORRUPTED); 650 681 } 651 - if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) { 652 - sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 653 - continue; 654 - } 682 + 655 683 sbp->entno = i; 656 684 sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); 657 685 sbp->name = (char *)sfe->nameval; ··· 683 717 } 684 718 } 685 719 if (i == nsbuf) 
{ 686 - kmem_free(sbuf, sbsize); 720 + kmem_free(sbuf); 687 721 xfs_attr_trace_l_c("blk end", context); 688 722 return(0); 689 723 } ··· 692 726 * Loop putting entries into the user buffer. 693 727 */ 694 728 for ( ; i < nsbuf; i++, sbp++) { 695 - attrnames_t *namesp; 696 - 697 - namesp = xfs_attr_flags_namesp(sbp->flags); 698 - 699 729 if (cursor->hashval != sbp->hash) { 700 730 cursor->hashval = sbp->hash; 701 731 cursor->offset = 0; 702 732 } 703 733 error = context->put_listent(context, 704 - namesp, 734 + sbp->flags, 705 735 sbp->name, 706 736 sbp->namelen, 707 737 sbp->valuelen, ··· 709 747 cursor->offset++; 710 748 } 711 749 712 - kmem_free(sbuf, sbsize); 750 + kmem_free(sbuf); 713 751 xfs_attr_trace_l_c("sf E-O-F", context); 714 752 return(0); 715 753 } ··· 815 853 nargs.total = args->total; 816 854 nargs.whichfork = XFS_ATTR_FORK; 817 855 nargs.trans = args->trans; 818 - nargs.oknoent = 1; 856 + nargs.op_flags = XFS_DA_OP_OKNOENT; 819 857 entry = &leaf->entries[0]; 820 858 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { 821 859 if (entry->flags & XFS_ATTR_INCOMPLETE) ··· 835 873 error = 0; 836 874 837 875 out: 838 - kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); 876 + kmem_free(tmpbuffer); 839 877 return(error); 840 878 } 841 879 ··· 1117 1155 entry->hashval = cpu_to_be32(args->hashval); 1118 1156 entry->flags = tmp ? 
XFS_ATTR_LOCAL : 0; 1119 1157 entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags); 1120 - if (args->rename) { 1158 + if (args->op_flags & XFS_DA_OP_RENAME) { 1121 1159 entry->flags |= XFS_ATTR_INCOMPLETE; 1122 1160 if ((args->blkno2 == args->blkno) && 1123 1161 (args->index2 <= args->index)) { ··· 1233 1271 be16_to_cpu(hdr_s->count), mp); 1234 1272 xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); 1235 1273 1236 - kmem_free(tmpbuffer, XFS_LBSIZE(mp)); 1274 + kmem_free(tmpbuffer); 1237 1275 } 1238 1276 1239 1277 /* ··· 1883 1921 be16_to_cpu(drop_hdr->count), mp); 1884 1922 } 1885 1923 memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); 1886 - kmem_free(tmpbuffer, state->blocksize); 1924 + kmem_free(tmpbuffer); 1887 1925 } 1888 1926 1889 1927 xfs_da_log_buf(state->args->trans, save_blk->bp, 0, ··· 2362 2400 */ 2363 2401 retval = 0; 2364 2402 for ( ; (i < be16_to_cpu(leaf->hdr.count)); entry++, i++) { 2365 - attrnames_t *namesp; 2366 - 2367 2403 if (be32_to_cpu(entry->hashval) != cursor->hashval) { 2368 2404 cursor->hashval = be32_to_cpu(entry->hashval); 2369 2405 cursor->offset = 0; ··· 2369 2409 2370 2410 if (entry->flags & XFS_ATTR_INCOMPLETE) 2371 2411 continue; /* skip incomplete entries */ 2372 - if (!xfs_attr_namesp_match_overrides(context->flags, entry->flags)) 2373 - continue; 2374 - 2375 - namesp = xfs_attr_flags_namesp(entry->flags); 2376 2412 2377 2413 if (entry->flags & XFS_ATTR_LOCAL) { 2378 2414 xfs_attr_leaf_name_local_t *name_loc = 2379 2415 XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 2380 2416 2381 2417 retval = context->put_listent(context, 2382 - namesp, 2418 + entry->flags, 2383 2419 (char *)name_loc->nameval, 2384 2420 (int)name_loc->namelen, 2385 2421 be16_to_cpu(name_loc->valuelen), ··· 2402 2446 if (retval) 2403 2447 return retval; 2404 2448 retval = context->put_listent(context, 2405 - namesp, 2449 + entry->flags, 2406 2450 (char *)name_rmt->name, 2407 2451 (int)name_rmt->namelen, 2408 2452 valuelen, 2409 2453 (char*)args.value); 2410 
- kmem_free(args.value, valuelen); 2411 - } 2412 - else { 2454 + kmem_free(args.value); 2455 + } else { 2413 2456 retval = context->put_listent(context, 2414 - namesp, 2457 + entry->flags, 2415 2458 (char *)name_rmt->name, 2416 2459 (int)name_rmt->namelen, 2417 2460 valuelen, ··· 2909 2954 error = tmp; /* save only the 1st errno */ 2910 2955 } 2911 2956 2912 - kmem_free((xfs_caddr_t)list, size); 2957 + kmem_free((xfs_caddr_t)list); 2913 2958 return(error); 2914 2959 } 2915 2960
+1 -28
fs/xfs/xfs_attr_leaf.h
··· 30 30 31 31 struct attrlist; 32 32 struct attrlist_cursor_kern; 33 - struct attrnames; 33 + struct xfs_attr_list_context; 34 34 struct xfs_dabuf; 35 35 struct xfs_da_args; 36 36 struct xfs_da_state; ··· 203 203 { 204 204 return (((bsize) >> 1) + ((bsize) >> 2)); 205 205 } 206 - 207 - 208 - /*======================================================================== 209 - * Structure used to pass context around among the routines. 210 - *========================================================================*/ 211 - 212 - 213 - struct xfs_attr_list_context; 214 - 215 - typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, struct attrnames *, 216 - char *, int, int, char *); 217 - 218 - typedef struct xfs_attr_list_context { 219 - struct xfs_inode *dp; /* inode */ 220 - struct attrlist_cursor_kern *cursor; /* position in list */ 221 - struct attrlist *alist; /* output buffer */ 222 - int seen_enough; /* T/F: seen enough of list? */ 223 - int count; /* num used entries */ 224 - int dupcnt; /* count dup hashvals seen */ 225 - int bufsize; /* total buffer size */ 226 - int firstu; /* first used byte in buffer */ 227 - int flags; /* from VOP call */ 228 - int resynch; /* T/F: resynch with cursor */ 229 - int put_value; /* T/F: need value for listent */ 230 - put_listent_func_t put_listent; /* list output fmt function */ 231 - int index; /* index into output buffer */ 232 - } xfs_attr_list_context_t; 233 206 234 207 /* 235 208 * Used to keep a list of "remote value" extents when unlinking an inode.
+3 -7
fs/xfs/xfs_attr_sf.h
··· 97 97 void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, 98 98 struct xfs_attr_leafblock *leaf); 99 99 void xfs_attr_trace_enter(int type, char *where, 100 - __psunsigned_t a2, __psunsigned_t a3, 101 - __psunsigned_t a4, __psunsigned_t a5, 102 - __psunsigned_t a6, __psunsigned_t a7, 103 - __psunsigned_t a8, __psunsigned_t a9, 104 - __psunsigned_t a10, __psunsigned_t a11, 105 - __psunsigned_t a12, __psunsigned_t a13, 106 - __psunsigned_t a14, __psunsigned_t a15); 100 + struct xfs_attr_list_context *context, 101 + __psunsigned_t a13, __psunsigned_t a14, 102 + __psunsigned_t a15); 107 103 #else 108 104 #define xfs_attr_trace_l_c(w,c) 109 105 #define xfs_attr_trace_l_cn(w,c,n)
+63 -55
fs/xfs/xfs_bmap.c
··· 428 428 cur->bc_private.b.firstblock = *firstblock; 429 429 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) 430 430 goto error0; 431 - ASSERT(stat == 1); /* must be at least one entry */ 431 + /* must be at least one entry */ 432 + XFS_WANT_CORRUPTED_GOTO(stat == 1, error0); 432 433 if ((error = xfs_bmbt_newroot(cur, flags, &stat))) 433 434 goto error0; 434 435 if (stat == 0) { ··· 817 816 RIGHT.br_startblock, 818 817 RIGHT.br_blockcount, &i))) 819 818 goto done; 820 - ASSERT(i == 1); 819 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 821 820 if ((error = xfs_bmbt_delete(cur, &i))) 822 821 goto done; 823 - ASSERT(i == 1); 822 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 824 823 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 825 824 goto done; 826 - ASSERT(i == 1); 825 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 827 826 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 828 827 LEFT.br_startblock, 829 828 LEFT.br_blockcount + ··· 861 860 LEFT.br_startblock, LEFT.br_blockcount, 862 861 &i))) 863 862 goto done; 864 - ASSERT(i == 1); 863 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 865 864 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 866 865 LEFT.br_startblock, 867 866 LEFT.br_blockcount + ··· 896 895 RIGHT.br_startblock, 897 896 RIGHT.br_blockcount, &i))) 898 897 goto done; 899 - ASSERT(i == 1); 898 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 900 899 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 901 900 new->br_startblock, 902 901 PREV.br_blockcount + ··· 929 928 new->br_startblock, new->br_blockcount, 930 929 &i))) 931 930 goto done; 932 - ASSERT(i == 0); 931 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 933 932 cur->bc_rec.b.br_state = XFS_EXT_NORM; 934 933 if ((error = xfs_bmbt_insert(cur, &i))) 935 934 goto done; 936 - ASSERT(i == 1); 935 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 937 936 } 938 937 *dnew = 0; 939 938 /* DELTA: The in-core extent described by new changed type. 
*/ ··· 964 963 LEFT.br_startblock, LEFT.br_blockcount, 965 964 &i))) 966 965 goto done; 967 - ASSERT(i == 1); 966 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 968 967 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 969 968 LEFT.br_startblock, 970 969 LEFT.br_blockcount + ··· 1005 1004 new->br_startblock, new->br_blockcount, 1006 1005 &i))) 1007 1006 goto done; 1008 - ASSERT(i == 0); 1007 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1009 1008 cur->bc_rec.b.br_state = XFS_EXT_NORM; 1010 1009 if ((error = xfs_bmbt_insert(cur, &i))) 1011 1010 goto done; 1012 - ASSERT(i == 1); 1011 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1013 1012 } 1014 1013 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1015 1014 ip->i_d.di_nextents > ip->i_df.if_ext_max) { ··· 1055 1054 RIGHT.br_startblock, 1056 1055 RIGHT.br_blockcount, &i))) 1057 1056 goto done; 1058 - ASSERT(i == 1); 1057 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1059 1058 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1060 1059 new->br_startblock, 1061 1060 new->br_blockcount + ··· 1095 1094 new->br_startblock, new->br_blockcount, 1096 1095 &i))) 1097 1096 goto done; 1098 - ASSERT(i == 0); 1097 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1099 1098 cur->bc_rec.b.br_state = XFS_EXT_NORM; 1100 1099 if ((error = xfs_bmbt_insert(cur, &i))) 1101 1100 goto done; 1102 - ASSERT(i == 1); 1101 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1103 1102 } 1104 1103 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1105 1104 ip->i_d.di_nextents > ip->i_df.if_ext_max) { ··· 1150 1149 new->br_startblock, new->br_blockcount, 1151 1150 &i))) 1152 1151 goto done; 1153 - ASSERT(i == 0); 1152 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1154 1153 cur->bc_rec.b.br_state = XFS_EXT_NORM; 1155 1154 if ((error = xfs_bmbt_insert(cur, &i))) 1156 1155 goto done; 1157 - ASSERT(i == 1); 1156 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1158 1157 } 1159 1158 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1160 1159 ip->i_d.di_nextents > ip->i_df.if_ext_max) { ··· 1378 
1377 RIGHT.br_startblock, 1379 1378 RIGHT.br_blockcount, &i))) 1380 1379 goto done; 1381 - ASSERT(i == 1); 1380 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1382 1381 if ((error = xfs_bmbt_delete(cur, &i))) 1383 1382 goto done; 1384 - ASSERT(i == 1); 1383 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1385 1384 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 1386 1385 goto done; 1387 - ASSERT(i == 1); 1386 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1388 1387 if ((error = xfs_bmbt_delete(cur, &i))) 1389 1388 goto done; 1390 - ASSERT(i == 1); 1389 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1391 1390 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 1392 1391 goto done; 1393 - ASSERT(i == 1); 1392 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1394 1393 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 1395 1394 LEFT.br_startblock, 1396 1395 LEFT.br_blockcount + PREV.br_blockcount + ··· 1427 1426 PREV.br_startblock, PREV.br_blockcount, 1428 1427 &i))) 1429 1428 goto done; 1430 - ASSERT(i == 1); 1429 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1431 1430 if ((error = xfs_bmbt_delete(cur, &i))) 1432 1431 goto done; 1433 - ASSERT(i == 1); 1432 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1434 1433 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 1435 1434 goto done; 1436 - ASSERT(i == 1); 1435 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1437 1436 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 1438 1437 LEFT.br_startblock, 1439 1438 LEFT.br_blockcount + PREV.br_blockcount, ··· 1470 1469 RIGHT.br_startblock, 1471 1470 RIGHT.br_blockcount, &i))) 1472 1471 goto done; 1473 - ASSERT(i == 1); 1472 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1474 1473 if ((error = xfs_bmbt_delete(cur, &i))) 1475 1474 goto done; 1476 - ASSERT(i == 1); 1475 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1477 1476 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 1478 1477 goto done; 1479 - ASSERT(i == 1); 1478 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1480 1479 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1481 1480 new->br_startblock, 
1482 1481 new->br_blockcount + RIGHT.br_blockcount, ··· 1509 1508 new->br_startblock, new->br_blockcount, 1510 1509 &i))) 1511 1510 goto done; 1512 - ASSERT(i == 1); 1511 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1513 1512 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1514 1513 new->br_startblock, new->br_blockcount, 1515 1514 newext))) ··· 1550 1549 PREV.br_startblock, PREV.br_blockcount, 1551 1550 &i))) 1552 1551 goto done; 1553 - ASSERT(i == 1); 1552 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1554 1553 if ((error = xfs_bmbt_update(cur, 1555 1554 PREV.br_startoff + new->br_blockcount, 1556 1555 PREV.br_startblock + new->br_blockcount, ··· 1597 1596 PREV.br_startblock, PREV.br_blockcount, 1598 1597 &i))) 1599 1598 goto done; 1600 - ASSERT(i == 1); 1599 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1601 1600 if ((error = xfs_bmbt_update(cur, 1602 1601 PREV.br_startoff + new->br_blockcount, 1603 1602 PREV.br_startblock + new->br_blockcount, ··· 1607 1606 cur->bc_rec.b = *new; 1608 1607 if ((error = xfs_bmbt_insert(cur, &i))) 1609 1608 goto done; 1610 - ASSERT(i == 1); 1609 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1611 1610 } 1612 1611 /* DELTA: One in-core extent is split in two. 
*/ 1613 1612 temp = PREV.br_startoff; ··· 1641 1640 PREV.br_startblock, 1642 1641 PREV.br_blockcount, &i))) 1643 1642 goto done; 1644 - ASSERT(i == 1); 1643 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1645 1644 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 1646 1645 PREV.br_startblock, 1647 1646 PREV.br_blockcount - new->br_blockcount, ··· 1683 1682 PREV.br_startblock, PREV.br_blockcount, 1684 1683 &i))) 1685 1684 goto done; 1686 - ASSERT(i == 1); 1685 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1687 1686 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 1688 1687 PREV.br_startblock, 1689 1688 PREV.br_blockcount - new->br_blockcount, ··· 1693 1692 new->br_startblock, new->br_blockcount, 1694 1693 &i))) 1695 1694 goto done; 1696 - ASSERT(i == 0); 1695 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1697 1696 cur->bc_rec.b.br_state = XFS_EXT_NORM; 1698 1697 if ((error = xfs_bmbt_insert(cur, &i))) 1699 1698 goto done; 1700 - ASSERT(i == 1); 1699 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1701 1700 } 1702 1701 /* DELTA: One in-core extent is split in two. 
*/ 1703 1702 temp = PREV.br_startoff; ··· 1733 1732 PREV.br_startblock, PREV.br_blockcount, 1734 1733 &i))) 1735 1734 goto done; 1736 - ASSERT(i == 1); 1735 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1737 1736 /* new right extent - oldext */ 1738 1737 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, 1739 1738 r[1].br_startblock, r[1].br_blockcount, 1740 1739 r[1].br_state))) 1741 1740 goto done; 1742 1741 /* new left extent - oldext */ 1743 - PREV.br_blockcount = 1744 - new->br_startoff - PREV.br_startoff; 1745 1742 cur->bc_rec.b = PREV; 1743 + cur->bc_rec.b.br_blockcount = 1744 + new->br_startoff - PREV.br_startoff; 1746 1745 if ((error = xfs_bmbt_insert(cur, &i))) 1747 1746 goto done; 1748 - ASSERT(i == 1); 1749 - if ((error = xfs_bmbt_increment(cur, 0, &i))) 1747 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1748 + /* 1749 + * Reset the cursor to the position of the new extent 1750 + * we are about to insert as we can't trust it after 1751 + * the previous insert. 1752 + */ 1753 + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 1754 + new->br_startblock, new->br_blockcount, 1755 + &i))) 1750 1756 goto done; 1751 - ASSERT(i == 1); 1757 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1752 1758 /* new middle extent - newext */ 1753 - cur->bc_rec.b = *new; 1759 + cur->bc_rec.b.br_state = new->br_state; 1754 1760 if ((error = xfs_bmbt_insert(cur, &i))) 1755 1761 goto done; 1756 - ASSERT(i == 1); 1762 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1757 1763 } 1758 1764 /* DELTA: One in-core extent is split in three. 
*/ 1759 1765 temp = PREV.br_startoff; ··· 2105 2097 right.br_startblock, 2106 2098 right.br_blockcount, &i))) 2107 2099 goto done; 2108 - ASSERT(i == 1); 2100 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2109 2101 if ((error = xfs_bmbt_delete(cur, &i))) 2110 2102 goto done; 2111 - ASSERT(i == 1); 2103 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2112 2104 if ((error = xfs_bmbt_decrement(cur, 0, &i))) 2113 2105 goto done; 2114 - ASSERT(i == 1); 2106 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2115 2107 if ((error = xfs_bmbt_update(cur, left.br_startoff, 2116 2108 left.br_startblock, 2117 2109 left.br_blockcount + ··· 2147 2139 left.br_startblock, 2148 2140 left.br_blockcount, &i))) 2149 2141 goto done; 2150 - ASSERT(i == 1); 2142 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2151 2143 if ((error = xfs_bmbt_update(cur, left.br_startoff, 2152 2144 left.br_startblock, 2153 2145 left.br_blockcount + ··· 2182 2174 right.br_startblock, 2183 2175 right.br_blockcount, &i))) 2184 2176 goto done; 2185 - ASSERT(i == 1); 2177 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2186 2178 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2187 2179 new->br_startblock, 2188 2180 new->br_blockcount + ··· 2216 2208 new->br_startblock, 2217 2209 new->br_blockcount, &i))) 2218 2210 goto done; 2219 - ASSERT(i == 0); 2211 + XFS_WANT_CORRUPTED_GOTO(i == 0, done); 2220 2212 cur->bc_rec.b.br_state = new->br_state; 2221 2213 if ((error = xfs_bmbt_insert(cur, &i))) 2222 2214 goto done; 2223 - ASSERT(i == 1); 2215 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2224 2216 } 2225 2217 /* DELTA: A new extent was added in a hole. 
*/ 2226 2218 temp = new->br_startoff; ··· 3139 3131 got.br_startblock, got.br_blockcount, 3140 3132 &i))) 3141 3133 goto done; 3142 - ASSERT(i == 1); 3134 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 3143 3135 } 3144 3136 da_old = da_new = 0; 3145 3137 } else { ··· 3172 3164 } 3173 3165 if ((error = xfs_bmbt_delete(cur, &i))) 3174 3166 goto done; 3175 - ASSERT(i == 1); 3167 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 3176 3168 break; 3177 3169 3178 3170 case 2: ··· 3276 3268 got.br_startblock, 3277 3269 temp, &i))) 3278 3270 goto done; 3279 - ASSERT(i == 1); 3271 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 3280 3272 /* 3281 3273 * Update the btree record back 3282 3274 * to the original value. ··· 3297 3289 error = XFS_ERROR(ENOSPC); 3298 3290 goto done; 3299 3291 } 3300 - ASSERT(i == 1); 3292 + XFS_WANT_CORRUPTED_GOTO(i == 1, done); 3301 3293 } else 3302 3294 flags |= XFS_ILOG_FEXT(whichfork); 3303 3295 XFS_IFORK_NEXT_SET(ip, whichfork, ··· 5978 5970 xfs_iunlock_map_shared(ip, lock); 5979 5971 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 5980 5972 5981 - kmem_free(map, subnex * sizeof(*map)); 5973 + kmem_free(map); 5982 5974 5983 5975 return error; 5984 5976 }
+12 -1
fs/xfs/xfs_bmap.h
··· 54 54 55 55 /* 56 56 * Header for free extent list. 57 + * 58 + * xbf_low is used by the allocator to activate the lowspace algorithm - 59 + * when free space is running low the extent allocator may choose to 60 + * allocate an extent from an AG without leaving sufficient space for 61 + * a btree split when inserting the new extent. In this case the allocator 62 + * will enable the lowspace algorithm which is supposed to allow further 63 + * allocations (such as btree splits and newroots) to allocate from 64 + * sequential AGs. In order to avoid locking AGs out of order the lowspace 65 + * algorithm will start searching for free space from AG 0. If the correct 66 + * transaction reservations have been made then this algorithm will eventually 67 + * find all the space it needs. 57 68 */ 58 69 typedef struct xfs_bmap_free 59 70 { 60 71 xfs_bmap_free_item_t *xbf_first; /* list of to-be-free extents */ 61 72 int xbf_count; /* count of items on list */ 62 - int xbf_low; /* kludge: alloc in low mode */ 73 + int xbf_low; /* alloc in low mode */ 63 74 } xfs_bmap_free_t; 64 75 65 76 #define XFS_BMAP_MAX_NMAP 4
+39 -37
fs/xfs/xfs_bmap_btree.c
··· 1493 1493 left = XFS_BUF_TO_BMBT_BLOCK(lbp); 1494 1494 args.fsbno = cur->bc_private.b.firstblock; 1495 1495 args.firstblock = args.fsbno; 1496 + args.minleft = 0; 1496 1497 if (args.fsbno == NULLFSBLOCK) { 1497 1498 args.fsbno = lbno; 1498 1499 args.type = XFS_ALLOCTYPE_START_BNO; 1499 - } else 1500 + /* 1501 + * Make sure there is sufficient room left in the AG to 1502 + * complete a full tree split for an extent insert. If 1503 + * we are converting the middle part of an extent then 1504 + * we may need space for two tree splits. 1505 + * 1506 + * We are relying on the caller to make the correct block 1507 + * reservation for this operation to succeed. If the 1508 + * reservation amount is insufficient then we may fail a 1509 + * block allocation here and corrupt the filesystem. 1510 + */ 1511 + args.minleft = xfs_trans_get_block_res(args.tp); 1512 + } else if (cur->bc_private.b.flist->xbf_low) 1513 + args.type = XFS_ALLOCTYPE_START_BNO; 1514 + else 1500 1515 args.type = XFS_ALLOCTYPE_NEAR_BNO; 1501 - args.mod = args.minleft = args.alignment = args.total = args.isfl = 1516 + args.mod = args.alignment = args.total = args.isfl = 1502 1517 args.userdata = args.minalignslop = 0; 1503 1518 args.minlen = args.maxlen = args.prod = 1; 1504 1519 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; ··· 1524 1509 if ((error = xfs_alloc_vextent(&args))) { 1525 1510 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1526 1511 return error; 1512 + } 1513 + if (args.fsbno == NULLFSBLOCK && args.minleft) { 1514 + /* 1515 + * Could not find an AG with enough free space to satisfy 1516 + * a full btree split. Try again without minleft and if 1517 + * successful activate the lowspace algorithm. 
1518 + */ 1519 + args.fsbno = 0; 1520 + args.type = XFS_ALLOCTYPE_FIRST_AG; 1521 + args.minleft = 0; 1522 + if ((error = xfs_alloc_vextent(&args))) { 1523 + XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1524 + return error; 1525 + } 1526 + cur->bc_private.b.flist->xbf_low = 1; 1527 1527 } 1528 1528 if (args.fsbno == NULLFSBLOCK) { 1529 1529 XFS_BMBT_TRACE_CURSOR(cur, EXIT); ··· 2059 2029 * Insert the current record at the point referenced by cur. 2060 2030 * 2061 2031 * A multi-level split of the tree on insert will invalidate the original 2062 - * cursor. It appears, however, that some callers assume that the cursor is 2063 - * always valid. Hence if we do a multi-level split we need to revalidate the 2064 - * cursor. 2065 - * 2066 - * When a split occurs, we will see a new cursor returned. Use that as a 2067 - * trigger to determine if we need to revalidate the original cursor. If we get 2068 - * a split, then use the original irec to lookup up the path of the record we 2069 - * just inserted. 2070 - * 2071 - * Note that the fact that the btree root is in the inode means that we can 2072 - * have the level of the tree change without a "split" occurring at the root 2073 - * level. What happens is that the root is migrated to an allocated block and 2074 - * the inode root is pointed to it. This means a single split can change the 2075 - * level of the tree (level 2 -> level 3) and invalidate the old cursor. Hence 2076 - * the level change should be accounted as a split so as to correctly trigger a 2077 - * revalidation of the old cursor. 2032 + * cursor. All callers of this function should assume that the cursor is 2033 + * no longer valid and revalidate it. 
2078 2034 */ 2079 2035 int /* error */ 2080 2036 xfs_bmbt_insert( ··· 2073 2057 xfs_fsblock_t nbno; 2074 2058 xfs_btree_cur_t *ncur; 2075 2059 xfs_bmbt_rec_t nrec; 2076 - xfs_bmbt_irec_t oirec; /* original irec */ 2077 2060 xfs_btree_cur_t *pcur; 2078 - int splits = 0; 2079 2061 2080 2062 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 2081 2063 level = 0; 2082 2064 nbno = NULLFSBLOCK; 2083 - oirec = cur->bc_rec.b; 2084 2065 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); 2085 2066 ncur = NULL; 2086 2067 pcur = cur; ··· 2086 2073 &i))) { 2087 2074 if (pcur != cur) 2088 2075 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); 2089 - goto error0; 2076 + XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2077 + return error; 2090 2078 } 2091 2079 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 2092 2080 if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { 2093 - /* allocating a new root is effectively a split */ 2094 - if (cur->bc_nlevels != pcur->bc_nlevels) 2095 - splits++; 2096 2081 cur->bc_nlevels = pcur->bc_nlevels; 2097 2082 cur->bc_private.b.allocated += 2098 2083 pcur->bc_private.b.allocated; ··· 2104 2093 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); 2105 2094 } 2106 2095 if (ncur) { 2107 - splits++; 2108 2096 pcur = ncur; 2109 2097 ncur = NULL; 2110 2098 } 2111 2099 } while (nbno != NULLFSBLOCK); 2112 - 2113 - if (splits > 1) { 2114 - /* revalidate the old cursor as we had a multi-level split */ 2115 - error = xfs_bmbt_lookup_eq(cur, oirec.br_startoff, 2116 - oirec.br_startblock, oirec.br_blockcount, &i); 2117 - if (error) 2118 - goto error0; 2119 - ASSERT(i == 1); 2120 - } 2121 - 2122 2100 XFS_BMBT_TRACE_CURSOR(cur, EXIT); 2123 2101 *stat = i; 2124 2102 return 0; ··· 2254 2254 #endif 2255 2255 args.fsbno = be64_to_cpu(*pp); 2256 2256 args.type = XFS_ALLOCTYPE_START_BNO; 2257 - } else 2257 + } else if (cur->bc_private.b.flist->xbf_low) 2258 + args.type = XFS_ALLOCTYPE_START_BNO; 2259 + else 2258 2260 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2259 2261 if ((error = xfs_alloc_vextent(&args))) { 2260 2262 
XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+4 -4
fs/xfs/xfs_buf_item.c
··· 889 889 } 890 890 891 891 #ifdef XFS_TRANS_DEBUG 892 - kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); 892 + kmem_free(bip->bli_orig); 893 893 bip->bli_orig = NULL; 894 - kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); 894 + kmem_free(bip->bli_logged); 895 895 bip->bli_logged = NULL; 896 896 #endif /* XFS_TRANS_DEBUG */ 897 897 ··· 1138 1138 xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); 1139 1139 1140 1140 #ifdef XFS_TRANS_DEBUG 1141 - kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); 1141 + kmem_free(bip->bli_orig); 1142 1142 bip->bli_orig = NULL; 1143 - kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); 1143 + kmem_free(bip->bli_logged); 1144 1144 bip->bli_logged = NULL; 1145 1145 #endif /* XFS_TRANS_DEBUG */ 1146 1146
+1
fs/xfs/xfs_clnt.h
··· 78 78 #define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */ 79 79 #define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */ 80 80 /* (osyncisdsync is default) */ 81 + #define XFSMNT_NOATTR2 0x00008000 /* turn off ATTR2 EA format */ 81 82 #define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32 82 83 * bits of address space */ 83 84 #define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */
+35 -13
fs/xfs/xfs_da_btree.c
··· 1431 1431 } 1432 1432 if (level < 0) { 1433 1433 *result = XFS_ERROR(ENOENT); /* we're out of our tree */ 1434 - ASSERT(args->oknoent); 1434 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1435 1435 return(0); 1436 1436 } 1437 1437 ··· 1530 1530 } 1531 1531 } 1532 1532 1533 + enum xfs_dacmp 1534 + xfs_da_compname( 1535 + struct xfs_da_args *args, 1536 + const char *name, 1537 + int len) 1538 + { 1539 + return (args->namelen == len && memcmp(args->name, name, len) == 0) ? 1540 + XFS_CMP_EXACT : XFS_CMP_DIFFERENT; 1541 + } 1542 + 1543 + static xfs_dahash_t 1544 + xfs_default_hashname( 1545 + struct xfs_name *name) 1546 + { 1547 + return xfs_da_hashname(name->name, name->len); 1548 + } 1549 + 1550 + const struct xfs_nameops xfs_default_nameops = { 1551 + .hashname = xfs_default_hashname, 1552 + .compname = xfs_da_compname 1553 + }; 1554 + 1533 1555 /* 1534 1556 * Add a block to the btree ahead of the file. 1535 1557 * Return the new block number to the caller. ··· 1620 1598 args->firstblock, args->total, 1621 1599 &mapp[mapi], &nmap, args->flist, 1622 1600 NULL))) { 1623 - kmem_free(mapp, sizeof(*mapp) * count); 1601 + kmem_free(mapp); 1624 1602 return error; 1625 1603 } 1626 1604 if (nmap < 1) ··· 1642 1620 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 1643 1621 bno + count) { 1644 1622 if (mapp != &map) 1645 - kmem_free(mapp, sizeof(*mapp) * count); 1623 + kmem_free(mapp); 1646 1624 return XFS_ERROR(ENOSPC); 1647 1625 } 1648 1626 if (mapp != &map) 1649 - kmem_free(mapp, sizeof(*mapp) * count); 1627 + kmem_free(mapp); 1650 1628 *new_blkno = (xfs_dablk_t)bno; 1651 1629 return 0; 1652 1630 } ··· 2112 2090 } 2113 2091 } 2114 2092 if (bplist) { 2115 - kmem_free(bplist, sizeof(*bplist) * nmap); 2093 + kmem_free(bplist); 2116 2094 } 2117 2095 if (mapp != &map) { 2118 - kmem_free(mapp, sizeof(*mapp) * nfsb); 2096 + kmem_free(mapp); 2119 2097 } 2120 2098 if (bpp) 2121 2099 *bpp = rbp; ··· 2124 2102 if (bplist) { 2125 2103 for (i = 0; i < nbplist; i++) 2126 
2104 xfs_trans_brelse(trans, bplist[i]); 2127 - kmem_free(bplist, sizeof(*bplist) * nmap); 2105 + kmem_free(bplist); 2128 2106 } 2129 2107 exit0: 2130 2108 if (mapp != &map) 2131 - kmem_free(mapp, sizeof(*mapp) * nfsb); 2109 + kmem_free(mapp); 2132 2110 if (bpp) 2133 2111 *bpp = NULL; 2134 2112 return error; ··· 2240 2218 2241 2219 #ifdef XFS_DABUF_DEBUG 2242 2220 xfs_dabuf_t *xfs_dabuf_global_list; 2243 - spinlock_t xfs_dabuf_global_lock; 2221 + static DEFINE_SPINLOCK(xfs_dabuf_global_lock); 2244 2222 #endif 2245 2223 2246 2224 /* ··· 2337 2315 if (dabuf->dirty) 2338 2316 xfs_da_buf_clean(dabuf); 2339 2317 if (dabuf->nbuf > 1) 2340 - kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); 2318 + kmem_free(dabuf->data); 2341 2319 #ifdef XFS_DABUF_DEBUG 2342 2320 { 2343 2321 spin_lock(&xfs_dabuf_global_lock); ··· 2354 2332 if (dabuf->nbuf == 1) 2355 2333 kmem_zone_free(xfs_dabuf_zone, dabuf); 2356 2334 else 2357 - kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf)); 2335 + kmem_free(dabuf); 2358 2336 } 2359 2337 2360 2338 /* ··· 2425 2403 for (i = 0; i < nbuf; i++) 2426 2404 xfs_trans_brelse(tp, bplist[i]); 2427 2405 if (bplist != &bp) 2428 - kmem_free(bplist, nbuf * sizeof(*bplist)); 2406 + kmem_free(bplist); 2429 2407 } 2430 2408 2431 2409 /* ··· 2451 2429 for (i = 0; i < nbuf; i++) 2452 2430 xfs_trans_binval(tp, bplist[i]); 2453 2431 if (bplist != &bp) 2454 - kmem_free(bplist, nbuf * sizeof(*bplist)); 2432 + kmem_free(bplist); 2455 2433 } 2456 2434 2457 2435 /*
+32 -4
fs/xfs/xfs_da_btree.h
··· 99 99 *========================================================================*/ 100 100 101 101 /* 102 + * Search comparison results 103 + */ 104 + enum xfs_dacmp { 105 + XFS_CMP_DIFFERENT, /* names are completely different */ 106 + XFS_CMP_EXACT, /* names are exactly the same */ 107 + XFS_CMP_CASE /* names are same but differ in case */ 108 + }; 109 + 110 + /* 102 111 * Structure to ease passing around component names. 103 112 */ 104 113 typedef struct xfs_da_args { ··· 132 123 int index2; /* index of 2nd attr in blk */ 133 124 xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ 134 125 int rmtblkcnt2; /* remote attr value block count */ 135 - unsigned char justcheck; /* T/F: check for ok with no space */ 136 - unsigned char rename; /* T/F: this is an atomic rename op */ 137 - unsigned char addname; /* T/F: this is an add operation */ 138 - unsigned char oknoent; /* T/F: ok to return ENOENT, else die */ 126 + int op_flags; /* operation flags */ 127 + enum xfs_dacmp cmpresult; /* name compare result for lookups */ 139 128 } xfs_da_args_t; 129 + 130 + /* 131 + * Operation flags: 132 + */ 133 + #define XFS_DA_OP_JUSTCHECK 0x0001 /* check for ok with no space */ 134 + #define XFS_DA_OP_RENAME 0x0002 /* this is an atomic rename op */ 135 + #define XFS_DA_OP_ADDNAME 0x0004 /* this is an add operation */ 136 + #define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */ 137 + #define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */ 140 138 141 139 /* 142 140 * Structure to describe buffer(s) for a block. 
··· 217 201 (uint)(XFS_DA_LOGOFF(BASE, ADDR)), \ 218 202 (uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1) 219 203 204 + /* 205 + * Name ops for directory and/or attr name operations 206 + */ 207 + struct xfs_nameops { 208 + xfs_dahash_t (*hashname)(struct xfs_name *); 209 + enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int); 210 + }; 211 + 220 212 221 213 #ifdef __KERNEL__ 222 214 /*======================================================================== ··· 273 249 xfs_dabuf_t *dead_buf); 274 250 275 251 uint xfs_da_hashname(const uchar_t *name_string, int name_length); 252 + enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args, 253 + const char *name, int len); 254 + 255 + 276 256 xfs_da_state_t *xfs_da_state_alloc(void); 277 257 void xfs_da_state_free(xfs_da_state_t *state); 278 258
+2 -2
fs/xfs/xfs_dfrag.c
··· 116 116 out_put_file: 117 117 fput(file); 118 118 out_free_sxp: 119 - kmem_free(sxp, sizeof(xfs_swapext_t)); 119 + kmem_free(sxp); 120 120 out: 121 121 return error; 122 122 } ··· 381 381 xfs_iunlock(tip, lock_flags); 382 382 } 383 383 if (tempifp != NULL) 384 - kmem_free(tempifp, sizeof(xfs_ifork_t)); 384 + kmem_free(tempifp); 385 385 return error; 386 386 }
+108 -19
fs/xfs/xfs_dir2.c
··· 46 46 47 47 struct xfs_name xfs_name_dotdot = {"..", 2}; 48 48 49 + extern const struct xfs_nameops xfs_default_nameops; 50 + 51 + /* 52 + * ASCII case-insensitive (ie. A-Z) support for directories that was 53 + * used in IRIX. 54 + */ 55 + STATIC xfs_dahash_t 56 + xfs_ascii_ci_hashname( 57 + struct xfs_name *name) 58 + { 59 + xfs_dahash_t hash; 60 + int i; 61 + 62 + for (i = 0, hash = 0; i < name->len; i++) 63 + hash = tolower(name->name[i]) ^ rol32(hash, 7); 64 + 65 + return hash; 66 + } 67 + 68 + STATIC enum xfs_dacmp 69 + xfs_ascii_ci_compname( 70 + struct xfs_da_args *args, 71 + const char *name, 72 + int len) 73 + { 74 + enum xfs_dacmp result; 75 + int i; 76 + 77 + if (args->namelen != len) 78 + return XFS_CMP_DIFFERENT; 79 + 80 + result = XFS_CMP_EXACT; 81 + for (i = 0; i < len; i++) { 82 + if (args->name[i] == name[i]) 83 + continue; 84 + if (tolower(args->name[i]) != tolower(name[i])) 85 + return XFS_CMP_DIFFERENT; 86 + result = XFS_CMP_CASE; 87 + } 88 + 89 + return result; 90 + } 91 + 92 + static struct xfs_nameops xfs_ascii_ci_nameops = { 93 + .hashname = xfs_ascii_ci_hashname, 94 + .compname = xfs_ascii_ci_compname, 95 + }; 96 + 49 97 void 50 98 xfs_dir_mount( 51 99 xfs_mount_t *mp) ··· 113 65 (mp->m_dirblksize - (uint)sizeof(xfs_da_node_hdr_t)) / 114 66 (uint)sizeof(xfs_da_node_entry_t); 115 67 mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100; 68 + if (xfs_sb_version_hasasciici(&mp->m_sb)) 69 + mp->m_dirnameops = &xfs_ascii_ci_nameops; 70 + else 71 + mp->m_dirnameops = &xfs_default_nameops; 116 72 } 117 73 118 74 /* ··· 214 162 return rval; 215 163 XFS_STATS_INC(xs_dir_create); 216 164 165 + memset(&args, 0, sizeof(xfs_da_args_t)); 217 166 args.name = name->name; 218 167 args.namelen = name->len; 219 - args.hashval = xfs_da_hashname(name->name, name->len); 168 + args.hashval = dp->i_mount->m_dirnameops->hashname(name); 220 169 args.inumber = inum; 221 170 args.dp = dp; 222 171 args.firstblock = first; ··· 225 172 args.total = total; 226 173 
args.whichfork = XFS_DATA_FORK; 227 174 args.trans = tp; 228 - args.justcheck = 0; 229 - args.addname = args.oknoent = 1; 175 + args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; 230 176 231 177 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) 232 178 rval = xfs_dir2_sf_addname(&args); ··· 243 191 } 244 192 245 193 /* 246 - * Lookup a name in a directory, give back the inode number. 194 + * If doing a CI lookup and case-insensitive match, dup actual name into 195 + * args.value. Return EEXIST for success (ie. name found) or an error. 247 196 */ 197 + int 198 + xfs_dir_cilookup_result( 199 + struct xfs_da_args *args, 200 + const char *name, 201 + int len) 202 + { 203 + if (args->cmpresult == XFS_CMP_DIFFERENT) 204 + return ENOENT; 205 + if (args->cmpresult != XFS_CMP_CASE || 206 + !(args->op_flags & XFS_DA_OP_CILOOKUP)) 207 + return EEXIST; 208 + 209 + args->value = kmem_alloc(len, KM_MAYFAIL); 210 + if (!args->value) 211 + return ENOMEM; 212 + 213 + memcpy(args->value, name, len); 214 + args->valuelen = len; 215 + return EEXIST; 216 + } 217 + 218 + /* 219 + * Lookup a name in a directory, give back the inode number. 220 + * If ci_name is not NULL, returns the actual name in ci_name if it differs 221 + * to name, or ci_name->name is set to NULL for an exact match. 
222 + */ 223 + 248 224 int 249 225 xfs_dir_lookup( 250 226 xfs_trans_t *tp, 251 227 xfs_inode_t *dp, 252 228 struct xfs_name *name, 253 - xfs_ino_t *inum) /* out: inode number */ 229 + xfs_ino_t *inum, /* out: inode number */ 230 + struct xfs_name *ci_name) /* out: actual name if CI match */ 254 231 { 255 232 xfs_da_args_t args; 256 233 int rval; ··· 287 206 288 207 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 289 208 XFS_STATS_INC(xs_dir_lookup); 290 - memset(&args, 0, sizeof(xfs_da_args_t)); 291 209 210 + memset(&args, 0, sizeof(xfs_da_args_t)); 292 211 args.name = name->name; 293 212 args.namelen = name->len; 294 - args.hashval = xfs_da_hashname(name->name, name->len); 213 + args.hashval = dp->i_mount->m_dirnameops->hashname(name); 295 214 args.dp = dp; 296 215 args.whichfork = XFS_DATA_FORK; 297 216 args.trans = tp; 298 - args.oknoent = 1; 217 + args.op_flags = XFS_DA_OP_OKNOENT; 218 + if (ci_name) 219 + args.op_flags |= XFS_DA_OP_CILOOKUP; 299 220 300 221 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) 301 222 rval = xfs_dir2_sf_lookup(&args); ··· 313 230 rval = xfs_dir2_node_lookup(&args); 314 231 if (rval == EEXIST) 315 232 rval = 0; 316 - if (rval == 0) 233 + if (!rval) { 317 234 *inum = args.inumber; 235 + if (ci_name) { 236 + ci_name->name = args.value; 237 + ci_name->len = args.valuelen; 238 + } 239 + } 318 240 return rval; 319 241 } 320 242 ··· 343 255 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 344 256 XFS_STATS_INC(xs_dir_remove); 345 257 258 + memset(&args, 0, sizeof(xfs_da_args_t)); 346 259 args.name = name->name; 347 260 args.namelen = name->len; 348 - args.hashval = xfs_da_hashname(name->name, name->len); 261 + args.hashval = dp->i_mount->m_dirnameops->hashname(name); 349 262 args.inumber = ino; 350 263 args.dp = dp; 351 264 args.firstblock = first; ··· 354 265 args.total = total; 355 266 args.whichfork = XFS_DATA_FORK; 356 267 args.trans = tp; 357 - args.justcheck = args.addname = args.oknoent = 0; 358 268 359 269 if (dp->i_d.di_format == 
XFS_DINODE_FMT_LOCAL) 360 270 rval = xfs_dir2_sf_removename(&args); ··· 426 338 if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) 427 339 return rval; 428 340 341 + memset(&args, 0, sizeof(xfs_da_args_t)); 429 342 args.name = name->name; 430 343 args.namelen = name->len; 431 - args.hashval = xfs_da_hashname(name->name, name->len); 344 + args.hashval = dp->i_mount->m_dirnameops->hashname(name); 432 345 args.inumber = inum; 433 346 args.dp = dp; 434 347 args.firstblock = first; ··· 437 348 args.total = total; 438 349 args.whichfork = XFS_DATA_FORK; 439 350 args.trans = tp; 440 - args.justcheck = args.addname = args.oknoent = 0; 441 351 442 352 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) 443 353 rval = xfs_dir2_sf_replace(&args); ··· 472 384 return 0; 473 385 474 386 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 475 - memset(&args, 0, sizeof(xfs_da_args_t)); 476 387 388 + memset(&args, 0, sizeof(xfs_da_args_t)); 477 389 args.name = name->name; 478 390 args.namelen = name->len; 479 - args.hashval = xfs_da_hashname(name->name, name->len); 391 + args.hashval = dp->i_mount->m_dirnameops->hashname(name); 480 392 args.dp = dp; 481 393 args.whichfork = XFS_DATA_FORK; 482 394 args.trans = tp; 483 - args.justcheck = args.addname = args.oknoent = 1; 395 + args.op_flags = XFS_DA_OP_JUSTCHECK | XFS_DA_OP_ADDNAME | 396 + XFS_DA_OP_OKNOENT; 484 397 485 398 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) 486 399 rval = xfs_dir2_sf_addname(&args); ··· 582 493 args->firstblock, args->total, 583 494 &mapp[mapi], &nmap, args->flist, 584 495 NULL))) { 585 - kmem_free(mapp, sizeof(*mapp) * count); 496 + kmem_free(mapp); 586 497 return error; 587 498 } 588 499 if (nmap < 1) ··· 614 525 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 615 526 bno + count) { 616 527 if (mapp != &map) 617 - kmem_free(mapp, sizeof(*mapp) * count); 528 + kmem_free(mapp); 618 529 return XFS_ERROR(ENOSPC); 619 530 } 620 531 /* 621 532 * Done with the temporary mapping table. 
622 533 */ 623 534 if (mapp != &map) 624 - kmem_free(mapp, sizeof(*mapp) * count); 535 + kmem_free(mapp); 625 536 *dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno); 626 537 /* 627 538 * Update file's size if this is the data space and it grew.
+5 -1
fs/xfs/xfs_dir2.h
··· 74 74 xfs_fsblock_t *first, 75 75 struct xfs_bmap_free *flist, xfs_extlen_t tot); 76 76 extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, 77 - struct xfs_name *name, xfs_ino_t *inum); 77 + struct xfs_name *name, xfs_ino_t *inum, 78 + struct xfs_name *ci_name); 78 79 extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, 79 80 struct xfs_name *name, xfs_ino_t ino, 80 81 xfs_fsblock_t *first, ··· 99 98 int *vp); 100 99 extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, 101 100 struct xfs_dabuf *bp); 101 + 102 + extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name, 103 + int len); 102 104 103 105 #endif /* __XFS_DIR2_H__ */
+36 -20
fs/xfs/xfs_dir2_block.c
··· 215 215 /* 216 216 * If this isn't a real add, we're done with the buffer. 217 217 */ 218 - if (args->justcheck) 218 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) 219 219 xfs_da_brelse(tp, bp); 220 220 /* 221 221 * If we don't have space for the new entry & leaf ... ··· 225 225 * Not trying to actually do anything, or don't have 226 226 * a space reservation: return no-space. 227 227 */ 228 - if (args->justcheck || args->total == 0) 228 + if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) 229 229 return XFS_ERROR(ENOSPC); 230 230 /* 231 231 * Convert to the next larger format. ··· 240 240 /* 241 241 * Just checking, and it would work, so say so. 242 242 */ 243 - if (args->justcheck) 243 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) 244 244 return 0; 245 245 needlog = needscan = 0; 246 246 /* ··· 610 610 /* 611 611 * Get the offset from the leaf entry, to point to the data. 612 612 */ 613 - dep = (xfs_dir2_data_entry_t *) 614 - ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); 613 + dep = (xfs_dir2_data_entry_t *)((char *)block + 614 + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); 615 615 /* 616 - * Fill in inode number, release the block. 616 + * Fill in inode number, CI name if appropriate, release the block. 
617 617 */ 618 618 args->inumber = be64_to_cpu(dep->inumber); 619 + error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 619 620 xfs_da_brelse(args->trans, bp); 620 - return XFS_ERROR(EEXIST); 621 + return XFS_ERROR(error); 621 622 } 622 623 623 624 /* ··· 644 643 int mid; /* binary search current idx */ 645 644 xfs_mount_t *mp; /* filesystem mount point */ 646 645 xfs_trans_t *tp; /* transaction pointer */ 646 + enum xfs_dacmp cmp; /* comparison result */ 647 647 648 648 dp = args->dp; 649 649 tp = args->trans; ··· 675 673 else 676 674 high = mid - 1; 677 675 if (low > high) { 678 - ASSERT(args->oknoent); 676 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 679 677 xfs_da_brelse(tp, bp); 680 678 return XFS_ERROR(ENOENT); 681 679 } ··· 699 697 dep = (xfs_dir2_data_entry_t *) 700 698 ((char *)block + xfs_dir2_dataptr_to_off(mp, addr)); 701 699 /* 702 - * Compare, if it's right give back buffer & entry number. 700 + * Compare name and if it's an exact match, return the index 701 + * and buffer. If it's the first case-insensitive match, store 702 + * the index and buffer and continue looking for an exact match. 703 703 */ 704 - if (dep->namelen == args->namelen && 705 - dep->name[0] == args->name[0] && 706 - memcmp(dep->name, args->name, args->namelen) == 0) { 704 + cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen); 705 + if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) { 706 + args->cmpresult = cmp; 707 707 *bpp = bp; 708 708 *entno = mid; 709 - return 0; 709 + if (cmp == XFS_CMP_EXACT) 710 + return 0; 710 711 } 711 - } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash); 712 + } while (++mid < be32_to_cpu(btp->count) && 713 + be32_to_cpu(blp[mid].hashval) == hash); 714 + 715 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 716 + /* 717 + * Here, we can only be doing a lookup (not a rename or replace). 718 + * If a case-insensitive match was found earlier, return success. 
719 + */ 720 + if (args->cmpresult == XFS_CMP_CASE) 721 + return 0; 712 722 /* 713 723 * No match, release the buffer and return ENOENT. 714 724 */ 715 - ASSERT(args->oknoent); 716 725 xfs_da_brelse(tp, bp); 717 726 return XFS_ERROR(ENOENT); 718 727 } ··· 1046 1033 xfs_dir2_sf_t *sfp; /* shortform structure */ 1047 1034 __be16 *tagp; /* end of data entry */ 1048 1035 xfs_trans_t *tp; /* transaction pointer */ 1036 + struct xfs_name name; 1049 1037 1050 1038 xfs_dir2_trace_args("sf_to_block", args); 1051 1039 dp = args->dp; ··· 1085 1071 */ 1086 1072 error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno); 1087 1073 if (error) { 1088 - kmem_free(buf, buf_len); 1074 + kmem_free(buf); 1089 1075 return error; 1090 1076 } 1091 1077 /* ··· 1093 1079 */ 1094 1080 error = xfs_dir2_data_init(args, blkno, &bp); 1095 1081 if (error) { 1096 - kmem_free(buf, buf_len); 1082 + kmem_free(buf); 1097 1083 return error; 1098 1084 } 1099 1085 block = bp->data; ··· 1201 1187 tagp = xfs_dir2_data_entry_tag_p(dep); 1202 1188 *tagp = cpu_to_be16((char *)dep - (char *)block); 1203 1189 xfs_dir2_data_log_entry(tp, bp, dep); 1204 - blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( 1205 - (char *)sfep->name, sfep->namelen)); 1190 + name.name = sfep->name; 1191 + name.len = sfep->namelen; 1192 + blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops-> 1193 + hashname(&name)); 1206 1194 blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1207 1195 (char *)dep - (char *)block)); 1208 1196 offset = (int)((char *)(tagp + 1) - (char *)block); ··· 1214 1198 sfep = xfs_dir2_sf_nextentry(sfp, sfep); 1215 1199 } 1216 1200 /* Done with the temporary buffer */ 1217 - kmem_free(buf, buf_len); 1201 + kmem_free(buf); 1218 1202 /* 1219 1203 * Sort the leaf entries by hash value. 1220 1204 */
+4 -1
fs/xfs/xfs_dir2_data.c
··· 65 65 xfs_mount_t *mp; /* filesystem mount point */ 66 66 char *p; /* current data position */ 67 67 int stale; /* count of stale leaves */ 68 + struct xfs_name name; 68 69 69 70 mp = dp->i_mount; 70 71 d = bp->data; ··· 141 140 addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 142 141 (xfs_dir2_data_aoff_t) 143 142 ((char *)dep - (char *)d)); 144 - hash = xfs_da_hashname((char *)dep->name, dep->namelen); 143 + name.name = dep->name; 144 + name.len = dep->namelen; 145 + hash = mp->m_dirnameops->hashname(&name); 145 146 for (i = 0; i < be32_to_cpu(btp->count); i++) { 146 147 if (be32_to_cpu(lep[i].address) == addr && 147 148 be32_to_cpu(lep[i].hashval) == hash)
+61 -32
fs/xfs/xfs_dir2_leaf.c
··· 263 263 * If we don't have enough free bytes but we can make enough 264 264 * by compacting out stale entries, we'll do that. 265 265 */ 266 - if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes && 267 - be16_to_cpu(leaf->hdr.stale) > 1) { 266 + if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < 267 + needbytes && be16_to_cpu(leaf->hdr.stale) > 1) { 268 268 compact = 1; 269 269 } 270 270 /* 271 271 * Otherwise if we don't have enough free bytes we need to 272 272 * convert to node form. 273 273 */ 274 - else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < 275 - needbytes) { 274 + else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu( 275 + leaf->hdr.count)] < needbytes) { 276 276 /* 277 277 * Just checking or no space reservation, give up. 278 278 */ 279 - if (args->justcheck || args->total == 0) { 279 + if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || 280 + args->total == 0) { 280 281 xfs_da_brelse(tp, lbp); 281 282 return XFS_ERROR(ENOSPC); 282 283 } ··· 302 301 * If just checking, then it will fit unless we needed to allocate 303 302 * a new data block. 304 303 */ 305 - if (args->justcheck) { 304 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 306 305 xfs_da_brelse(tp, lbp); 307 306 return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; 308 307 } ··· 1111 1110 *offset = XFS_DIR2_MAX_DATAPTR; 1112 1111 else 1113 1112 *offset = xfs_dir2_byte_to_dataptr(mp, curoff); 1114 - kmem_free(map, map_size * sizeof(*map)); 1113 + kmem_free(map); 1115 1114 if (bp) 1116 1115 xfs_da_brelse(NULL, bp); 1117 1116 return error; ··· 1299 1298 ((char *)dbp->data + 1300 1299 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address))); 1301 1300 /* 1302 - * Return the found inode number. 
1301 + * Return the found inode number & CI name if appropriate 1303 1302 */ 1304 1303 args->inumber = be64_to_cpu(dep->inumber); 1304 + error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 1305 1305 xfs_da_brelse(tp, dbp); 1306 1306 xfs_da_brelse(tp, lbp); 1307 - return XFS_ERROR(EEXIST); 1307 + return XFS_ERROR(error); 1308 1308 } 1309 1309 1310 1310 /* ··· 1321 1319 int *indexp, /* out: index in leaf block */ 1322 1320 xfs_dabuf_t **dbpp) /* out: data buffer */ 1323 1321 { 1324 - xfs_dir2_db_t curdb; /* current data block number */ 1325 - xfs_dabuf_t *dbp; /* data buffer */ 1322 + xfs_dir2_db_t curdb = -1; /* current data block number */ 1323 + xfs_dabuf_t *dbp = NULL; /* data buffer */ 1326 1324 xfs_dir2_data_entry_t *dep; /* data entry */ 1327 1325 xfs_inode_t *dp; /* incore directory inode */ 1328 1326 int error; /* error return code */ ··· 1333 1331 xfs_mount_t *mp; /* filesystem mount point */ 1334 1332 xfs_dir2_db_t newdb; /* new data block number */ 1335 1333 xfs_trans_t *tp; /* transaction pointer */ 1334 + xfs_dir2_db_t cidb = -1; /* case match data block no. */ 1335 + enum xfs_dacmp cmp; /* name compare result */ 1336 1336 1337 1337 dp = args->dp; 1338 1338 tp = args->trans; ··· 1342 1338 /* 1343 1339 * Read the leaf block into the buffer. 1344 1340 */ 1345 - if ((error = 1346 - xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, 1347 - XFS_DATA_FORK))) { 1341 + error = xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, 1342 + XFS_DATA_FORK); 1343 + if (error) 1348 1344 return error; 1349 - } 1350 1345 *lbpp = lbp; 1351 1346 leaf = lbp->data; 1352 1347 xfs_dir2_leaf_check(dp, lbp); ··· 1357 1354 * Loop over all the entries with the right hash value 1358 1355 * looking to match the name. 
1359 1356 */ 1360 - for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; 1361 - index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; 1362 - lep++, index++) { 1357 + for (lep = &leaf->ents[index]; index < be16_to_cpu(leaf->hdr.count) && 1358 + be32_to_cpu(lep->hashval) == args->hashval; 1359 + lep++, index++) { 1363 1360 /* 1364 1361 * Skip over stale leaf entries. 1365 1362 */ ··· 1376 1373 if (newdb != curdb) { 1377 1374 if (dbp) 1378 1375 xfs_da_brelse(tp, dbp); 1379 - if ((error = 1380 - xfs_da_read_buf(tp, dp, 1381 - xfs_dir2_db_to_da(mp, newdb), -1, &dbp, 1382 - XFS_DATA_FORK))) { 1376 + error = xfs_da_read_buf(tp, dp, 1377 + xfs_dir2_db_to_da(mp, newdb), 1378 + -1, &dbp, XFS_DATA_FORK); 1379 + if (error) { 1383 1380 xfs_da_brelse(tp, lbp); 1384 1381 return error; 1385 1382 } ··· 1389 1386 /* 1390 1387 * Point to the data entry. 1391 1388 */ 1392 - dep = (xfs_dir2_data_entry_t *) 1393 - ((char *)dbp->data + 1394 - xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 1389 + dep = (xfs_dir2_data_entry_t *)((char *)dbp->data + 1390 + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 1395 1391 /* 1396 - * If it matches then return it. 1392 + * Compare name and if it's an exact match, return the index 1393 + * and buffer. If it's the first case-insensitive match, store 1394 + * the index and buffer and continue looking for an exact match. 1397 1395 */ 1398 - if (dep->namelen == args->namelen && 1399 - dep->name[0] == args->name[0] && 1400 - memcmp(dep->name, args->name, args->namelen) == 0) { 1401 - *dbpp = dbp; 1396 + cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen); 1397 + if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) { 1398 + args->cmpresult = cmp; 1402 1399 *indexp = index; 1403 - return 0; 1400 + /* case exact match: return the current buffer. 
*/ 1401 + if (cmp == XFS_CMP_EXACT) { 1402 + *dbpp = dbp; 1403 + return 0; 1404 + } 1405 + cidb = curdb; 1404 1406 } 1407 + } 1408 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1409 + /* 1410 + * Here, we can only be doing a lookup (not a rename or remove). 1411 + * If a case-insensitive match was found earlier, re-read the 1412 + * appropriate data block if required and return it. 1413 + */ 1414 + if (args->cmpresult == XFS_CMP_CASE) { 1415 + ASSERT(cidb != -1); 1416 + if (cidb != curdb) { 1417 + xfs_da_brelse(tp, dbp); 1418 + error = xfs_da_read_buf(tp, dp, 1419 + xfs_dir2_db_to_da(mp, cidb), 1420 + -1, &dbp, XFS_DATA_FORK); 1421 + if (error) { 1422 + xfs_da_brelse(tp, lbp); 1423 + return error; 1424 + } 1425 + } 1426 + *dbpp = dbp; 1427 + return 0; 1405 1428 } 1406 1429 /* 1407 1430 * No match found, return ENOENT. 1408 1431 */ 1409 - ASSERT(args->oknoent); 1432 + ASSERT(cidb == -1); 1410 1433 if (dbp) 1411 1434 xfs_da_brelse(tp, dbp); 1412 1435 xfs_da_brelse(tp, lbp);
+246 -170
fs/xfs/xfs_dir2_node.c
··· 226 226 ASSERT(index == be16_to_cpu(leaf->hdr.count) || 227 227 be32_to_cpu(leaf->ents[index].hashval) >= args->hashval); 228 228 229 - if (args->justcheck) 229 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) 230 230 return 0; 231 231 232 232 /* ··· 387 387 } 388 388 389 389 /* 390 - * Look up a leaf entry in a node-format leaf block. 391 - * If this is an addname then the extrablk in state is a freespace block, 392 - * otherwise it's a data block. 390 + * Look up a leaf entry for space to add a name in a node-format leaf block. 391 + * The extrablk in state is a freespace block. 393 392 */ 394 - int 395 - xfs_dir2_leafn_lookup_int( 393 + STATIC int 394 + xfs_dir2_leafn_lookup_for_addname( 396 395 xfs_dabuf_t *bp, /* leaf buffer */ 397 396 xfs_da_args_t *args, /* operation arguments */ 398 397 int *indexp, /* out: leaf entry index */ 399 398 xfs_da_state_t *state) /* state to fill in */ 400 399 { 401 - xfs_dabuf_t *curbp; /* current data/free buffer */ 402 - xfs_dir2_db_t curdb; /* current data block number */ 403 - xfs_dir2_db_t curfdb; /* current free block number */ 404 - xfs_dir2_data_entry_t *dep; /* data block entry */ 400 + xfs_dabuf_t *curbp = NULL; /* current data/free buffer */ 401 + xfs_dir2_db_t curdb = -1; /* current data block number */ 402 + xfs_dir2_db_t curfdb = -1; /* current free block number */ 405 403 xfs_inode_t *dp; /* incore directory inode */ 406 404 int error; /* error return value */ 407 405 int fi; /* free entry index */ 408 - xfs_dir2_free_t *free=NULL; /* free block structure */ 406 + xfs_dir2_free_t *free = NULL; /* free block structure */ 409 407 int index; /* leaf entry index */ 410 408 xfs_dir2_leaf_t *leaf; /* leaf structure */ 411 - int length=0; /* length of new data entry */ 409 + int length; /* length of new data entry */ 412 410 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 413 411 xfs_mount_t *mp; /* filesystem mount point */ 414 412 xfs_dir2_db_t newdb; /* new data block number */ ··· 429 431 /* 430 432 * Do we have a buffer 
coming in? 431 433 */ 432 - if (state->extravalid) 434 + if (state->extravalid) { 435 + /* If so, it's a free block buffer, get the block number. */ 433 436 curbp = state->extrablk.bp; 434 - else 435 - curbp = NULL; 436 - /* 437 - * For addname, it's a free block buffer, get the block number. 438 - */ 439 - if (args->addname) { 440 - curfdb = curbp ? state->extrablk.blkno : -1; 441 - curdb = -1; 442 - length = xfs_dir2_data_entsize(args->namelen); 443 - if ((free = (curbp ? curbp->data : NULL))) 444 - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); 437 + curfdb = state->extrablk.blkno; 438 + free = curbp->data; 439 + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); 445 440 } 446 - /* 447 - * For others, it's a data block buffer, get the block number. 448 - */ 449 - else { 450 - curfdb = -1; 451 - curdb = curbp ? state->extrablk.blkno : -1; 452 - } 441 + length = xfs_dir2_data_entsize(args->namelen); 453 442 /* 454 443 * Loop over leaf entries with the right hash value. 455 444 */ 456 - for (lep = &leaf->ents[index]; 457 - index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; 458 - lep++, index++) { 445 + for (lep = &leaf->ents[index]; index < be16_to_cpu(leaf->hdr.count) && 446 + be32_to_cpu(lep->hashval) == args->hashval; 447 + lep++, index++) { 459 448 /* 460 449 * Skip stale leaf entries. 461 450 */ ··· 456 471 * For addname, we're looking for a place to put the new entry. 457 472 * We want to use a data block with an entry of equal 458 473 * hash value to ours if there is one with room. 474 + * 475 + * If this block isn't the data block we already have 476 + * in hand, take a look at it. 459 477 */ 460 - if (args->addname) { 478 + if (newdb != curdb) { 479 + curdb = newdb; 461 480 /* 462 - * If this block isn't the data block we already have 463 - * in hand, take a look at it. 481 + * Convert the data block to the free block 482 + * holding its freespace information. 
464 483 */ 465 - if (newdb != curdb) { 466 - curdb = newdb; 467 - /* 468 - * Convert the data block to the free block 469 - * holding its freespace information. 470 - */ 471 - newfdb = xfs_dir2_db_to_fdb(mp, newdb); 472 - /* 473 - * If it's not the one we have in hand, 474 - * read it in. 475 - */ 476 - if (newfdb != curfdb) { 477 - /* 478 - * If we had one before, drop it. 479 - */ 480 - if (curbp) 481 - xfs_da_brelse(tp, curbp); 482 - /* 483 - * Read the free block. 484 - */ 485 - if ((error = xfs_da_read_buf(tp, dp, 486 - xfs_dir2_db_to_da(mp, 487 - newfdb), 488 - -1, &curbp, 489 - XFS_DATA_FORK))) { 490 - return error; 491 - } 492 - free = curbp->data; 493 - ASSERT(be32_to_cpu(free->hdr.magic) == 494 - XFS_DIR2_FREE_MAGIC); 495 - ASSERT((be32_to_cpu(free->hdr.firstdb) % 496 - XFS_DIR2_MAX_FREE_BESTS(mp)) == 497 - 0); 498 - ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); 499 - ASSERT(curdb < 500 - be32_to_cpu(free->hdr.firstdb) + 501 - be32_to_cpu(free->hdr.nvalid)); 502 - } 503 - /* 504 - * Get the index for our entry. 505 - */ 506 - fi = xfs_dir2_db_to_fdindex(mp, curdb); 507 - /* 508 - * If it has room, return it. 509 - */ 510 - if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { 511 - XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", 512 - XFS_ERRLEVEL_LOW, mp); 513 - if (curfdb != newfdb) 514 - xfs_da_brelse(tp, curbp); 515 - return XFS_ERROR(EFSCORRUPTED); 516 - } 517 - curfdb = newfdb; 518 - if (be16_to_cpu(free->bests[fi]) >= length) { 519 - *indexp = index; 520 - state->extravalid = 1; 521 - state->extrablk.bp = curbp; 522 - state->extrablk.blkno = curfdb; 523 - state->extrablk.index = fi; 524 - state->extrablk.magic = 525 - XFS_DIR2_FREE_MAGIC; 526 - ASSERT(args->oknoent); 527 - return XFS_ERROR(ENOENT); 528 - } 529 - } 530 - } 531 - /* 532 - * Not adding a new entry, so we really want to find 533 - * the name given to us. 
534 - */ 535 - else { 484 + newfdb = xfs_dir2_db_to_fdb(mp, newdb); 536 485 /* 537 - * If it's a different data block, go get it. 486 + * If it's not the one we have in hand, read it in. 538 487 */ 539 - if (newdb != curdb) { 488 + if (newfdb != curfdb) { 540 489 /* 541 - * If we had a block before, drop it. 490 + * If we had one before, drop it. 542 491 */ 543 492 if (curbp) 544 493 xfs_da_brelse(tp, curbp); 545 494 /* 546 - * Read the data block. 495 + * Read the free block. 547 496 */ 548 - if ((error = 549 - xfs_da_read_buf(tp, dp, 550 - xfs_dir2_db_to_da(mp, newdb), -1, 551 - &curbp, XFS_DATA_FORK))) { 497 + error = xfs_da_read_buf(tp, dp, 498 + xfs_dir2_db_to_da(mp, newfdb), 499 + -1, &curbp, XFS_DATA_FORK); 500 + if (error) 552 501 return error; 553 - } 554 - xfs_dir2_data_check(dp, curbp); 555 - curdb = newdb; 502 + free = curbp->data; 503 + ASSERT(be32_to_cpu(free->hdr.magic) == 504 + XFS_DIR2_FREE_MAGIC); 505 + ASSERT((be32_to_cpu(free->hdr.firstdb) % 506 + XFS_DIR2_MAX_FREE_BESTS(mp)) == 0); 507 + ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); 508 + ASSERT(curdb < be32_to_cpu(free->hdr.firstdb) + 509 + be32_to_cpu(free->hdr.nvalid)); 556 510 } 557 511 /* 558 - * Point to the data entry. 512 + * Get the index for our entry. 559 513 */ 560 - dep = (xfs_dir2_data_entry_t *) 561 - ((char *)curbp->data + 562 - xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 514 + fi = xfs_dir2_db_to_fdindex(mp, curdb); 563 515 /* 564 - * Compare the entry, return it if it matches. 516 + * If it has room, return it. 
565 517 */ 566 - if (dep->namelen == args->namelen && 567 - dep->name[0] == args->name[0] && 568 - memcmp(dep->name, args->name, args->namelen) == 0) { 569 - args->inumber = be64_to_cpu(dep->inumber); 570 - *indexp = index; 571 - state->extravalid = 1; 572 - state->extrablk.bp = curbp; 573 - state->extrablk.blkno = curdb; 574 - state->extrablk.index = 575 - (int)((char *)dep - 576 - (char *)curbp->data); 577 - state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 578 - return XFS_ERROR(EEXIST); 518 + if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { 519 + XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", 520 + XFS_ERRLEVEL_LOW, mp); 521 + if (curfdb != newfdb) 522 + xfs_da_brelse(tp, curbp); 523 + return XFS_ERROR(EFSCORRUPTED); 579 524 } 525 + curfdb = newfdb; 526 + if (be16_to_cpu(free->bests[fi]) >= length) 527 + goto out; 580 528 } 581 529 } 582 - /* 583 - * Didn't find a match. 584 - * If we are holding a buffer, give it back in case our caller 585 - * finds it useful. 586 - */ 587 - if ((state->extravalid = (curbp != NULL))) { 530 + /* Didn't find any space */ 531 + fi = -1; 532 + out: 533 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 534 + if (curbp) { 535 + /* Giving back a free block. */ 536 + state->extravalid = 1; 588 537 state->extrablk.bp = curbp; 589 - state->extrablk.index = -1; 590 - /* 591 - * For addname, giving back a free block. 592 - */ 593 - if (args->addname) { 594 - state->extrablk.blkno = curfdb; 595 - state->extrablk.magic = XFS_DIR2_FREE_MAGIC; 596 - } 597 - /* 598 - * For other callers, giving back a data block. 599 - */ 600 - else { 601 - state->extrablk.blkno = curdb; 602 - state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 603 - } 538 + state->extrablk.index = fi; 539 + state->extrablk.blkno = curfdb; 540 + state->extrablk.magic = XFS_DIR2_FREE_MAGIC; 541 + } else { 542 + state->extravalid = 0; 604 543 } 605 544 /* 606 - * Return the final index, that will be the insertion point. 545 + * Return the index, that will be the insertion point. 
607 546 */ 608 547 *indexp = index; 609 - ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent); 610 548 return XFS_ERROR(ENOENT); 549 + } 550 + 551 + /* 552 + * Look up a leaf entry in a node-format leaf block. 553 + * The extrablk in state a data block. 554 + */ 555 + STATIC int 556 + xfs_dir2_leafn_lookup_for_entry( 557 + xfs_dabuf_t *bp, /* leaf buffer */ 558 + xfs_da_args_t *args, /* operation arguments */ 559 + int *indexp, /* out: leaf entry index */ 560 + xfs_da_state_t *state) /* state to fill in */ 561 + { 562 + xfs_dabuf_t *curbp = NULL; /* current data/free buffer */ 563 + xfs_dir2_db_t curdb = -1; /* current data block number */ 564 + xfs_dir2_data_entry_t *dep; /* data block entry */ 565 + xfs_inode_t *dp; /* incore directory inode */ 566 + int error; /* error return value */ 567 + int index; /* leaf entry index */ 568 + xfs_dir2_leaf_t *leaf; /* leaf structure */ 569 + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 570 + xfs_mount_t *mp; /* filesystem mount point */ 571 + xfs_dir2_db_t newdb; /* new data block number */ 572 + xfs_trans_t *tp; /* transaction pointer */ 573 + enum xfs_dacmp cmp; /* comparison result */ 574 + 575 + dp = args->dp; 576 + tp = args->trans; 577 + mp = dp->i_mount; 578 + leaf = bp->data; 579 + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); 580 + #ifdef __KERNEL__ 581 + ASSERT(be16_to_cpu(leaf->hdr.count) > 0); 582 + #endif 583 + xfs_dir2_leafn_check(dp, bp); 584 + /* 585 + * Look up the hash value in the leaf entries. 586 + */ 587 + index = xfs_dir2_leaf_search_hash(args, bp); 588 + /* 589 + * Do we have a buffer coming in? 590 + */ 591 + if (state->extravalid) { 592 + curbp = state->extrablk.bp; 593 + curdb = state->extrablk.blkno; 594 + } 595 + /* 596 + * Loop over leaf entries with the right hash value. 
597 + */ 598 + for (lep = &leaf->ents[index]; index < be16_to_cpu(leaf->hdr.count) && 599 + be32_to_cpu(lep->hashval) == args->hashval; 600 + lep++, index++) { 601 + /* 602 + * Skip stale leaf entries. 603 + */ 604 + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) 605 + continue; 606 + /* 607 + * Pull the data block number from the entry. 608 + */ 609 + newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 610 + /* 611 + * Not adding a new entry, so we really want to find 612 + * the name given to us. 613 + * 614 + * If it's a different data block, go get it. 615 + */ 616 + if (newdb != curdb) { 617 + /* 618 + * If we had a block before that we aren't saving 619 + * for a CI name, drop it 620 + */ 621 + if (curbp && (args->cmpresult == XFS_CMP_DIFFERENT || 622 + curdb != state->extrablk.blkno)) 623 + xfs_da_brelse(tp, curbp); 624 + /* 625 + * If needing the block that is saved with a CI match, 626 + * use it otherwise read in the new data block. 627 + */ 628 + if (args->cmpresult != XFS_CMP_DIFFERENT && 629 + newdb == state->extrablk.blkno) { 630 + ASSERT(state->extravalid); 631 + curbp = state->extrablk.bp; 632 + } else { 633 + error = xfs_da_read_buf(tp, dp, 634 + xfs_dir2_db_to_da(mp, newdb), 635 + -1, &curbp, XFS_DATA_FORK); 636 + if (error) 637 + return error; 638 + } 639 + xfs_dir2_data_check(dp, curbp); 640 + curdb = newdb; 641 + } 642 + /* 643 + * Point to the data entry. 644 + */ 645 + dep = (xfs_dir2_data_entry_t *)((char *)curbp->data + 646 + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 647 + /* 648 + * Compare the entry and if it's an exact match, return 649 + * EEXIST immediately. If it's the first case-insensitive 650 + * match, store the block & inode number and continue looking. 
651 + */ 652 + cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen); 653 + if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) { 654 + /* If there is a CI match block, drop it */ 655 + if (args->cmpresult != XFS_CMP_DIFFERENT && 656 + curdb != state->extrablk.blkno) 657 + xfs_da_brelse(tp, state->extrablk.bp); 658 + args->cmpresult = cmp; 659 + args->inumber = be64_to_cpu(dep->inumber); 660 + *indexp = index; 661 + state->extravalid = 1; 662 + state->extrablk.bp = curbp; 663 + state->extrablk.blkno = curdb; 664 + state->extrablk.index = (int)((char *)dep - 665 + (char *)curbp->data); 666 + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 667 + if (cmp == XFS_CMP_EXACT) 668 + return XFS_ERROR(EEXIST); 669 + } 670 + } 671 + ASSERT(index == be16_to_cpu(leaf->hdr.count) || 672 + (args->op_flags & XFS_DA_OP_OKNOENT)); 673 + if (curbp) { 674 + if (args->cmpresult == XFS_CMP_DIFFERENT) { 675 + /* Giving back last used data block. */ 676 + state->extravalid = 1; 677 + state->extrablk.bp = curbp; 678 + state->extrablk.index = -1; 679 + state->extrablk.blkno = curdb; 680 + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 681 + } else { 682 + /* If the curbp is not the CI match block, drop it */ 683 + if (state->extrablk.bp != curbp) 684 + xfs_da_brelse(tp, curbp); 685 + } 686 + } else { 687 + state->extravalid = 0; 688 + } 689 + *indexp = index; 690 + return XFS_ERROR(ENOENT); 691 + } 692 + 693 + /* 694 + * Look up a leaf entry in a node-format leaf block. 695 + * If this is an addname then the extrablk in state is a freespace block, 696 + * otherwise it's a data block. 
697 + */ 698 + int 699 + xfs_dir2_leafn_lookup_int( 700 + xfs_dabuf_t *bp, /* leaf buffer */ 701 + xfs_da_args_t *args, /* operation arguments */ 702 + int *indexp, /* out: leaf entry index */ 703 + xfs_da_state_t *state) /* state to fill in */ 704 + { 705 + if (args->op_flags & XFS_DA_OP_ADDNAME) 706 + return xfs_dir2_leafn_lookup_for_addname(bp, args, indexp, 707 + state); 708 + return xfs_dir2_leafn_lookup_for_entry(bp, args, indexp, state); 611 709 } 612 710 613 711 /* ··· 891 823 */ 892 824 if (!state->inleaf) 893 825 blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); 894 - 895 - /* 896 - * Finally sanity check just to make sure we are not returning a negative index 826 + 827 + /* 828 + * Finally sanity check just to make sure we are not returning a 829 + * negative index 897 830 */ 898 831 if(blk2->index < 0) { 899 832 state->inleaf = 1; ··· 1401 1332 /* 1402 1333 * It worked, fix the hash values up the btree. 1403 1334 */ 1404 - if (!args->justcheck) 1335 + if (!(args->op_flags & XFS_DA_OP_JUSTCHECK)) 1405 1336 xfs_da_fixhashpath(state, &state->path); 1406 1337 } else { 1407 1338 /* ··· 1584 1515 /* 1585 1516 * Not allowed to allocate, return failure. 1586 1517 */ 1587 - if (args->justcheck || args->total == 0) { 1518 + if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || 1519 + args->total == 0) { 1588 1520 /* 1589 1521 * Drop the freespace buffer unless it came from our 1590 1522 * caller. ··· 1731 1661 /* 1732 1662 * If just checking, we succeeded. 
1733 1663 */ 1734 - if (args->justcheck) { 1664 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 1735 1665 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) 1736 1666 xfs_da_buf_done(fbp); 1737 1667 return 0; ··· 1837 1767 error = xfs_da_node_lookup_int(state, &rval); 1838 1768 if (error) 1839 1769 rval = error; 1770 + else if (rval == ENOENT && args->cmpresult == XFS_CMP_CASE) { 1771 + /* If a CI match, dup the actual name and return EEXIST */ 1772 + xfs_dir2_data_entry_t *dep; 1773 + 1774 + dep = (xfs_dir2_data_entry_t *)((char *)state->extrablk.bp-> 1775 + data + state->extrablk.index); 1776 + rval = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 1777 + } 1840 1778 /* 1841 1779 * Release the btree blocks and leaf block. 1842 1780 */ ··· 1888 1810 * Look up the entry we're deleting, set up the cursor. 1889 1811 */ 1890 1812 error = xfs_da_node_lookup_int(state, &rval); 1891 - if (error) { 1813 + if (error) 1892 1814 rval = error; 1893 - } 1894 1815 /* 1895 1816 * Didn't find it, upper layer screwed up. 1896 1817 */ ··· 1906 1829 */ 1907 1830 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, 1908 1831 &state->extrablk, &rval); 1909 - if (error) { 1832 + if (error) 1910 1833 return error; 1911 - } 1912 1834 /* 1913 1835 * Fix the hash values up the btree. 1914 1836 */
+48 -35
fs/xfs/xfs_dir2_sf.c
··· 255 255 xfs_dir2_sf_check(args); 256 256 out: 257 257 xfs_trans_log_inode(args->trans, dp, logflags); 258 - kmem_free(block, mp->m_dirblksize); 258 + kmem_free(block); 259 259 return error; 260 260 } 261 261 ··· 332 332 /* 333 333 * Just checking or no space reservation, it doesn't fit. 334 334 */ 335 - if (args->justcheck || args->total == 0) 335 + if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) 336 336 return XFS_ERROR(ENOSPC); 337 337 /* 338 338 * Convert to block form then add the name. ··· 345 345 /* 346 346 * Just checking, it fits. 347 347 */ 348 - if (args->justcheck) 348 + if (args->op_flags & XFS_DA_OP_JUSTCHECK) 349 349 return 0; 350 350 /* 351 351 * Do it the easy way - just add it at the end. ··· 512 512 sfep = xfs_dir2_sf_nextentry(sfp, sfep); 513 513 memcpy(sfep, oldsfep, old_isize - nbytes); 514 514 } 515 - kmem_free(buf, old_isize); 515 + kmem_free(buf); 516 516 dp->i_d.di_size = new_isize; 517 517 xfs_dir2_sf_check(args); 518 518 } ··· 812 812 { 813 813 xfs_inode_t *dp; /* incore directory inode */ 814 814 int i; /* entry index */ 815 + int error; 815 816 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 816 817 xfs_dir2_sf_t *sfp; /* shortform structure */ 818 + enum xfs_dacmp cmp; /* comparison result */ 819 + xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */ 817 820 818 821 xfs_dir2_trace_args("sf_lookup", args); 819 822 xfs_dir2_sf_check(args); ··· 839 836 */ 840 837 if (args->namelen == 1 && args->name[0] == '.') { 841 838 args->inumber = dp->i_ino; 839 + args->cmpresult = XFS_CMP_EXACT; 842 840 return XFS_ERROR(EEXIST); 843 841 } 844 842 /* ··· 848 844 if (args->namelen == 2 && 849 845 args->name[0] == '.' && args->name[1] == '.') { 850 846 args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 847 + args->cmpresult = XFS_CMP_EXACT; 851 848 return XFS_ERROR(EEXIST); 852 849 } 853 850 /* 854 851 * Loop over all the entries trying to match ours. 
855 852 */ 856 - for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 857 - i < sfp->hdr.count; 858 - i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 859 - if (sfep->namelen == args->namelen && 860 - sfep->name[0] == args->name[0] && 861 - memcmp(args->name, sfep->name, args->namelen) == 0) { 862 - args->inumber = 863 - xfs_dir2_sf_get_inumber(sfp, 864 - xfs_dir2_sf_inumberp(sfep)); 865 - return XFS_ERROR(EEXIST); 853 + ci_sfep = NULL; 854 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count; 855 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 856 + /* 857 + * Compare name and if it's an exact match, return the inode 858 + * number. If it's the first case-insensitive match, store the 859 + * inode number and continue looking for an exact match. 860 + */ 861 + cmp = dp->i_mount->m_dirnameops->compname(args, sfep->name, 862 + sfep->namelen); 863 + if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) { 864 + args->cmpresult = cmp; 865 + args->inumber = xfs_dir2_sf_get_inumber(sfp, 866 + xfs_dir2_sf_inumberp(sfep)); 867 + if (cmp == XFS_CMP_EXACT) 868 + return XFS_ERROR(EEXIST); 869 + ci_sfep = sfep; 866 870 } 867 871 } 872 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 868 873 /* 869 - * Didn't find it. 874 + * Here, we can only be doing a lookup (not a rename or replace). 875 + * If a case-insensitive match was not found, return ENOENT. 870 876 */ 871 - ASSERT(args->oknoent); 872 - return XFS_ERROR(ENOENT); 877 + if (!ci_sfep) 878 + return XFS_ERROR(ENOENT); 879 + /* otherwise process the CI match as required by the caller */ 880 + error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen); 881 + return XFS_ERROR(error); 873 882 } 874 883 875 884 /* ··· 921 904 * Loop over the old directory entries. 922 905 * Find the one we're deleting. 
923 906 */ 924 - for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 925 - i < sfp->hdr.count; 926 - i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 927 - if (sfep->namelen == args->namelen && 928 - sfep->name[0] == args->name[0] && 929 - memcmp(sfep->name, args->name, args->namelen) == 0) { 907 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count; 908 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 909 + if (xfs_da_compname(args, sfep->name, sfep->namelen) == 910 + XFS_CMP_EXACT) { 930 911 ASSERT(xfs_dir2_sf_get_inumber(sfp, 931 - xfs_dir2_sf_inumberp(sfep)) == 932 - args->inumber); 912 + xfs_dir2_sf_inumberp(sfep)) == 913 + args->inumber); 933 914 break; 934 915 } 935 916 } 936 917 /* 937 918 * Didn't find it. 938 919 */ 939 - if (i == sfp->hdr.count) { 920 + if (i == sfp->hdr.count) 940 921 return XFS_ERROR(ENOENT); 941 - } 942 922 /* 943 923 * Calculate sizes. 944 924 */ ··· 1056 1042 */ 1057 1043 else { 1058 1044 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 1059 - i < sfp->hdr.count; 1060 - i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 1061 - if (sfep->namelen == args->namelen && 1062 - sfep->name[0] == args->name[0] && 1063 - memcmp(args->name, sfep->name, args->namelen) == 0) { 1045 + i < sfp->hdr.count; 1046 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 1047 + if (xfs_da_compname(args, sfep->name, sfep->namelen) == 1048 + XFS_CMP_EXACT) { 1064 1049 #if XFS_BIG_INUMS || defined(DEBUG) 1065 1050 ino = xfs_dir2_sf_get_inumber(sfp, 1066 1051 xfs_dir2_sf_inumberp(sfep)); ··· 1074 1061 * Didn't find it. 1075 1062 */ 1076 1063 if (i == sfp->hdr.count) { 1077 - ASSERT(args->oknoent); 1064 + ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1078 1065 #if XFS_BIG_INUMS 1079 1066 if (i8elevated) 1080 1067 xfs_dir2_sf_toino4(args); ··· 1187 1174 /* 1188 1175 * Clean up the inode. 
1189 1176 */ 1190 - kmem_free(buf, oldsize); 1177 + kmem_free(buf); 1191 1178 dp->i_d.di_size = newsize; 1192 1179 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 1193 1180 } ··· 1264 1251 /* 1265 1252 * Clean up the inode. 1266 1253 */ 1267 - kmem_free(buf, oldsize); 1254 + kmem_free(buf); 1268 1255 dp->i_d.di_size = newsize; 1269 1256 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 1270 1257 }
+3 -3
fs/xfs/xfs_dir2_sf.h
··· 62 62 * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t. 63 63 * Only need 16 bits, this is the byte offset into the single block form. 64 64 */ 65 - typedef struct { __uint8_t i[2]; } xfs_dir2_sf_off_t; 65 + typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t; 66 66 67 67 /* 68 68 * The parent directory has a dedicated field, and the self-pointer must ··· 76 76 __uint8_t count; /* count of entries */ 77 77 __uint8_t i8count; /* count of 8-byte inode #s */ 78 78 xfs_dir2_inou_t parent; /* parent dir inode number */ 79 - } xfs_dir2_sf_hdr_t; 79 + } __arch_pack xfs_dir2_sf_hdr_t; 80 80 81 81 typedef struct xfs_dir2_sf_entry { 82 82 __uint8_t namelen; /* actual name length */ 83 83 xfs_dir2_sf_off_t offset; /* saved offset */ 84 84 __uint8_t name[1]; /* name, variable size */ 85 85 xfs_dir2_inou_t inumber; /* inode number, var. offset */ 86 - } xfs_dir2_sf_entry_t; 86 + } __arch_pack xfs_dir2_sf_entry_t; 87 87 88 88 typedef struct xfs_dir2_sf { 89 89 xfs_dir2_sf_hdr_t hdr; /* shortform header */
+11 -9
fs/xfs/xfs_dir2_trace.c
··· 85 85 (void *)((unsigned long)(args->inumber >> 32)), 86 86 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 87 87 (void *)args->dp, (void *)args->trans, 88 - (void *)(unsigned long)args->justcheck, NULL, NULL); 88 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 89 + NULL, NULL); 89 90 } 90 91 91 92 void ··· 101 100 (void *)((unsigned long)(args->inumber >> 32)), 102 101 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 103 102 (void *)args->dp, (void *)args->trans, 104 - (void *)(unsigned long)args->justcheck, 103 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 105 104 (void *)(bp ? bp->bps[0] : NULL), NULL); 106 105 } 107 106 ··· 118 117 (void *)((unsigned long)(args->inumber >> 32)), 119 118 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 120 119 (void *)args->dp, (void *)args->trans, 121 - (void *)(unsigned long)args->justcheck, 120 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 122 121 (void *)(lbp ? lbp->bps[0] : NULL), 123 122 (void *)(dbp ? 
dbp->bps[0] : NULL)); 124 123 } ··· 158 157 (void *)((unsigned long)(args->inumber >> 32)), 159 158 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 160 159 (void *)args->dp, (void *)args->trans, 161 - (void *)(unsigned long)args->justcheck, (void *)(long)db, 162 - (void *)dbp); 160 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 161 + (void *)(long)db, (void *)dbp); 163 162 } 164 163 165 164 void ··· 174 173 (void *)((unsigned long)(args->inumber >> 32)), 175 174 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 176 175 (void *)args->dp, (void *)args->trans, 177 - (void *)(unsigned long)args->justcheck, 176 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 178 177 (void *)((unsigned long)(i >> 32)), 179 178 (void *)((unsigned long)(i & 0xFFFFFFFF))); 180 179 } ··· 191 190 (void *)((unsigned long)(args->inumber >> 32)), 192 191 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 193 192 (void *)args->dp, (void *)args->trans, 194 - (void *)(unsigned long)args->justcheck, (void *)(long)s, NULL); 193 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 194 + (void *)(long)s, NULL); 195 195 } 196 196 197 197 void ··· 210 208 (void *)((unsigned long)(args->inumber >> 32)), 211 209 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), 212 210 (void *)args->dp, (void *)args->trans, 213 - (void *)(unsigned long)args->justcheck, (void *)(long)s, 214 - (void *)dbp); 211 + (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), 212 + (void *)(long)s, (void *)dbp); 215 213 } 216 214 #endif /* XFS_DIR2_TRACE */
+1 -1
fs/xfs/xfs_dmapi.h
··· 166 166 167 167 #define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \ 168 168 DM_FLAGS_NDELAY : 0) 169 - #define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0) 169 + #define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0) 170 170 171 171 #endif /* __XFS_DMAPI_H__ */
+2 -11
fs/xfs/xfs_error.c
··· 66 66 int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; 67 67 char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; 68 68 69 - void 70 - xfs_error_test_init(void) 71 - { 72 - memset(xfs_etest, 0, sizeof(xfs_etest)); 73 - memset(xfs_etest_fsid, 0, sizeof(xfs_etest_fsid)); 74 - memset(xfs_etest_fsname, 0, sizeof(xfs_etest_fsname)); 75 - } 76 - 77 69 int 78 70 xfs_error_test(int error_tag, int *fsidp, char *expression, 79 71 int line, char *file, unsigned long randfactor) ··· 142 150 xfs_etest[i]); 143 151 xfs_etest[i] = 0; 144 152 xfs_etest_fsid[i] = 0LL; 145 - kmem_free(xfs_etest_fsname[i], 146 - strlen(xfs_etest_fsname[i]) + 1); 153 + kmem_free(xfs_etest_fsname[i]); 147 154 xfs_etest_fsname[i] = NULL; 148 155 } 149 156 } ··· 166 175 newfmt = kmem_alloc(len, KM_SLEEP); 167 176 sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt); 168 177 icmn_err(level, newfmt, ap); 169 - kmem_free(newfmt, len); 178 + kmem_free(newfmt); 170 179 } else { 171 180 icmn_err(level, fmt, ap); 172 181 }
-1
fs/xfs/xfs_error.h
··· 127 127 128 128 #if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) 129 129 extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); 130 - extern void xfs_error_test_init(void); 131 130 132 131 #define XFS_NUM_INJECT_ERROR 10 133 132
+2 -4
fs/xfs/xfs_extfree_item.c
··· 41 41 int nexts = efip->efi_format.efi_nextents; 42 42 43 43 if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { 44 - kmem_free(efip, sizeof(xfs_efi_log_item_t) + 45 - (nexts - 1) * sizeof(xfs_extent_t)); 44 + kmem_free(efip); 46 45 } else { 47 46 kmem_zone_free(xfs_efi_zone, efip); 48 47 } ··· 373 374 int nexts = efdp->efd_format.efd_nextents; 374 375 375 376 if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { 376 - kmem_free(efdp, sizeof(xfs_efd_log_item_t) + 377 - (nexts - 1) * sizeof(xfs_extent_t)); 377 + kmem_free(efdp); 378 378 } else { 379 379 kmem_zone_free(xfs_efd_zone, efdp); 380 380 }
+3 -1
fs/xfs/xfs_filestream.c
··· 397 397 xfs_filestream_init(void) 398 398 { 399 399 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item"); 400 + if (!item_zone) 401 + return -ENOMEM; 400 402 #ifdef XFS_FILESTREAMS_TRACE 401 403 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP); 402 404 #endif 403 - return item_zone ? 0 : -ENOMEM; 405 + return 0; 404 406 } 405 407 406 408 /*
+4
fs/xfs/xfs_fs.h
··· 239 239 #define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */ 240 240 #define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */ 241 241 #define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */ 242 + #define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */ 242 243 #define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */ 243 244 244 245 ··· 372 371 373 372 typedef struct xfs_attr_multiop { 374 373 __u32 am_opcode; 374 + #define ATTR_OP_GET 1 /* return the indicated attr's value */ 375 + #define ATTR_OP_SET 2 /* set/create the indicated attr/value pair */ 376 + #define ATTR_OP_REMOVE 3 /* remove the indicated attr */ 375 377 __s32 am_error; 376 378 void __user *am_attrname; 377 379 void __user *am_attrvalue;
+3 -1
fs/xfs/xfs_fsops.c
··· 95 95 XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) | 96 96 (xfs_sb_version_hassector(&mp->m_sb) ? 97 97 XFS_FSOP_GEOM_FLAGS_SECTOR : 0) | 98 + (xfs_sb_version_hasasciici(&mp->m_sb) ? 99 + XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) | 98 100 (xfs_sb_version_haslazysbcount(&mp->m_sb) ? 99 101 XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) | 100 102 (xfs_sb_version_hasattr2(&mp->m_sb) ? ··· 627 625 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT); 628 626 thaw_bdev(sb->s_bdev, sb); 629 627 } 630 - 628 + 631 629 break; 632 630 } 633 631 case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
+62 -103
fs/xfs/xfs_inode.c
··· 1763 1763 return 0; 1764 1764 } 1765 1765 1766 - 1767 - /* 1768 - * xfs_igrow_start 1769 - * 1770 - * Do the first part of growing a file: zero any data in the last 1771 - * block that is beyond the old EOF. We need to do this before 1772 - * the inode is joined to the transaction to modify the i_size. 1773 - * That way we can drop the inode lock and call into the buffer 1774 - * cache to get the buffer mapping the EOF. 1775 - */ 1776 - int 1777 - xfs_igrow_start( 1778 - xfs_inode_t *ip, 1779 - xfs_fsize_t new_size, 1780 - cred_t *credp) 1781 - { 1782 - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 1783 - ASSERT(new_size > ip->i_size); 1784 - 1785 - /* 1786 - * Zero any pages that may have been created by 1787 - * xfs_write_file() beyond the end of the file 1788 - * and any blocks between the old and new file sizes. 1789 - */ 1790 - return xfs_zero_eof(ip, new_size, ip->i_size); 1791 - } 1792 - 1793 - /* 1794 - * xfs_igrow_finish 1795 - * 1796 - * This routine is called to extend the size of a file. 1797 - * The inode must have both the iolock and the ilock locked 1798 - * for update and it must be a part of the current transaction. 1799 - * The xfs_igrow_start() function must have been called previously. 1800 - * If the change_flag is not zero, the inode change timestamp will 1801 - * be updated. 1802 - */ 1803 - void 1804 - xfs_igrow_finish( 1805 - xfs_trans_t *tp, 1806 - xfs_inode_t *ip, 1807 - xfs_fsize_t new_size, 1808 - int change_flag) 1809 - { 1810 - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 1811 - ASSERT(ip->i_transp == tp); 1812 - ASSERT(new_size > ip->i_size); 1813 - 1814 - /* 1815 - * Update the file size. Update the inode change timestamp 1816 - * if change_flag set. 
1817 - */ 1818 - ip->i_d.di_size = new_size; 1819 - ip->i_size = new_size; 1820 - if (change_flag) 1821 - xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 1822 - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1823 - 1824 - } 1825 - 1826 - 1827 1766 /* 1828 1767 * This is called when the inode's link count goes to 0. 1829 1768 * We place the on-disk inode on a list in the AGI. It ··· 2197 2258 xfs_trans_binval(tp, bp); 2198 2259 } 2199 2260 2200 - kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *)); 2261 + kmem_free(ip_found); 2201 2262 xfs_put_perag(mp, pag); 2202 2263 } 2203 2264 ··· 2409 2470 (int)new_size); 2410 2471 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2411 2472 } 2412 - kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2473 + kmem_free(ifp->if_broot); 2413 2474 ifp->if_broot = new_broot; 2414 2475 ifp->if_broot_bytes = (int)new_size; 2415 2476 ASSERT(ifp->if_broot_bytes <= ··· 2453 2514 2454 2515 if (new_size == 0) { 2455 2516 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2456 - kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2517 + kmem_free(ifp->if_u1.if_data); 2457 2518 } 2458 2519 ifp->if_u1.if_data = NULL; 2459 2520 real_size = 0; ··· 2468 2529 ASSERT(ifp->if_real_bytes != 0); 2469 2530 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2470 2531 new_size); 2471 - kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2532 + kmem_free(ifp->if_u1.if_data); 2472 2533 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2473 2534 } 2474 2535 real_size = 0; ··· 2575 2636 2576 2637 ifp = XFS_IFORK_PTR(ip, whichfork); 2577 2638 if (ifp->if_broot != NULL) { 2578 - kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2639 + kmem_free(ifp->if_broot); 2579 2640 ifp->if_broot = NULL; 2580 2641 } 2581 2642 ··· 2589 2650 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2590 2651 (ifp->if_u1.if_data != NULL)) { 2591 2652 ASSERT(ifp->if_real_bytes != 0); 2592 - kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2653 + kmem_free(ifp->if_u1.if_data); 2593 2654 
ifp->if_u1.if_data = NULL; 2594 2655 ifp->if_real_bytes = 0; 2595 2656 } ··· 2997 3058 2998 3059 out_free: 2999 3060 read_unlock(&pag->pag_ici_lock); 3000 - kmem_free(ilist, ilist_size); 3061 + kmem_free(ilist); 3001 3062 return 0; 3002 3063 3003 3064 ··· 3041 3102 * Unlocks the flush lock 3042 3103 */ 3043 3104 xfs_iflush_abort(iq); 3044 - kmem_free(ilist, ilist_size); 3105 + kmem_free(ilist); 3045 3106 return XFS_ERROR(EFSCORRUPTED); 3046 3107 } 3047 3108 ··· 3082 3143 * flush lock and do nothing. 3083 3144 */ 3084 3145 if (xfs_inode_clean(ip)) { 3085 - ASSERT((iip != NULL) ? 3086 - !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); 3087 3146 xfs_ifunlock(ip); 3088 3147 return 0; 3089 3148 } ··· 3773 3836 erp = xfs_iext_irec_new(ifp, erp_idx); 3774 3837 } 3775 3838 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3776 - kmem_free(nex2_ep, byte_diff); 3839 + kmem_free(nex2_ep); 3777 3840 erp->er_extcount += nex2; 3778 3841 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3779 3842 } ··· 4049 4112 */ 4050 4113 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 4051 4114 nextents * sizeof(xfs_bmbt_rec_t)); 4052 - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4115 + kmem_free(ifp->if_u1.if_extents); 4053 4116 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 4054 4117 ifp->if_real_bytes = 0; 4055 4118 } ··· 4123 4186 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 4124 4187 4125 4188 ep = ifp->if_u1.if_ext_irec->er_extbuf; 4126 - kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); 4189 + kmem_free(ifp->if_u1.if_ext_irec); 4127 4190 ifp->if_flags &= ~XFS_IFEXTIREC; 4128 4191 ifp->if_u1.if_extents = ep; 4129 4192 ifp->if_bytes = size; ··· 4149 4212 } 4150 4213 ifp->if_flags &= ~XFS_IFEXTIREC; 4151 4214 } else if (ifp->if_real_bytes) { 4152 - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4215 + kmem_free(ifp->if_u1.if_extents); 4153 4216 } else if (ifp->if_bytes) { 4154 4217 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 4155 4218 
sizeof(xfs_bmbt_rec_t)); ··· 4420 4483 if (erp->er_extbuf) { 4421 4484 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 4422 4485 -erp->er_extcount); 4423 - kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ); 4486 + kmem_free(erp->er_extbuf); 4424 4487 } 4425 4488 /* Compact extent records */ 4426 4489 erp = ifp->if_u1.if_ext_irec; ··· 4438 4501 xfs_iext_realloc_indirect(ifp, 4439 4502 nlists * sizeof(xfs_ext_irec_t)); 4440 4503 } else { 4441 - kmem_free(ifp->if_u1.if_ext_irec, 4442 - sizeof(xfs_ext_irec_t)); 4504 + kmem_free(ifp->if_u1.if_ext_irec); 4443 4505 } 4444 4506 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4445 4507 } ··· 4507 4571 * so er_extoffs don't get modified in 4508 4572 * xfs_iext_irec_remove. 4509 4573 */ 4510 - kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ); 4574 + kmem_free(erp_next->er_extbuf); 4511 4575 erp_next->er_extbuf = NULL; 4512 4576 xfs_iext_irec_remove(ifp, erp_idx + 1); 4513 4577 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; ··· 4532 4596 int nlists; /* number of irec's (ex lists) */ 4533 4597 4534 4598 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4599 + 4535 4600 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4536 4601 erp = ifp->if_u1.if_ext_irec; 4537 4602 ep = &erp->er_extbuf[erp->er_extcount]; 4538 4603 erp_next = erp + 1; 4539 4604 ep_next = erp_next->er_extbuf; 4605 + 4540 4606 while (erp_idx < nlists - 1) { 4607 + /* 4608 + * Check how many extent records are available in this irec. 4609 + * If there is none skip the whole exercise. 4610 + */ 4541 4611 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 4542 - ext_diff = MIN(ext_avail, erp_next->er_extcount); 4543 - memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); 4544 - erp->er_extcount += ext_diff; 4545 - erp_next->er_extcount -= ext_diff; 4546 - /* Remove next page */ 4547 - if (erp_next->er_extcount == 0) { 4612 + if (ext_avail) { 4613 + 4548 4614 /* 4549 - * Free page before removing extent record 4550 - * so er_extoffs don't get modified in 4551 - * xfs_iext_irec_remove. 
4615 + * Copy over as many as possible extent records into 4616 + * the previous page. 4552 4617 */ 4553 - kmem_free(erp_next->er_extbuf, 4554 - erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); 4555 - erp_next->er_extbuf = NULL; 4556 - xfs_iext_irec_remove(ifp, erp_idx + 1); 4557 - erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4558 - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4559 - /* Update next page */ 4560 - } else { 4561 - /* Move rest of page up to become next new page */ 4562 - memmove(erp_next->er_extbuf, ep_next, 4563 - erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); 4564 - ep_next = erp_next->er_extbuf; 4565 - memset(&ep_next[erp_next->er_extcount], 0, 4566 - (XFS_LINEAR_EXTS - erp_next->er_extcount) * 4567 - sizeof(xfs_bmbt_rec_t)); 4618 + ext_diff = MIN(ext_avail, erp_next->er_extcount); 4619 + memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); 4620 + erp->er_extcount += ext_diff; 4621 + erp_next->er_extcount -= ext_diff; 4622 + 4623 + /* 4624 + * If the next irec is empty now we can simply 4625 + * remove it. 4626 + */ 4627 + if (erp_next->er_extcount == 0) { 4628 + /* 4629 + * Free page before removing extent record 4630 + * so er_extoffs don't get modified in 4631 + * xfs_iext_irec_remove. 4632 + */ 4633 + kmem_free(erp_next->er_extbuf); 4634 + erp_next->er_extbuf = NULL; 4635 + xfs_iext_irec_remove(ifp, erp_idx + 1); 4636 + erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4637 + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4638 + 4639 + /* 4640 + * If the next irec is not empty move up the content 4641 + * that has not been copied to the previous page to 4642 + * the beggining of this one. 
4643 + */ 4644 + } else { 4645 + memmove(erp_next->er_extbuf, &ep_next[ext_diff], 4646 + erp_next->er_extcount * 4647 + sizeof(xfs_bmbt_rec_t)); 4648 + ep_next = erp_next->er_extbuf; 4649 + memset(&ep_next[erp_next->er_extcount], 0, 4650 + (XFS_LINEAR_EXTS - 4651 + erp_next->er_extcount) * 4652 + sizeof(xfs_bmbt_rec_t)); 4653 + } 4568 4654 } 4655 + 4569 4656 if (erp->er_extcount == XFS_LINEAR_EXTS) { 4570 4657 erp_idx++; 4571 4658 if (erp_idx < nlists)
-3
fs/xfs/xfs_inode.h
··· 507 507 int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, 508 508 xfs_fsize_t, int, int); 509 509 int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); 510 - int xfs_igrow_start(xfs_inode_t *, xfs_fsize_t, struct cred *); 511 - void xfs_igrow_finish(struct xfs_trans *, xfs_inode_t *, 512 - xfs_fsize_t, int); 513 510 514 511 void xfs_idestroy_fork(xfs_inode_t *, int); 515 512 void xfs_idestroy(xfs_inode_t *);
+3 -4
fs/xfs/xfs_inode_item.c
··· 686 686 ASSERT(ip->i_d.di_nextents > 0); 687 687 ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT); 688 688 ASSERT(ip->i_df.if_bytes > 0); 689 - kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes); 689 + kmem_free(iip->ili_extents_buf); 690 690 iip->ili_extents_buf = NULL; 691 691 } 692 692 if (iip->ili_aextents_buf != NULL) { ··· 694 694 ASSERT(ip->i_d.di_anextents > 0); 695 695 ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT); 696 696 ASSERT(ip->i_afp->if_bytes > 0); 697 - kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes); 697 + kmem_free(iip->ili_aextents_buf); 698 698 iip->ili_aextents_buf = NULL; 699 699 } 700 700 ··· 957 957 { 958 958 #ifdef XFS_TRANS_DEBUG 959 959 if (ip->i_itemp->ili_root_size != 0) { 960 - kmem_free(ip->i_itemp->ili_orig_root, 961 - ip->i_itemp->ili_root_size); 960 + kmem_free(ip->i_itemp->ili_orig_root); 962 961 } 963 962 #endif 964 963 kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+10
fs/xfs/xfs_iomap.c
··· 889 889 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 890 890 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 891 891 892 + /* 893 + * Reserve enough blocks in this transaction for two complete extent 894 + * btree splits. We may be converting the middle part of an unwritten 895 + * extent and in this case we will insert two new extents in the btree 896 + * each of which could cause a full split. 897 + * 898 + * This reservation amount will be used in the first call to 899 + * xfs_bmbt_split() to select an AG with enough space to satisfy the 900 + * rest of the operation. 901 + */ 892 902 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; 893 903 894 904 do {
+3 -3
fs/xfs/xfs_itable.c
··· 257 257 *ubused = error; 258 258 259 259 out_free: 260 - kmem_free(buf, sizeof(*buf)); 260 + kmem_free(buf); 261 261 return error; 262 262 } 263 263 ··· 708 708 /* 709 709 * Done, we're either out of filesystem or space to put the data. 710 710 */ 711 - kmem_free(irbuf, irbsize); 711 + kmem_free(irbuf); 712 712 *ubcountp = ubelem; 713 713 /* 714 714 * Found some inodes, return them now and return the error next time. ··· 914 914 } 915 915 *lastino = XFS_AGINO_TO_INO(mp, agno, agino); 916 916 } 917 - kmem_free(buffer, bcount * sizeof(*buffer)); 917 + kmem_free(buffer); 918 918 if (cur) 919 919 xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : 920 920 XFS_BTREE_NOERROR));
+25 -24
fs/xfs/xfs_log.c
··· 226 226 static void 227 227 xlog_grant_add_space_write(struct log *log, int bytes) 228 228 { 229 - log->l_grant_write_bytes += bytes; 230 - if (log->l_grant_write_bytes > log->l_logsize) { 231 - log->l_grant_write_bytes -= log->l_logsize; 229 + int tmp = log->l_logsize - log->l_grant_write_bytes; 230 + if (tmp > bytes) 231 + log->l_grant_write_bytes += bytes; 232 + else { 232 233 log->l_grant_write_cycle++; 234 + log->l_grant_write_bytes = bytes - tmp; 233 235 } 234 236 } 235 237 236 238 static void 237 239 xlog_grant_add_space_reserve(struct log *log, int bytes) 238 240 { 239 - log->l_grant_reserve_bytes += bytes; 240 - if (log->l_grant_reserve_bytes > log->l_logsize) { 241 - log->l_grant_reserve_bytes -= log->l_logsize; 241 + int tmp = log->l_logsize - log->l_grant_reserve_bytes; 242 + if (tmp > bytes) 243 + log->l_grant_reserve_bytes += bytes; 244 + else { 242 245 log->l_grant_reserve_cycle++; 246 + log->l_grant_reserve_bytes = bytes - tmp; 243 247 } 244 248 } 245 249 ··· 1232 1228 1233 1229 spin_lock_init(&log->l_icloglock); 1234 1230 spin_lock_init(&log->l_grant_lock); 1235 - initnsema(&log->l_flushsema, 0, "ic-flush"); 1231 + sv_init(&log->l_flush_wait, 0, "flush_wait"); 1236 1232 1237 1233 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ 1238 1234 ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); ··· 1574 1570 } 1575 1571 #endif 1576 1572 next_iclog = iclog->ic_next; 1577 - kmem_free(iclog, sizeof(xlog_in_core_t)); 1573 + kmem_free(iclog); 1578 1574 iclog = next_iclog; 1579 1575 } 1580 - freesema(&log->l_flushsema); 1581 1576 spinlock_destroy(&log->l_icloglock); 1582 1577 spinlock_destroy(&log->l_grant_lock); 1583 1578 ··· 1590 1587 } 1591 1588 #endif 1592 1589 log->l_mp->m_log = NULL; 1593 - kmem_free(log, sizeof(xlog_t)); 1590 + kmem_free(log); 1594 1591 } /* xlog_dealloc_log */ 1595 1592 1596 1593 /* ··· 2100 2097 int funcdidcallbacks; /* flag: function did callbacks */ 2101 2098 int repeats; /* for issuing console warnings if 2102 2099 
* looping too many times */ 2100 + int wake = 0; 2103 2101 2104 2102 spin_lock(&log->l_icloglock); 2105 2103 first_iclog = iclog = log->l_iclog; ··· 2282 2278 } 2283 2279 #endif 2284 2280 2285 - flushcnt = 0; 2286 - if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) { 2287 - flushcnt = log->l_flushcnt; 2288 - log->l_flushcnt = 0; 2289 - } 2281 + if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) 2282 + wake = 1; 2290 2283 spin_unlock(&log->l_icloglock); 2291 - while (flushcnt--) 2292 - vsema(&log->l_flushsema); 2293 - } /* xlog_state_do_callback */ 2284 + 2285 + if (wake) 2286 + sv_broadcast(&log->l_flush_wait); 2287 + } 2294 2288 2295 2289 2296 2290 /* ··· 2386 2384 } 2387 2385 2388 2386 iclog = log->l_iclog; 2389 - if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { 2390 - log->l_flushcnt++; 2391 - spin_unlock(&log->l_icloglock); 2387 + if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2392 2388 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); 2393 2389 XFS_STATS_INC(xs_log_noiclogs); 2394 - /* Ensure that log writes happen */ 2395 - psema(&log->l_flushsema, PINOD); 2390 + 2391 + /* Wait for log writes to have flushed */ 2392 + sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0); 2396 2393 goto restart; 2397 2394 } 2398 - ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 2395 + 2399 2396 head = &iclog->ic_header; 2400 2397 2401 2398 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
+2 -4
fs/xfs/xfs_log_priv.h
··· 423 423 int l_logBBsize; /* size of log in BB chunks */ 424 424 425 425 /* The following block of fields are changed while holding icloglock */ 426 - sema_t l_flushsema ____cacheline_aligned_in_smp; 427 - /* iclog flushing semaphore */ 428 - int l_flushcnt; /* # of procs waiting on this 429 - * sema */ 426 + sv_t l_flush_wait ____cacheline_aligned_in_smp; 427 + /* waiting for iclog flush */ 430 428 int l_covered_state;/* state of "covering disk 431 429 * log entries" */ 432 430 xlog_in_core_t *l_iclog; /* head log queue */
+8 -13
fs/xfs/xfs_log_recover.c
··· 1715 1715 } else { 1716 1716 prevp->bc_next = bcp->bc_next; 1717 1717 } 1718 - kmem_free(bcp, 1719 - sizeof(xfs_buf_cancel_t)); 1718 + kmem_free(bcp); 1720 1719 } 1721 1720 } 1722 1721 return 1; ··· 2518 2519 2519 2520 error: 2520 2521 if (need_free) 2521 - kmem_free(in_f, sizeof(*in_f)); 2522 + kmem_free(in_f); 2522 2523 return XFS_ERROR(error); 2523 2524 } 2524 2525 ··· 2829 2830 item = item->ri_next; 2830 2831 /* Free the regions in the item. */ 2831 2832 for (i = 0; i < free_item->ri_cnt; i++) { 2832 - kmem_free(free_item->ri_buf[i].i_addr, 2833 - free_item->ri_buf[i].i_len); 2833 + kmem_free(free_item->ri_buf[i].i_addr); 2834 2834 } 2835 2835 /* Free the item itself */ 2836 - kmem_free(free_item->ri_buf, 2837 - (free_item->ri_total * sizeof(xfs_log_iovec_t))); 2838 - kmem_free(free_item, sizeof(xlog_recover_item_t)); 2836 + kmem_free(free_item->ri_buf); 2837 + kmem_free(free_item); 2839 2838 } while (first_item != item); 2840 2839 /* Free the transaction recover structure */ 2841 - kmem_free(trans, sizeof(xlog_recover_t)); 2840 + kmem_free(trans); 2842 2841 } 2843 2842 2844 2843 STATIC int ··· 3783 3786 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3784 3787 XLOG_RECOVER_PASS1); 3785 3788 if (error != 0) { 3786 - kmem_free(log->l_buf_cancel_table, 3787 - XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); 3789 + kmem_free(log->l_buf_cancel_table); 3788 3790 log->l_buf_cancel_table = NULL; 3789 3791 return error; 3790 3792 } ··· 3802 3806 } 3803 3807 #endif /* DEBUG */ 3804 3808 3805 - kmem_free(log->l_buf_cancel_table, 3806 - XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); 3809 + kmem_free(log->l_buf_cancel_table); 3807 3810 log->l_buf_cancel_table = NULL; 3808 3811 3809 3812 return error;
+38 -80
fs/xfs/xfs_mount.c
··· 47 47 48 48 STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t); 49 49 STATIC int xfs_uuid_mount(xfs_mount_t *); 50 - STATIC void xfs_uuid_unmount(xfs_mount_t *mp); 51 50 STATIC void xfs_unmountfs_wait(xfs_mount_t *); 52 51 53 52 54 53 #ifdef HAVE_PERCPU_SB 55 - STATIC void xfs_icsb_destroy_counters(xfs_mount_t *); 56 54 STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, 57 55 int); 58 56 STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, ··· 61 63 62 64 #else 63 65 64 - #define xfs_icsb_destroy_counters(mp) do { } while (0) 65 66 #define xfs_icsb_balance_counter(mp, a, b) do { } while (0) 66 67 #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) 67 68 #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) ··· 123 126 }; 124 127 125 128 /* 126 - * Return a pointer to an initialized xfs_mount structure. 127 - */ 128 - xfs_mount_t * 129 - xfs_mount_init(void) 130 - { 131 - xfs_mount_t *mp; 132 - 133 - mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP); 134 - 135 - if (xfs_icsb_init_counters(mp)) { 136 - mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 137 - } 138 - 139 - spin_lock_init(&mp->m_sb_lock); 140 - mutex_init(&mp->m_ilock); 141 - mutex_init(&mp->m_growlock); 142 - atomic_set(&mp->m_active_trans, 0); 143 - 144 - return mp; 145 - } 146 - 147 - /* 148 129 * Free up the resources associated with a mount structure. Assume that 149 130 * the structure was initially zeroed, so we can tell which fields got 150 131 * initialized. 
151 132 */ 152 - void 133 + STATIC void 153 134 xfs_mount_free( 154 135 xfs_mount_t *mp) 155 136 { ··· 136 161 137 162 for (agno = 0; agno < mp->m_maxagi; agno++) 138 163 if (mp->m_perag[agno].pagb_list) 139 - kmem_free(mp->m_perag[agno].pagb_list, 140 - sizeof(xfs_perag_busy_t) * 141 - XFS_PAGB_NUM_SLOTS); 142 - kmem_free(mp->m_perag, 143 - sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); 164 + kmem_free(mp->m_perag[agno].pagb_list); 165 + kmem_free(mp->m_perag); 144 166 } 145 167 146 168 spinlock_destroy(&mp->m_ail_lock); ··· 148 176 XFS_QM_DONE(mp); 149 177 150 178 if (mp->m_fsname != NULL) 151 - kmem_free(mp->m_fsname, mp->m_fsname_len); 179 + kmem_free(mp->m_fsname); 152 180 if (mp->m_rtname != NULL) 153 - kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1); 181 + kmem_free(mp->m_rtname); 154 182 if (mp->m_logname != NULL) 155 - kmem_free(mp->m_logname, strlen(mp->m_logname) + 1); 156 - 157 - xfs_icsb_destroy_counters(mp); 183 + kmem_free(mp->m_logname); 158 184 } 159 185 160 186 /* ··· 258 288 return XFS_ERROR(EFSCORRUPTED); 259 289 } 260 290 291 + /* 292 + * Until this is fixed only page-sized or smaller data blocks work. 293 + */ 294 + if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 295 + xfs_fs_mount_cmn_err(flags, 296 + "file system with blocksize %d bytes", 297 + sbp->sb_blocksize); 298 + xfs_fs_mount_cmn_err(flags, 299 + "only pagesize (%ld) or less will currently work.", 300 + PAGE_SIZE); 301 + return XFS_ERROR(ENOSYS); 302 + } 303 + 261 304 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 262 305 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 263 306 xfs_fs_mount_cmn_err(flags, ··· 289 306 if (unlikely(!xfs_sb_version_hasdirv2(sbp))) { 290 307 xfs_fs_mount_cmn_err(flags, 291 308 "file system using version 1 directory format"); 292 - return XFS_ERROR(ENOSYS); 293 - } 294 - 295 - /* 296 - * Until this is fixed only page-sized or smaller data blocks work. 
297 - */ 298 - if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 299 - xfs_fs_mount_cmn_err(flags, 300 - "file system with blocksize %d bytes", 301 - sbp->sb_blocksize); 302 - xfs_fs_mount_cmn_err(flags, 303 - "only pagesize (%ld) or less will currently work.", 304 - PAGE_SIZE); 305 309 return XFS_ERROR(ENOSYS); 306 310 } 307 311 ··· 964 994 * Re-check for ATTR2 in case it was found in bad_features2 965 995 * slot. 966 996 */ 967 - if (xfs_sb_version_hasattr2(&mp->m_sb)) 997 + if (xfs_sb_version_hasattr2(&mp->m_sb) && 998 + !(mp->m_flags & XFS_MOUNT_NOATTR2)) 968 999 mp->m_flags |= XFS_MOUNT_ATTR2; 1000 + } 969 1001 1002 + if (xfs_sb_version_hasattr2(&mp->m_sb) && 1003 + (mp->m_flags & XFS_MOUNT_NOATTR2)) { 1004 + xfs_sb_version_removeattr2(&mp->m_sb); 1005 + update_flags |= XFS_SB_FEATURES2; 1006 + 1007 + /* update sb_versionnum for the clearing of the morebits */ 1008 + if (!sbp->sb_features2) 1009 + update_flags |= XFS_SB_VERSIONNUM; 970 1010 } 971 1011 972 1012 /* ··· 1235 1255 error2: 1236 1256 for (agno = 0; agno < sbp->sb_agcount; agno++) 1237 1257 if (mp->m_perag[agno].pagb_list) 1238 - kmem_free(mp->m_perag[agno].pagb_list, 1239 - sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS); 1240 - kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t)); 1258 + kmem_free(mp->m_perag[agno].pagb_list); 1259 + kmem_free(mp->m_perag); 1241 1260 mp->m_perag = NULL; 1242 1261 /* FALLTHROUGH */ 1243 1262 error1: 1244 1263 if (uuid_mounted) 1245 - xfs_uuid_unmount(mp); 1246 - xfs_freesb(mp); 1264 + uuid_table_remove(&mp->m_sb.sb_uuid); 1247 1265 return error; 1248 1266 } 1249 1267 ··· 1252 1274 * log and makes sure that incore structures are freed. 
1253 1275 */ 1254 1276 int 1255 - xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) 1277 + xfs_unmountfs(xfs_mount_t *mp) 1256 1278 { 1257 1279 __uint64_t resblks; 1258 1280 int error = 0; ··· 1319 1341 */ 1320 1342 ASSERT(mp->m_inodes == NULL); 1321 1343 1322 - xfs_unmountfs_close(mp, cr); 1323 1344 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) 1324 - xfs_uuid_unmount(mp); 1345 + uuid_table_remove(&mp->m_sb.sb_uuid); 1325 1346 1326 1347 #if defined(DEBUG) || defined(INDUCE_IO_ERROR) 1327 1348 xfs_errortag_clearall(mp, 0); 1328 1349 #endif 1329 1350 xfs_mount_free(mp); 1330 1351 return 0; 1331 - } 1332 - 1333 - void 1334 - xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr) 1335 - { 1336 - if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) 1337 - xfs_free_buftarg(mp->m_logdev_targp, 1); 1338 - if (mp->m_rtdev_targp) 1339 - xfs_free_buftarg(mp->m_rtdev_targp, 1); 1340 - xfs_free_buftarg(mp->m_ddev_targp, 0); 1341 1352 } 1342 1353 1343 1354 STATIC void ··· 1872 1905 } 1873 1906 1874 1907 /* 1875 - * Remove filesystem from the UUID table. 1876 - */ 1877 - STATIC void 1878 - xfs_uuid_unmount( 1879 - xfs_mount_t *mp) 1880 - { 1881 - uuid_table_remove(&mp->m_sb.sb_uuid); 1882 - } 1883 - 1884 - /* 1885 1908 * Used to log changes to the superblock unit and width fields which could 1886 1909 * be altered by the mount options, as well as any potential sb_features2 1887 1910 * fixup. Only the first superblock is updated. ··· 1885 1928 int error; 1886 1929 1887 1930 ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | 1888 - XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2)); 1931 + XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | 1932 + XFS_SB_VERSIONNUM)); 1889 1933 1890 1934 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); 1891 1935 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, ··· 2067 2109 xfs_icsb_unlock(mp); 2068 2110 } 2069 2111 2070 - STATIC void 2112 + void 2071 2113 xfs_icsb_destroy_counters( 2072 2114 xfs_mount_t *mp) 2073 2115 {
+8 -9
fs/xfs/xfs_mount.h
··· 61 61 struct xfs_extdelta; 62 62 struct xfs_swapext; 63 63 struct xfs_mru_cache; 64 + struct xfs_nameops; 64 65 65 66 /* 66 67 * Prototypes and functions for the Data Migration subsystem. ··· 211 210 212 211 extern int xfs_icsb_init_counters(struct xfs_mount *); 213 212 extern void xfs_icsb_reinit_counters(struct xfs_mount *); 213 + extern void xfs_icsb_destroy_counters(struct xfs_mount *); 214 214 extern void xfs_icsb_sync_counters(struct xfs_mount *, int); 215 215 extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); 216 216 217 217 #else 218 - #define xfs_icsb_init_counters(mp) (0) 219 - #define xfs_icsb_reinit_counters(mp) do { } while (0) 218 + #define xfs_icsb_init_counters(mp) (0) 219 + #define xfs_icsb_destroy_counters(mp) do { } while (0) 220 + #define xfs_icsb_reinit_counters(mp) do { } while (0) 220 221 #define xfs_icsb_sync_counters(mp, flags) do { } while (0) 221 222 #define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) 222 223 #endif ··· 316 313 __uint8_t m_inode_quiesce;/* call quiesce on new inodes. 
317 314 field governed by m_ilock */ 318 315 __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ 316 + const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */ 319 317 int m_dirblksize; /* directory block sz--bytes */ 320 318 int m_dirblkfsbs; /* directory block sz--fsbs */ 321 319 xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */ ··· 382 378 counters */ 383 379 #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams 384 380 allocator */ 381 + #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ 385 382 386 383 387 384 /* ··· 515 510 #define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock)) 516 511 #define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) 517 512 518 - extern xfs_mount_t *xfs_mount_init(void); 519 513 extern void xfs_mod_sb(xfs_trans_t *, __int64_t); 520 514 extern int xfs_log_sbcount(xfs_mount_t *, uint); 521 - extern void xfs_mount_free(xfs_mount_t *mp); 522 515 extern int xfs_mountfs(xfs_mount_t *mp, int); 523 516 extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); 524 517 525 - extern int xfs_unmountfs(xfs_mount_t *, struct cred *); 526 - extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *); 518 + extern int xfs_unmountfs(xfs_mount_t *); 527 519 extern int xfs_unmountfs_writesb(xfs_mount_t *); 528 520 extern int xfs_unmount_flush(xfs_mount_t *, int); 529 521 extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); ··· 545 543 extern void xfs_qmops_put(struct xfs_mount *); 546 544 547 545 extern struct xfs_dmops xfs_dmcore_xfs; 548 - 549 - extern int xfs_init(void); 550 - extern void xfs_cleanup(void); 551 546 552 547 #endif /* __KERNEL__ */ 553 548
+12 -9
fs/xfs/xfs_mru_cache.c
··· 307 307 xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t), 308 308 "xfs_mru_cache_elem"); 309 309 if (!xfs_mru_elem_zone) 310 - return ENOMEM; 310 + goto out; 311 311 312 312 xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache"); 313 - if (!xfs_mru_reap_wq) { 314 - kmem_zone_destroy(xfs_mru_elem_zone); 315 - return ENOMEM; 316 - } 313 + if (!xfs_mru_reap_wq) 314 + goto out_destroy_mru_elem_zone; 317 315 318 316 return 0; 317 + 318 + out_destroy_mru_elem_zone: 319 + kmem_zone_destroy(xfs_mru_elem_zone); 320 + out: 321 + return -ENOMEM; 319 322 } 320 323 321 324 void ··· 385 382 386 383 exit: 387 384 if (err && mru && mru->lists) 388 - kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); 385 + kmem_free(mru->lists); 389 386 if (err && mru) 390 - kmem_free(mru, sizeof(*mru)); 387 + kmem_free(mru); 391 388 392 389 return err; 393 390 } ··· 427 424 428 425 xfs_mru_cache_flush(mru); 429 426 430 - kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); 431 - kmem_free(mru, sizeof(*mru)); 427 + kmem_free(mru->lists); 428 + kmem_free(mru); 432 429 } 433 430 434 431 /*
+9 -13
fs/xfs/xfs_rename.c
··· 336 336 ASSERT(error != EEXIST); 337 337 if (error) 338 338 goto abort_return; 339 - xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 340 - 341 - } else { 342 - /* 343 - * We always want to hit the ctime on the source inode. 344 - * We do it in the if clause above for the 'new_parent && 345 - * src_is_directory' case, and here we get all the other 346 - * cases. This isn't strictly required by the standards 347 - * since the source inode isn't really being changed, 348 - * but old unix file systems did it and some incremental 349 - * backup programs won't work without it. 350 - */ 351 - xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); 352 339 } 340 + 341 + /* 342 + * We always want to hit the ctime on the source inode. 343 + * 344 + * This isn't strictly required by the standards since the source 345 + * inode isn't really being changed, but old unix file systems did 346 + * it and some incremental backup programs won't work without it. 347 + */ 348 + xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); 353 349 354 350 /* 355 351 * Adjust the link count on src_dp. This is necessary when
+1 -1
fs/xfs/xfs_rtalloc.c
··· 2062 2062 /* 2063 2063 * Free the fake mp structure. 2064 2064 */ 2065 - kmem_free(nmp, sizeof(*nmp)); 2065 + kmem_free(nmp); 2066 2066 2067 2067 return error; 2068 2068 }
+16 -1
fs/xfs/xfs_sb.h
··· 46 46 #define XFS_SB_VERSION_SECTORBIT 0x0800 47 47 #define XFS_SB_VERSION_EXTFLGBIT 0x1000 48 48 #define XFS_SB_VERSION_DIRV2BIT 0x2000 49 + #define XFS_SB_VERSION_BORGBIT 0x4000 /* ASCII only case-insens. */ 49 50 #define XFS_SB_VERSION_MOREBITSBIT 0x8000 50 51 #define XFS_SB_VERSION_OKSASHFBITS \ 51 52 (XFS_SB_VERSION_EXTFLGBIT | \ 52 - XFS_SB_VERSION_DIRV2BIT) 53 + XFS_SB_VERSION_DIRV2BIT | \ 54 + XFS_SB_VERSION_BORGBIT) 53 55 #define XFS_SB_VERSION_OKREALFBITS \ 54 56 (XFS_SB_VERSION_ATTRBIT | \ 55 57 XFS_SB_VERSION_NLINKBIT | \ ··· 439 437 ((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT); 440 438 } 441 439 440 + static inline int xfs_sb_version_hasasciici(xfs_sb_t *sbp) 441 + { 442 + return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ 443 + (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT); 444 + } 445 + 442 446 static inline int xfs_sb_version_hasmorebits(xfs_sb_t *sbp) 443 447 { 444 448 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ ··· 479 471 ((sbp)->sb_versionnum | XFS_SB_VERSION_MOREBITSBIT), \ 480 472 ((sbp)->sb_features2 = \ 481 473 ((sbp)->sb_features2 | XFS_SB_VERSION2_ATTR2BIT))); 474 + } 475 + 476 + static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp) 477 + { 478 + sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT; 479 + if (!sbp->sb_features2) 480 + sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; 482 481 } 483 482 484 483 /*
+2 -2
fs/xfs/xfs_trans.c
··· 889 889 890 890 tp->t_commit_lsn = commit_lsn; 891 891 if (nvec > XFS_TRANS_LOGVEC_COUNT) { 892 - kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t)); 892 + kmem_free(log_vector); 893 893 } 894 894 895 895 /* ··· 1265 1265 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 1266 1266 xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); 1267 1267 next_licp = licp->lic_next; 1268 - kmem_free(licp, sizeof(xfs_log_item_chunk_t)); 1268 + kmem_free(licp); 1269 1269 licp = next_licp; 1270 1270 } 1271 1271
+1 -1
fs/xfs/xfs_trans_inode.c
··· 291 291 iip = ip->i_itemp; 292 292 if (iip->ili_root_size != 0) { 293 293 ASSERT(iip->ili_orig_root != NULL); 294 - kmem_free(iip->ili_orig_root, iip->ili_root_size); 294 + kmem_free(iip->ili_orig_root); 295 295 iip->ili_root_size = 0; 296 296 iip->ili_orig_root = NULL; 297 297 }
+4 -4
fs/xfs/xfs_trans_item.c
··· 161 161 licpp = &((*licpp)->lic_next); 162 162 } 163 163 *licpp = licp->lic_next; 164 - kmem_free(licp, sizeof(xfs_log_item_chunk_t)); 164 + kmem_free(licp); 165 165 tp->t_items_free -= XFS_LIC_NUM_SLOTS; 166 166 } 167 167 } ··· 314 314 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 315 315 (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); 316 316 next_licp = licp->lic_next; 317 - kmem_free(licp, sizeof(xfs_log_item_chunk_t)); 317 + kmem_free(licp); 318 318 licp = next_licp; 319 319 } 320 320 ··· 363 363 next_licp = licp->lic_next; 364 364 if (XFS_LIC_ARE_ALL_FREE(licp)) { 365 365 *licpp = next_licp; 366 - kmem_free(licp, sizeof(xfs_log_item_chunk_t)); 366 + kmem_free(licp); 367 367 freed -= XFS_LIC_NUM_SLOTS; 368 368 } else { 369 369 licpp = &(licp->lic_next); ··· 530 530 lbcp = tp->t_busy.lbc_next; 531 531 while (lbcp != NULL) { 532 532 lbcq = lbcp->lbc_next; 533 - kmem_free(lbcp, sizeof(xfs_log_busy_chunk_t)); 533 + kmem_free(lbcp); 534 534 lbcp = lbcq; 535 535 } 536 536
+3 -607
fs/xfs/xfs_vfsops.c
··· 58 58 #include "xfs_utils.h" 59 59 60 60 61 - int __init 62 - xfs_init(void) 63 - { 64 - #ifdef XFS_DABUF_DEBUG 65 - extern spinlock_t xfs_dabuf_global_lock; 66 - spin_lock_init(&xfs_dabuf_global_lock); 67 - #endif 68 - 69 - /* 70 - * Initialize all of the zone allocators we use. 71 - */ 72 - xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), 73 - "xfs_log_ticket"); 74 - xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), 75 - "xfs_bmap_free_item"); 76 - xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), 77 - "xfs_btree_cur"); 78 - xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), 79 - "xfs_da_state"); 80 - xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); 81 - xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 82 - xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); 83 - xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); 84 - xfs_mru_cache_init(); 85 - xfs_filestream_init(); 86 - 87 - /* 88 - * The size of the zone allocated buf log item is the maximum 89 - * size possible under XFS. This wastes a little bit of memory, 90 - * but it is much faster. 
91 - */ 92 - xfs_buf_item_zone = 93 - kmem_zone_init((sizeof(xfs_buf_log_item_t) + 94 - (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / 95 - NBWORD) * sizeof(int))), 96 - "xfs_buf_item"); 97 - xfs_efd_zone = 98 - kmem_zone_init((sizeof(xfs_efd_log_item_t) + 99 - ((XFS_EFD_MAX_FAST_EXTENTS - 1) * 100 - sizeof(xfs_extent_t))), 101 - "xfs_efd_item"); 102 - xfs_efi_zone = 103 - kmem_zone_init((sizeof(xfs_efi_log_item_t) + 104 - ((XFS_EFI_MAX_FAST_EXTENTS - 1) * 105 - sizeof(xfs_extent_t))), 106 - "xfs_efi_item"); 107 - 108 - /* 109 - * These zones warrant special memory allocator hints 110 - */ 111 - xfs_inode_zone = 112 - kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", 113 - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | 114 - KM_ZONE_SPREAD, NULL); 115 - xfs_ili_zone = 116 - kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", 117 - KM_ZONE_SPREAD, NULL); 118 - 119 - /* 120 - * Allocate global trace buffers. 121 - */ 122 - #ifdef XFS_ALLOC_TRACE 123 - xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_SLEEP); 124 - #endif 125 - #ifdef XFS_BMAP_TRACE 126 - xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_SLEEP); 127 - #endif 128 - #ifdef XFS_BMBT_TRACE 129 - xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP); 130 - #endif 131 - #ifdef XFS_ATTR_TRACE 132 - xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP); 133 - #endif 134 - #ifdef XFS_DIR2_TRACE 135 - xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_SLEEP); 136 - #endif 137 - 138 - xfs_dir_startup(); 139 - 140 - #if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) 141 - xfs_error_test_init(); 142 - #endif /* DEBUG || INDUCE_IO_ERROR */ 143 - 144 - xfs_init_procfs(); 145 - xfs_sysctl_register(); 146 - return 0; 147 - } 148 - 149 - void __exit 150 - xfs_cleanup(void) 151 - { 152 - extern kmem_zone_t *xfs_inode_zone; 153 - extern kmem_zone_t *xfs_efd_zone; 154 - extern kmem_zone_t *xfs_efi_zone; 155 - 156 - xfs_cleanup_procfs(); 157 - xfs_sysctl_unregister(); 158 - 
xfs_filestream_uninit(); 159 - xfs_mru_cache_uninit(); 160 - xfs_acl_zone_destroy(xfs_acl_zone); 161 - 162 - #ifdef XFS_DIR2_TRACE 163 - ktrace_free(xfs_dir2_trace_buf); 164 - #endif 165 - #ifdef XFS_ATTR_TRACE 166 - ktrace_free(xfs_attr_trace_buf); 167 - #endif 168 - #ifdef XFS_BMBT_TRACE 169 - ktrace_free(xfs_bmbt_trace_buf); 170 - #endif 171 - #ifdef XFS_BMAP_TRACE 172 - ktrace_free(xfs_bmap_trace_buf); 173 - #endif 174 - #ifdef XFS_ALLOC_TRACE 175 - ktrace_free(xfs_alloc_trace_buf); 176 - #endif 177 - 178 - kmem_zone_destroy(xfs_bmap_free_item_zone); 179 - kmem_zone_destroy(xfs_btree_cur_zone); 180 - kmem_zone_destroy(xfs_inode_zone); 181 - kmem_zone_destroy(xfs_trans_zone); 182 - kmem_zone_destroy(xfs_da_state_zone); 183 - kmem_zone_destroy(xfs_dabuf_zone); 184 - kmem_zone_destroy(xfs_buf_item_zone); 185 - kmem_zone_destroy(xfs_efd_zone); 186 - kmem_zone_destroy(xfs_efi_zone); 187 - kmem_zone_destroy(xfs_ifork_zone); 188 - kmem_zone_destroy(xfs_ili_zone); 189 - kmem_zone_destroy(xfs_log_ticket_zone); 190 - } 191 - 192 - /* 193 - * xfs_start_flags 194 - * 195 - * This function fills in xfs_mount_t fields based on mount args. 196 - * Note: the superblock has _not_ yet been read in. 197 - */ 198 - STATIC int 199 - xfs_start_flags( 200 - struct xfs_mount_args *ap, 201 - struct xfs_mount *mp) 202 - { 203 - /* Values are in BBs */ 204 - if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { 205 - /* 206 - * At this point the superblock has not been read 207 - * in, therefore we do not know the block size. 208 - * Before the mount call ends we will convert 209 - * these to FSBs. 
210 - */ 211 - mp->m_dalign = ap->sunit; 212 - mp->m_swidth = ap->swidth; 213 - } 214 - 215 - if (ap->logbufs != -1 && 216 - ap->logbufs != 0 && 217 - (ap->logbufs < XLOG_MIN_ICLOGS || 218 - ap->logbufs > XLOG_MAX_ICLOGS)) { 219 - cmn_err(CE_WARN, 220 - "XFS: invalid logbufs value: %d [not %d-%d]", 221 - ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 222 - return XFS_ERROR(EINVAL); 223 - } 224 - mp->m_logbufs = ap->logbufs; 225 - if (ap->logbufsize != -1 && 226 - ap->logbufsize != 0 && 227 - (ap->logbufsize < XLOG_MIN_RECORD_BSIZE || 228 - ap->logbufsize > XLOG_MAX_RECORD_BSIZE || 229 - !is_power_of_2(ap->logbufsize))) { 230 - cmn_err(CE_WARN, 231 - "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 232 - ap->logbufsize); 233 - return XFS_ERROR(EINVAL); 234 - } 235 - mp->m_logbsize = ap->logbufsize; 236 - mp->m_fsname_len = strlen(ap->fsname) + 1; 237 - mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); 238 - strcpy(mp->m_fsname, ap->fsname); 239 - if (ap->rtname[0]) { 240 - mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP); 241 - strcpy(mp->m_rtname, ap->rtname); 242 - } 243 - if (ap->logname[0]) { 244 - mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP); 245 - strcpy(mp->m_logname, ap->logname); 246 - } 247 - 248 - if (ap->flags & XFSMNT_WSYNC) 249 - mp->m_flags |= XFS_MOUNT_WSYNC; 250 - #if XFS_BIG_INUMS 251 - if (ap->flags & XFSMNT_INO64) { 252 - mp->m_flags |= XFS_MOUNT_INO64; 253 - mp->m_inoadd = XFS_INO64_OFFSET; 254 - } 255 - #endif 256 - if (ap->flags & XFSMNT_RETERR) 257 - mp->m_flags |= XFS_MOUNT_RETERR; 258 - if (ap->flags & XFSMNT_NOALIGN) 259 - mp->m_flags |= XFS_MOUNT_NOALIGN; 260 - if (ap->flags & XFSMNT_SWALLOC) 261 - mp->m_flags |= XFS_MOUNT_SWALLOC; 262 - if (ap->flags & XFSMNT_OSYNCISOSYNC) 263 - mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; 264 - if (ap->flags & XFSMNT_32BITINODES) 265 - mp->m_flags |= XFS_MOUNT_32BITINODES; 266 - 267 - if (ap->flags & XFSMNT_IOSIZE) { 268 - if (ap->iosizelog > XFS_MAX_IO_LOG || 
269 - ap->iosizelog < XFS_MIN_IO_LOG) { 270 - cmn_err(CE_WARN, 271 - "XFS: invalid log iosize: %d [not %d-%d]", 272 - ap->iosizelog, XFS_MIN_IO_LOG, 273 - XFS_MAX_IO_LOG); 274 - return XFS_ERROR(EINVAL); 275 - } 276 - 277 - mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; 278 - mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; 279 - } 280 - 281 - if (ap->flags & XFSMNT_IKEEP) 282 - mp->m_flags |= XFS_MOUNT_IKEEP; 283 - if (ap->flags & XFSMNT_DIRSYNC) 284 - mp->m_flags |= XFS_MOUNT_DIRSYNC; 285 - if (ap->flags & XFSMNT_ATTR2) 286 - mp->m_flags |= XFS_MOUNT_ATTR2; 287 - 288 - if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE) 289 - mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 290 - 291 - /* 292 - * no recovery flag requires a read-only mount 293 - */ 294 - if (ap->flags & XFSMNT_NORECOVERY) { 295 - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 296 - cmn_err(CE_WARN, 297 - "XFS: tried to mount a FS read-write without recovery!"); 298 - return XFS_ERROR(EINVAL); 299 - } 300 - mp->m_flags |= XFS_MOUNT_NORECOVERY; 301 - } 302 - 303 - if (ap->flags & XFSMNT_NOUUID) 304 - mp->m_flags |= XFS_MOUNT_NOUUID; 305 - if (ap->flags & XFSMNT_BARRIER) 306 - mp->m_flags |= XFS_MOUNT_BARRIER; 307 - else 308 - mp->m_flags &= ~XFS_MOUNT_BARRIER; 309 - 310 - if (ap->flags2 & XFSMNT2_FILESTREAMS) 311 - mp->m_flags |= XFS_MOUNT_FILESTREAMS; 312 - 313 - if (ap->flags & XFSMNT_DMAPI) 314 - mp->m_flags |= XFS_MOUNT_DMAPI; 315 - return 0; 316 - } 317 - 318 - /* 319 - * This function fills in xfs_mount_t fields based on mount args. 320 - * Note: the superblock _has_ now been read in. 
321 - */ 322 - STATIC int 323 - xfs_finish_flags( 324 - struct xfs_mount_args *ap, 325 - struct xfs_mount *mp) 326 - { 327 - int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); 328 - 329 - /* Fail a mount where the logbuf is smaller then the log stripe */ 330 - if (xfs_sb_version_haslogv2(&mp->m_sb)) { 331 - if ((ap->logbufsize <= 0) && 332 - (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) { 333 - mp->m_logbsize = mp->m_sb.sb_logsunit; 334 - } else if (ap->logbufsize > 0 && 335 - ap->logbufsize < mp->m_sb.sb_logsunit) { 336 - cmn_err(CE_WARN, 337 - "XFS: logbuf size must be greater than or equal to log stripe size"); 338 - return XFS_ERROR(EINVAL); 339 - } 340 - } else { 341 - /* Fail a mount if the logbuf is larger than 32K */ 342 - if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) { 343 - cmn_err(CE_WARN, 344 - "XFS: logbuf size for version 1 logs must be 16K or 32K"); 345 - return XFS_ERROR(EINVAL); 346 - } 347 - } 348 - 349 - if (xfs_sb_version_hasattr2(&mp->m_sb)) 350 - mp->m_flags |= XFS_MOUNT_ATTR2; 351 - 352 - /* 353 - * prohibit r/w mounts of read-only filesystems 354 - */ 355 - if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 356 - cmn_err(CE_WARN, 357 - "XFS: cannot mount a read-only filesystem as read-write"); 358 - return XFS_ERROR(EROFS); 359 - } 360 - 361 - /* 362 - * check for shared mount. 363 - */ 364 - if (ap->flags & XFSMNT_SHARED) { 365 - if (!xfs_sb_version_hasshared(&mp->m_sb)) 366 - return XFS_ERROR(EINVAL); 367 - 368 - /* 369 - * For IRIX 6.5, shared mounts must have the shared 370 - * version bit set, have the persistent readonly 371 - * field set, must be version 0 and can only be mounted 372 - * read-only. 373 - */ 374 - if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) || 375 - (mp->m_sb.sb_shared_vn != 0)) 376 - return XFS_ERROR(EINVAL); 377 - 378 - mp->m_flags |= XFS_MOUNT_SHARED; 379 - 380 - /* 381 - * Shared XFS V0 can't deal with DMI. Return EINVAL. 
382 - */ 383 - if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) 384 - return XFS_ERROR(EINVAL); 385 - } 386 - 387 - if (ap->flags & XFSMNT_UQUOTA) { 388 - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); 389 - if (ap->flags & XFSMNT_UQUOTAENF) 390 - mp->m_qflags |= XFS_UQUOTA_ENFD; 391 - } 392 - 393 - if (ap->flags & XFSMNT_GQUOTA) { 394 - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); 395 - if (ap->flags & XFSMNT_GQUOTAENF) 396 - mp->m_qflags |= XFS_OQUOTA_ENFD; 397 - } else if (ap->flags & XFSMNT_PQUOTA) { 398 - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); 399 - if (ap->flags & XFSMNT_PQUOTAENF) 400 - mp->m_qflags |= XFS_OQUOTA_ENFD; 401 - } 402 - 403 - return 0; 404 - } 405 - 406 - /* 407 - * xfs_mount 408 - * 409 - * The file system configurations are: 410 - * (1) device (partition) with data and internal log 411 - * (2) logical volume with data and log subvolumes. 412 - * (3) logical volume with data, log, and realtime subvolumes. 413 - * 414 - * We only have to handle opening the log and realtime volumes here if 415 - * they are present. The data subvolume has already been opened by 416 - * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev. 417 - */ 418 - int 419 - xfs_mount( 420 - struct xfs_mount *mp, 421 - struct xfs_mount_args *args, 422 - cred_t *credp) 423 - { 424 - struct block_device *ddev, *logdev, *rtdev; 425 - int flags = 0, error; 426 - 427 - ddev = mp->m_super->s_bdev; 428 - logdev = rtdev = NULL; 429 - 430 - error = xfs_dmops_get(mp, args); 431 - if (error) 432 - return error; 433 - error = xfs_qmops_get(mp, args); 434 - if (error) 435 - return error; 436 - 437 - if (args->flags & XFSMNT_QUIET) 438 - flags |= XFS_MFSI_QUIET; 439 - 440 - /* 441 - * Open real time and log devices - order is important. 
442 - */ 443 - if (args->logname[0]) { 444 - error = xfs_blkdev_get(mp, args->logname, &logdev); 445 - if (error) 446 - return error; 447 - } 448 - if (args->rtname[0]) { 449 - error = xfs_blkdev_get(mp, args->rtname, &rtdev); 450 - if (error) { 451 - xfs_blkdev_put(logdev); 452 - return error; 453 - } 454 - 455 - if (rtdev == ddev || rtdev == logdev) { 456 - cmn_err(CE_WARN, 457 - "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev."); 458 - xfs_blkdev_put(logdev); 459 - xfs_blkdev_put(rtdev); 460 - return EINVAL; 461 - } 462 - } 463 - 464 - /* 465 - * Setup xfs_mount buffer target pointers 466 - */ 467 - error = ENOMEM; 468 - mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0); 469 - if (!mp->m_ddev_targp) { 470 - xfs_blkdev_put(logdev); 471 - xfs_blkdev_put(rtdev); 472 - return error; 473 - } 474 - if (rtdev) { 475 - mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1); 476 - if (!mp->m_rtdev_targp) { 477 - xfs_blkdev_put(logdev); 478 - xfs_blkdev_put(rtdev); 479 - goto error0; 480 - } 481 - } 482 - mp->m_logdev_targp = (logdev && logdev != ddev) ? 
483 - xfs_alloc_buftarg(logdev, 1) : mp->m_ddev_targp; 484 - if (!mp->m_logdev_targp) { 485 - xfs_blkdev_put(logdev); 486 - xfs_blkdev_put(rtdev); 487 - goto error0; 488 - } 489 - 490 - /* 491 - * Setup flags based on mount(2) options and then the superblock 492 - */ 493 - error = xfs_start_flags(args, mp); 494 - if (error) 495 - goto error1; 496 - error = xfs_readsb(mp, flags); 497 - if (error) 498 - goto error1; 499 - error = xfs_finish_flags(args, mp); 500 - if (error) 501 - goto error2; 502 - 503 - /* 504 - * Setup xfs_mount buffer target pointers based on superblock 505 - */ 506 - error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, 507 - mp->m_sb.sb_sectsize); 508 - if (!error && logdev && logdev != ddev) { 509 - unsigned int log_sector_size = BBSIZE; 510 - 511 - if (xfs_sb_version_hassector(&mp->m_sb)) 512 - log_sector_size = mp->m_sb.sb_logsectsize; 513 - error = xfs_setsize_buftarg(mp->m_logdev_targp, 514 - mp->m_sb.sb_blocksize, 515 - log_sector_size); 516 - } 517 - if (!error && rtdev) 518 - error = xfs_setsize_buftarg(mp->m_rtdev_targp, 519 - mp->m_sb.sb_blocksize, 520 - mp->m_sb.sb_sectsize); 521 - if (error) 522 - goto error2; 523 - 524 - if (mp->m_flags & XFS_MOUNT_BARRIER) 525 - xfs_mountfs_check_barriers(mp); 526 - 527 - if ((error = xfs_filestream_mount(mp))) 528 - goto error2; 529 - 530 - error = xfs_mountfs(mp, flags); 531 - if (error) 532 - goto error2; 533 - 534 - XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname); 535 - 536 - return 0; 537 - 538 - error2: 539 - if (mp->m_sb_bp) 540 - xfs_freesb(mp); 541 - error1: 542 - xfs_binval(mp->m_ddev_targp); 543 - if (logdev && logdev != ddev) 544 - xfs_binval(mp->m_logdev_targp); 545 - if (rtdev) 546 - xfs_binval(mp->m_rtdev_targp); 547 - error0: 548 - xfs_unmountfs_close(mp, credp); 549 - xfs_qmops_put(mp); 550 - xfs_dmops_put(mp); 551 - return error; 552 - } 553 - 554 - int 555 - xfs_unmount( 556 - xfs_mount_t *mp, 557 - int flags, 558 - cred_t *credp) 559 - { 560 - 
xfs_inode_t *rip; 561 - bhv_vnode_t *rvp; 562 - int unmount_event_wanted = 0; 563 - int unmount_event_flags = 0; 564 - int xfs_unmountfs_needed = 0; 565 - int error; 566 - 567 - rip = mp->m_rootip; 568 - rvp = XFS_ITOV(rip); 569 - 570 - #ifdef HAVE_DMAPI 571 - if (mp->m_flags & XFS_MOUNT_DMAPI) { 572 - error = XFS_SEND_PREUNMOUNT(mp, 573 - rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL, 574 - NULL, NULL, 0, 0, 575 - (mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))? 576 - 0:DM_FLAGS_UNWANTED); 577 - if (error) 578 - return XFS_ERROR(error); 579 - unmount_event_wanted = 1; 580 - unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))? 581 - 0 : DM_FLAGS_UNWANTED; 582 - } 583 - #endif 584 - 585 - /* 586 - * Blow away any referenced inode in the filestreams cache. 587 - * This can and will cause log traffic as inodes go inactive 588 - * here. 589 - */ 590 - xfs_filestream_unmount(mp); 591 - 592 - XFS_bflush(mp->m_ddev_targp); 593 - error = xfs_unmount_flush(mp, 0); 594 - if (error) 595 - goto out; 596 - 597 - ASSERT(vn_count(rvp) == 1); 598 - 599 - /* 600 - * Drop the reference count 601 - */ 602 - IRELE(rip); 603 - 604 - /* 605 - * If we're forcing a shutdown, typically because of a media error, 606 - * we want to make sure we invalidate dirty pages that belong to 607 - * referenced vnodes as well. 608 - */ 609 - if (XFS_FORCED_SHUTDOWN(mp)) { 610 - error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE); 611 - ASSERT(error != EFSCORRUPTED); 612 - } 613 - xfs_unmountfs_needed = 1; 614 - 615 - out: 616 - /* Send DMAPI event, if required. 617 - * Then do xfs_unmountfs() if needed. 618 - * Then return error (or zero). 619 - */ 620 - if (unmount_event_wanted) { 621 - /* Note: mp structure must still exist for 622 - * XFS_SEND_UNMOUNT() call. 623 - */ 624 - XFS_SEND_UNMOUNT(mp, error == 0 ? 
rip : NULL, 625 - DM_RIGHT_NULL, 0, error, unmount_event_flags); 626 - } 627 - if (xfs_unmountfs_needed) { 628 - /* 629 - * Call common unmount function to flush to disk 630 - * and free the super block buffer & mount structures. 631 - */ 632 - xfs_unmountfs(mp, credp); 633 - xfs_qmops_put(mp); 634 - xfs_dmops_put(mp); 635 - kmem_free(mp, sizeof(xfs_mount_t)); 636 - } 637 - 638 - return XFS_ERROR(error); 639 - } 640 - 641 61 STATIC void 642 62 xfs_quiesce_fs( 643 63 xfs_mount_t *mp) ··· 112 692 "Frozen image may not be consistent."); 113 693 xfs_log_unmount_write(mp); 114 694 xfs_unmountfs_writesb(mp); 115 - } 116 - 117 - int 118 - xfs_mntupdate( 119 - struct xfs_mount *mp, 120 - int *flags, 121 - struct xfs_mount_args *args) 122 - { 123 - if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */ 124 - if (mp->m_flags & XFS_MOUNT_RDONLY) 125 - mp->m_flags &= ~XFS_MOUNT_RDONLY; 126 - if (args->flags & XFSMNT_BARRIER) { 127 - mp->m_flags |= XFS_MOUNT_BARRIER; 128 - xfs_mountfs_check_barriers(mp); 129 - } else { 130 - mp->m_flags &= ~XFS_MOUNT_BARRIER; 131 - } 132 - } else if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { /* rw -> ro */ 133 - xfs_filestream_flush(mp); 134 - xfs_sync(mp, SYNC_DATA_QUIESCE); 135 - xfs_attr_quiesce(mp); 136 - mp->m_flags |= XFS_MOUNT_RDONLY; 137 - } 138 - return 0; 139 695 } 140 696 141 697 /* ··· 444 1048 445 1049 if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { 446 1050 XFS_MOUNT_IUNLOCK(mp); 447 - kmem_free(ipointer, sizeof(xfs_iptr_t)); 1051 + kmem_free(ipointer); 448 1052 return 0; 449 1053 } 450 1054 ··· 590 1194 } 591 1195 XFS_MOUNT_IUNLOCK(mp); 592 1196 ASSERT(ipointer_in == B_FALSE); 593 - kmem_free(ipointer, sizeof(xfs_iptr_t)); 1197 + kmem_free(ipointer); 594 1198 return XFS_ERROR(error); 595 1199 } 596 1200 ··· 620 1224 621 1225 ASSERT(ipointer_in == B_FALSE); 622 1226 623 - kmem_free(ipointer, sizeof(xfs_iptr_t)); 1227 + kmem_free(ipointer); 624 1228 return XFS_ERROR(last_error); 625 1229 } 626 1230
-5
fs/xfs/xfs_vfsops.h
··· 8 8 struct xfs_mount; 9 9 struct xfs_mount_args; 10 10 11 - int xfs_mount(struct xfs_mount *mp, struct xfs_mount_args *args, 12 - struct cred *credp); 13 - int xfs_unmount(struct xfs_mount *mp, int flags, struct cred *credp); 14 - int xfs_mntupdate(struct xfs_mount *mp, int *flags, 15 - struct xfs_mount_args *args); 16 11 int xfs_sync(struct xfs_mount *mp, int flags); 17 12 void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, 18 13 int lnnum);
+200 -522
fs/xfs/xfs_vnodeops.c
··· 75 75 return 0; 76 76 } 77 77 78 - /* 79 - * xfs_setattr 80 - */ 81 78 int 82 79 xfs_setattr( 83 - xfs_inode_t *ip, 84 - bhv_vattr_t *vap, 80 + struct xfs_inode *ip, 81 + struct iattr *iattr, 85 82 int flags, 86 83 cred_t *credp) 87 84 { 88 85 xfs_mount_t *mp = ip->i_mount; 86 + struct inode *inode = XFS_ITOV(ip); 87 + int mask = iattr->ia_valid; 89 88 xfs_trans_t *tp; 90 - int mask; 91 89 int code; 92 90 uint lock_flags; 93 91 uint commit_flags=0; 94 92 uid_t uid=0, iuid=0; 95 93 gid_t gid=0, igid=0; 96 94 int timeflags = 0; 97 - xfs_prid_t projid=0, iprojid=0; 98 95 struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; 99 96 int file_owner; 100 97 int need_iolock = 1; ··· 101 104 if (mp->m_flags & XFS_MOUNT_RDONLY) 102 105 return XFS_ERROR(EROFS); 103 106 104 - /* 105 - * Cannot set certain attributes. 106 - */ 107 - mask = vap->va_mask; 108 - if (mask & XFS_AT_NOSET) { 109 - return XFS_ERROR(EINVAL); 110 - } 111 - 112 107 if (XFS_FORCED_SHUTDOWN(mp)) 113 108 return XFS_ERROR(EIO); 114 - 115 - /* 116 - * Timestamps do not need to be logged and hence do not 117 - * need to be done within a transaction. 118 - */ 119 - if (mask & XFS_AT_UPDTIMES) { 120 - ASSERT((mask & ~XFS_AT_UPDTIMES) == 0); 121 - timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) | 122 - ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) | 123 - ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0); 124 - xfs_ichgtime(ip, timeflags); 125 - return 0; 126 - } 127 109 128 110 olddquot1 = olddquot2 = NULL; 129 111 udqp = gdqp = NULL; ··· 115 139 * If the IDs do change before we take the ilock, we're covered 116 140 * because the i_*dquot fields will get updated anyway. 
117 141 */ 118 - if (XFS_IS_QUOTA_ON(mp) && 119 - (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID))) { 142 + if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { 120 143 uint qflags = 0; 121 144 122 - if ((mask & XFS_AT_UID) && XFS_IS_UQUOTA_ON(mp)) { 123 - uid = vap->va_uid; 145 + if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { 146 + uid = iattr->ia_uid; 124 147 qflags |= XFS_QMOPT_UQUOTA; 125 148 } else { 126 149 uid = ip->i_d.di_uid; 127 150 } 128 - if ((mask & XFS_AT_GID) && XFS_IS_GQUOTA_ON(mp)) { 129 - gid = vap->va_gid; 151 + if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { 152 + gid = iattr->ia_gid; 130 153 qflags |= XFS_QMOPT_GQUOTA; 131 154 } else { 132 155 gid = ip->i_d.di_gid; 133 156 } 134 - if ((mask & XFS_AT_PROJID) && XFS_IS_PQUOTA_ON(mp)) { 135 - projid = vap->va_projid; 136 - qflags |= XFS_QMOPT_PQUOTA; 137 - } else { 138 - projid = ip->i_d.di_projid; 139 - } 157 + 140 158 /* 141 159 * We take a reference when we initialize udqp and gdqp, 142 160 * so it is important that we never blindly double trip on ··· 138 168 */ 139 169 ASSERT(udqp == NULL); 140 170 ASSERT(gdqp == NULL); 141 - code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags, 142 - &udqp, &gdqp); 171 + code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, ip->i_d.di_projid, 172 + qflags, &udqp, &gdqp); 143 173 if (code) 144 174 return code; 145 175 } ··· 150 180 */ 151 181 tp = NULL; 152 182 lock_flags = XFS_ILOCK_EXCL; 153 - if (flags & ATTR_NOLOCK) 183 + if (flags & XFS_ATTR_NOLOCK) 154 184 need_iolock = 0; 155 - if (!(mask & XFS_AT_SIZE)) { 156 - if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) || 185 + if (!(mask & ATTR_SIZE)) { 186 + if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) || 157 187 (mp->m_flags & XFS_MOUNT_WSYNC)) { 158 188 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); 159 189 commit_flags = 0; ··· 166 196 } 167 197 } else { 168 198 if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) && 169 - !(flags & ATTR_DMI)) { 199 + !(flags & XFS_ATTR_DMI)) { 170 200 int dmflags 
= AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR; 171 201 code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip, 172 - vap->va_size, 0, dmflags, NULL); 202 + iattr->ia_size, 0, dmflags, NULL); 173 203 if (code) { 174 204 lock_flags = 0; 175 205 goto error_return; ··· 189 219 * Only the owner or users with CAP_FOWNER 190 220 * capability may do these things. 191 221 */ 192 - if (mask & 193 - (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID| 194 - XFS_AT_GID|XFS_AT_PROJID)) { 222 + if (mask & (ATTR_MODE|ATTR_UID|ATTR_GID)) { 195 223 /* 196 224 * CAP_FOWNER overrides the following restrictions: 197 225 * ··· 213 245 * IDs of the calling process shall match the group owner of 214 246 * the file when setting the set-group-ID bit on that file 215 247 */ 216 - if (mask & XFS_AT_MODE) { 248 + if (mask & ATTR_MODE) { 217 249 mode_t m = 0; 218 250 219 - if ((vap->va_mode & S_ISUID) && !file_owner) 251 + if ((iattr->ia_mode & S_ISUID) && !file_owner) 220 252 m |= S_ISUID; 221 - if ((vap->va_mode & S_ISGID) && 253 + if ((iattr->ia_mode & S_ISGID) && 222 254 !in_group_p((gid_t)ip->i_d.di_gid)) 223 255 m |= S_ISGID; 224 256 #if 0 225 257 /* Linux allows this, Irix doesn't. */ 226 - if ((vap->va_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode)) 258 + if ((iattr->ia_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode)) 227 259 m |= S_ISVTX; 228 260 #endif 229 261 if (m && !capable(CAP_FSETID)) 230 - vap->va_mode &= ~m; 262 + iattr->ia_mode &= ~m; 231 263 } 232 264 } 233 265 ··· 238 270 * and can change the group id only to a group of which he 239 271 * or she is a member. 240 272 */ 241 - if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { 273 + if (mask & (ATTR_UID|ATTR_GID)) { 242 274 /* 243 275 * These IDs could have changed since we last looked at them. 244 276 * But, we're assured that if the ownership did change ··· 246 278 * would have changed also. 247 279 */ 248 280 iuid = ip->i_d.di_uid; 249 - iprojid = ip->i_d.di_projid; 250 281 igid = ip->i_d.di_gid; 251 - gid = (mask & XFS_AT_GID) ? 
vap->va_gid : igid; 252 - uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid; 253 - projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid : 254 - iprojid; 282 + gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; 283 + uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; 255 284 256 285 /* 257 286 * CAP_CHOWN overrides the following restrictions: ··· 268 303 goto error_return; 269 304 } 270 305 /* 271 - * Do a quota reservation only if uid/projid/gid is actually 306 + * Do a quota reservation only if uid/gid is actually 272 307 * going to change. 273 308 */ 274 309 if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || 275 - (XFS_IS_PQUOTA_ON(mp) && iprojid != projid) || 276 310 (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { 277 311 ASSERT(tp); 278 312 code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, ··· 285 321 /* 286 322 * Truncate file. Must have write permission and not be a directory. 287 323 */ 288 - if (mask & XFS_AT_SIZE) { 324 + if (mask & ATTR_SIZE) { 289 325 /* Short circuit the truncate case for zero length files */ 290 - if ((vap->va_size == 0) && 291 - (ip->i_size == 0) && (ip->i_d.di_nextents == 0)) { 326 + if (iattr->ia_size == 0 && 327 + ip->i_size == 0 && ip->i_d.di_nextents == 0) { 292 328 xfs_iunlock(ip, XFS_ILOCK_EXCL); 293 329 lock_flags &= ~XFS_ILOCK_EXCL; 294 - if (mask & XFS_AT_CTIME) 330 + if (mask & ATTR_CTIME) 295 331 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 296 332 code = 0; 297 333 goto error_return; ··· 314 350 /* 315 351 * Change file access or modified times. 316 352 */ 317 - if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { 353 + if (mask & (ATTR_ATIME|ATTR_MTIME)) { 318 354 if (!file_owner) { 319 - if ((flags & ATTR_UTIME) && 355 + if ((mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)) && 320 356 !capable(CAP_FOWNER)) { 321 357 code = XFS_ERROR(EPERM); 322 358 goto error_return; ··· 325 361 } 326 362 327 363 /* 328 - * Change extent size or realtime flag. 
329 - */ 330 - if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) { 331 - /* 332 - * Can't change extent size if any extents are allocated. 333 - */ 334 - if (ip->i_d.di_nextents && (mask & XFS_AT_EXTSIZE) && 335 - ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != 336 - vap->va_extsize) ) { 337 - code = XFS_ERROR(EINVAL); /* EFBIG? */ 338 - goto error_return; 339 - } 340 - 341 - /* 342 - * Can't change realtime flag if any extents are allocated. 343 - */ 344 - if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 345 - (mask & XFS_AT_XFLAGS) && 346 - (XFS_IS_REALTIME_INODE(ip)) != 347 - (vap->va_xflags & XFS_XFLAG_REALTIME)) { 348 - code = XFS_ERROR(EINVAL); /* EFBIG? */ 349 - goto error_return; 350 - } 351 - /* 352 - * Extent size must be a multiple of the appropriate block 353 - * size, if set at all. 354 - */ 355 - if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { 356 - xfs_extlen_t size; 357 - 358 - if (XFS_IS_REALTIME_INODE(ip) || 359 - ((mask & XFS_AT_XFLAGS) && 360 - (vap->va_xflags & XFS_XFLAG_REALTIME))) { 361 - size = mp->m_sb.sb_rextsize << 362 - mp->m_sb.sb_blocklog; 363 - } else { 364 - size = mp->m_sb.sb_blocksize; 365 - } 366 - if (vap->va_extsize % size) { 367 - code = XFS_ERROR(EINVAL); 368 - goto error_return; 369 - } 370 - } 371 - /* 372 - * If realtime flag is set then must have realtime data. 373 - */ 374 - if ((mask & XFS_AT_XFLAGS) && 375 - (vap->va_xflags & XFS_XFLAG_REALTIME)) { 376 - if ((mp->m_sb.sb_rblocks == 0) || 377 - (mp->m_sb.sb_rextsize == 0) || 378 - (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { 379 - code = XFS_ERROR(EINVAL); 380 - goto error_return; 381 - } 382 - } 383 - 384 - /* 385 - * Can't modify an immutable/append-only file unless 386 - * we have appropriate permission. 
387 - */ 388 - if ((mask & XFS_AT_XFLAGS) && 389 - (ip->i_d.di_flags & 390 - (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) || 391 - (vap->va_xflags & 392 - (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && 393 - !capable(CAP_LINUX_IMMUTABLE)) { 394 - code = XFS_ERROR(EPERM); 395 - goto error_return; 396 - } 397 - } 398 - 399 - /* 400 364 * Now we can make the changes. Before we join the inode 401 - * to the transaction, if XFS_AT_SIZE is set then take care of 365 + * to the transaction, if ATTR_SIZE is set then take care of 402 366 * the part of the truncation that must be done without the 403 367 * inode lock. This needs to be done before joining the inode 404 368 * to the transaction, because the inode cannot be unlocked 405 369 * once it is a part of the transaction. 406 370 */ 407 - if (mask & XFS_AT_SIZE) { 371 + if (mask & ATTR_SIZE) { 408 372 code = 0; 409 - if ((vap->va_size > ip->i_size) && 410 - (flags & ATTR_NOSIZETOK) == 0) { 411 - code = xfs_igrow_start(ip, vap->va_size, credp); 373 + if (iattr->ia_size > ip->i_size) { 374 + /* 375 + * Do the first part of growing a file: zero any data 376 + * in the last block that is beyond the old EOF. We 377 + * need to do this before the inode is joined to the 378 + * transaction to modify the i_size. 379 + */ 380 + code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); 412 381 } 413 382 xfs_iunlock(ip, XFS_ILOCK_EXCL); 414 383 ··· 358 461 * not within the range we care about here. 
359 462 */ 360 463 if (!code && 361 - (ip->i_size != ip->i_d.di_size) && 362 - (vap->va_size > ip->i_d.di_size)) { 464 + ip->i_size != ip->i_d.di_size && 465 + iattr->ia_size > ip->i_d.di_size) { 363 466 code = xfs_flush_pages(ip, 364 - ip->i_d.di_size, vap->va_size, 467 + ip->i_d.di_size, iattr->ia_size, 365 468 XFS_B_ASYNC, FI_NONE); 366 469 } 367 470 ··· 369 472 vn_iowait(ip); 370 473 371 474 if (!code) 372 - code = xfs_itruncate_data(ip, vap->va_size); 475 + code = xfs_itruncate_data(ip, iattr->ia_size); 373 476 if (code) { 374 477 ASSERT(tp == NULL); 375 478 lock_flags &= ~XFS_ILOCK_EXCL; ··· 398 501 /* 399 502 * Truncate file. Must have write permission and not be a directory. 400 503 */ 401 - if (mask & XFS_AT_SIZE) { 504 + if (mask & ATTR_SIZE) { 402 505 /* 403 506 * Only change the c/mtime if we are changing the size 404 507 * or we are explicitly asked to change it. This handles 405 508 * the semantic difference between truncate() and ftruncate() 406 509 * as implemented in the VFS. 
407 510 */ 408 - if (vap->va_size != ip->i_size || (mask & XFS_AT_CTIME)) 511 + if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME)) 409 512 timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 410 513 411 - if (vap->va_size > ip->i_size) { 412 - xfs_igrow_finish(tp, ip, vap->va_size, 413 - !(flags & ATTR_DMI)); 414 - } else if ((vap->va_size <= ip->i_size) || 415 - ((vap->va_size == 0) && ip->i_d.di_nextents)) { 514 + if (iattr->ia_size > ip->i_size) { 515 + ip->i_d.di_size = iattr->ia_size; 516 + ip->i_size = iattr->ia_size; 517 + if (!(flags & XFS_ATTR_DMI)) 518 + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 519 + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 520 + } else if (iattr->ia_size <= ip->i_size || 521 + (iattr->ia_size == 0 && ip->i_d.di_nextents)) { 416 522 /* 417 523 * signal a sync transaction unless 418 524 * we're truncating an already unlinked 419 525 * file on a wsync filesystem 420 526 */ 421 - code = xfs_itruncate_finish(&tp, ip, 422 - (xfs_fsize_t)vap->va_size, 527 + code = xfs_itruncate_finish(&tp, ip, iattr->ia_size, 423 528 XFS_DATA_FORK, 424 529 ((ip->i_d.di_nlink != 0 || 425 530 !(mp->m_flags & XFS_MOUNT_WSYNC)) ··· 443 544 /* 444 545 * Change file access modes. 445 546 */ 446 - if (mask & XFS_AT_MODE) { 547 + if (mask & ATTR_MODE) { 447 548 ip->i_d.di_mode &= S_IFMT; 448 - ip->i_d.di_mode |= vap->va_mode & ~S_IFMT; 549 + ip->i_d.di_mode |= iattr->ia_mode & ~S_IFMT; 550 + 551 + inode->i_mode &= S_IFMT; 552 + inode->i_mode |= iattr->ia_mode & ~S_IFMT; 449 553 450 554 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); 451 555 timeflags |= XFS_ICHGTIME_CHG; ··· 461 559 * and can change the group id only to a group of which he 462 560 * or she is a member. 
463 561 */ 464 - if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { 562 + if (mask & (ATTR_UID|ATTR_GID)) { 465 563 /* 466 564 * CAP_FSETID overrides the following restrictions: 467 565 * ··· 479 577 */ 480 578 if (iuid != uid) { 481 579 if (XFS_IS_UQUOTA_ON(mp)) { 482 - ASSERT(mask & XFS_AT_UID); 580 + ASSERT(mask & ATTR_UID); 483 581 ASSERT(udqp); 484 582 olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 485 583 &ip->i_udquot, udqp); 486 584 } 487 585 ip->i_d.di_uid = uid; 586 + inode->i_uid = uid; 488 587 } 489 588 if (igid != gid) { 490 589 if (XFS_IS_GQUOTA_ON(mp)) { 491 590 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 492 - ASSERT(mask & XFS_AT_GID); 591 + ASSERT(mask & ATTR_GID); 493 592 ASSERT(gdqp); 494 593 olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 495 594 &ip->i_gdquot, gdqp); 496 595 } 497 596 ip->i_d.di_gid = gid; 498 - } 499 - if (iprojid != projid) { 500 - if (XFS_IS_PQUOTA_ON(mp)) { 501 - ASSERT(!XFS_IS_GQUOTA_ON(mp)); 502 - ASSERT(mask & XFS_AT_PROJID); 503 - ASSERT(gdqp); 504 - olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 505 - &ip->i_gdquot, gdqp); 506 - } 507 - ip->i_d.di_projid = projid; 508 - /* 509 - * We may have to rev the inode as well as 510 - * the superblock version number since projids didn't 511 - * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. 512 - */ 513 - if (ip->i_d.di_version == XFS_DINODE_VERSION_1) 514 - xfs_bump_ino_vers2(tp, ip); 597 + inode->i_gid = gid; 515 598 } 516 599 517 600 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); ··· 507 620 /* 508 621 * Change file access or modified times. 
509 622 */ 510 - if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { 511 - if (mask & XFS_AT_ATIME) { 512 - ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec; 513 - ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec; 623 + if (mask & (ATTR_ATIME|ATTR_MTIME)) { 624 + if (mask & ATTR_ATIME) { 625 + inode->i_atime = iattr->ia_atime; 626 + ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; 627 + ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; 514 628 ip->i_update_core = 1; 515 629 timeflags &= ~XFS_ICHGTIME_ACC; 516 630 } 517 - if (mask & XFS_AT_MTIME) { 518 - ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec; 519 - ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec; 631 + if (mask & ATTR_MTIME) { 632 + inode->i_mtime = iattr->ia_mtime; 633 + ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; 634 + ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; 520 635 timeflags &= ~XFS_ICHGTIME_MOD; 521 636 timeflags |= XFS_ICHGTIME_CHG; 522 637 } 523 - if (tp && (flags & ATTR_UTIME)) 638 + if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET))) 524 639 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); 525 640 } 526 641 527 642 /* 528 - * Change XFS-added attributes. 529 - */ 530 - if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) { 531 - if (mask & XFS_AT_EXTSIZE) { 532 - /* 533 - * Converting bytes to fs blocks. 
534 - */ 535 - ip->i_d.di_extsize = vap->va_extsize >> 536 - mp->m_sb.sb_blocklog; 537 - } 538 - if (mask & XFS_AT_XFLAGS) { 539 - uint di_flags; 540 - 541 - /* can't set PREALLOC this way, just preserve it */ 542 - di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); 543 - if (vap->va_xflags & XFS_XFLAG_IMMUTABLE) 544 - di_flags |= XFS_DIFLAG_IMMUTABLE; 545 - if (vap->va_xflags & XFS_XFLAG_APPEND) 546 - di_flags |= XFS_DIFLAG_APPEND; 547 - if (vap->va_xflags & XFS_XFLAG_SYNC) 548 - di_flags |= XFS_DIFLAG_SYNC; 549 - if (vap->va_xflags & XFS_XFLAG_NOATIME) 550 - di_flags |= XFS_DIFLAG_NOATIME; 551 - if (vap->va_xflags & XFS_XFLAG_NODUMP) 552 - di_flags |= XFS_DIFLAG_NODUMP; 553 - if (vap->va_xflags & XFS_XFLAG_PROJINHERIT) 554 - di_flags |= XFS_DIFLAG_PROJINHERIT; 555 - if (vap->va_xflags & XFS_XFLAG_NODEFRAG) 556 - di_flags |= XFS_DIFLAG_NODEFRAG; 557 - if (vap->va_xflags & XFS_XFLAG_FILESTREAM) 558 - di_flags |= XFS_DIFLAG_FILESTREAM; 559 - if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 560 - if (vap->va_xflags & XFS_XFLAG_RTINHERIT) 561 - di_flags |= XFS_DIFLAG_RTINHERIT; 562 - if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS) 563 - di_flags |= XFS_DIFLAG_NOSYMLINKS; 564 - if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT) 565 - di_flags |= XFS_DIFLAG_EXTSZINHERIT; 566 - } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 567 - if (vap->va_xflags & XFS_XFLAG_REALTIME) 568 - di_flags |= XFS_DIFLAG_REALTIME; 569 - if (vap->va_xflags & XFS_XFLAG_EXTSIZE) 570 - di_flags |= XFS_DIFLAG_EXTSIZE; 571 - } 572 - ip->i_d.di_flags = di_flags; 573 - } 574 - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 575 - timeflags |= XFS_ICHGTIME_CHG; 576 - } 577 - 578 - /* 579 - * Change file inode change time only if XFS_AT_CTIME set 643 + * Change file inode change time only if ATTR_CTIME set 580 644 * AND we have been called by a DMI function. 
581 645 */ 582 646 583 - if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) { 584 - ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec; 585 - ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec; 647 + if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) { 648 + inode->i_ctime = iattr->ia_ctime; 649 + ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; 650 + ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; 586 651 ip->i_update_core = 1; 587 652 timeflags &= ~XFS_ICHGTIME_CHG; 588 653 } ··· 543 704 * Send out timestamp changes that need to be set to the 544 705 * current time. Not done when called by a DMI function. 545 706 */ 546 - if (timeflags && !(flags & ATTR_DMI)) 707 + if (timeflags && !(flags & XFS_ATTR_DMI)) 547 708 xfs_ichgtime(ip, timeflags); 548 709 549 710 XFS_STATS_INC(xs_ig_attrchg); ··· 581 742 } 582 743 583 744 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && 584 - !(flags & ATTR_DMI)) { 745 + !(flags & XFS_ATTR_DMI)) { 585 746 (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, 586 747 NULL, DM_RIGHT_NULL, NULL, NULL, 587 748 0, 0, AT_DELAY_FLAG(flags)); ··· 1440 1601 return VN_INACTIVE_CACHE; 1441 1602 } 1442 1603 1443 - 1604 + /* 1605 + * Lookups up an inode from "name". If ci_name is not NULL, then a CI match 1606 + * is allowed, otherwise it has to be an exact match. If a CI match is found, 1607 + * ci_name->name will point to a the actual name (caller must free) or 1608 + * will be set to NULL if an exact match is found. 
1609 + */ 1444 1610 int 1445 1611 xfs_lookup( 1446 1612 xfs_inode_t *dp, 1447 1613 struct xfs_name *name, 1448 - xfs_inode_t **ipp) 1614 + xfs_inode_t **ipp, 1615 + struct xfs_name *ci_name) 1449 1616 { 1450 1617 xfs_ino_t inum; 1451 1618 int error; ··· 1463 1618 return XFS_ERROR(EIO); 1464 1619 1465 1620 lock_mode = xfs_ilock_map_shared(dp); 1466 - error = xfs_dir_lookup(NULL, dp, name, &inum); 1621 + error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); 1467 1622 xfs_iunlock_map_shared(dp, lock_mode); 1468 1623 1469 1624 if (error) ··· 1471 1626 1472 1627 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); 1473 1628 if (error) 1474 - goto out; 1629 + goto out_free_name; 1475 1630 1476 1631 xfs_itrace_ref(*ipp); 1477 1632 return 0; 1478 1633 1479 - out: 1634 + out_free_name: 1635 + if (ci_name) 1636 + kmem_free(ci_name->name); 1637 + out: 1480 1638 *ipp = NULL; 1481 1639 return error; 1482 1640 } ··· 1946 2098 #endif 1947 2099 } 1948 2100 1949 - #ifdef DEBUG 1950 - #define REMOVE_DEBUG_TRACE(x) {remove_which_error_return = (x);} 1951 - int remove_which_error_return = 0; 1952 - #else /* ! DEBUG */ 1953 - #define REMOVE_DEBUG_TRACE(x) 1954 - #endif /* ! 
DEBUG */ 1955 - 1956 2101 int 1957 2102 xfs_remove( 1958 2103 xfs_inode_t *dp, ··· 1954 2113 { 1955 2114 xfs_mount_t *mp = dp->i_mount; 1956 2115 xfs_trans_t *tp = NULL; 2116 + int is_dir = S_ISDIR(ip->i_d.di_mode); 1957 2117 int error = 0; 1958 2118 xfs_bmap_free_t free_list; 1959 2119 xfs_fsblock_t first_block; ··· 1962 2120 int committed; 1963 2121 int link_zero; 1964 2122 uint resblks; 2123 + uint log_count; 1965 2124 1966 2125 xfs_itrace_entry(dp); 2126 + xfs_itrace_entry(ip); 1967 2127 1968 2128 if (XFS_FORCED_SHUTDOWN(mp)) 1969 2129 return XFS_ERROR(EIO); ··· 1978 2134 return error; 1979 2135 } 1980 2136 1981 - xfs_itrace_entry(ip); 1982 - xfs_itrace_ref(ip); 1983 - 1984 2137 error = XFS_QM_DQATTACH(mp, dp, 0); 1985 - if (!error) 1986 - error = XFS_QM_DQATTACH(mp, ip, 0); 1987 - if (error) { 1988 - REMOVE_DEBUG_TRACE(__LINE__); 2138 + if (error) 1989 2139 goto std_return; 1990 - } 1991 2140 1992 - tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); 2141 + error = XFS_QM_DQATTACH(mp, ip, 0); 2142 + if (error) 2143 + goto std_return; 2144 + 2145 + if (is_dir) { 2146 + tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); 2147 + log_count = XFS_DEFAULT_LOG_COUNT; 2148 + } else { 2149 + tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); 2150 + log_count = XFS_REMOVE_LOG_COUNT; 2151 + } 1993 2152 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 2153 + 1994 2154 /* 1995 2155 * We try to get the real space reservation first, 1996 2156 * allowing for directory btree deletion(s) implying ··· 2006 2158 */ 2007 2159 resblks = XFS_REMOVE_SPACE_RES(mp); 2008 2160 error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, 2009 - XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); 2161 + XFS_TRANS_PERM_LOG_RES, log_count); 2010 2162 if (error == ENOSPC) { 2011 2163 resblks = 0; 2012 2164 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, 2013 - XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); 2165 + XFS_TRANS_PERM_LOG_RES, log_count); 2014 2166 } 2015 2167 if (error) { 2016 2168 ASSERT(error != 
ENOSPC); 2017 - REMOVE_DEBUG_TRACE(__LINE__); 2018 - xfs_trans_cancel(tp, 0); 2019 - return error; 2169 + cancel_flags = 0; 2170 + goto out_trans_cancel; 2020 2171 } 2021 2172 2022 2173 error = xfs_lock_dir_and_entry(dp, ip); 2023 - if (error) { 2024 - REMOVE_DEBUG_TRACE(__LINE__); 2025 - xfs_trans_cancel(tp, cancel_flags); 2026 - goto std_return; 2027 - } 2174 + if (error) 2175 + goto out_trans_cancel; 2028 2176 2029 2177 /* 2030 2178 * At this point, we've gotten both the directory and the entry ··· 2033 2189 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 2034 2190 2035 2191 /* 2192 + * If we're removing a directory perform some additional validation. 2193 + */ 2194 + if (is_dir) { 2195 + ASSERT(ip->i_d.di_nlink >= 2); 2196 + if (ip->i_d.di_nlink != 2) { 2197 + error = XFS_ERROR(ENOTEMPTY); 2198 + goto out_trans_cancel; 2199 + } 2200 + if (!xfs_dir_isempty(ip)) { 2201 + error = XFS_ERROR(ENOTEMPTY); 2202 + goto out_trans_cancel; 2203 + } 2204 + } 2205 + 2206 + /* 2036 2207 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. 2037 2208 */ 2038 2209 XFS_BMAP_INIT(&free_list, &first_block); ··· 2055 2196 &first_block, &free_list, resblks); 2056 2197 if (error) { 2057 2198 ASSERT(error != ENOENT); 2058 - REMOVE_DEBUG_TRACE(__LINE__); 2059 - goto error1; 2199 + goto out_bmap_cancel; 2060 2200 } 2061 2201 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2062 2202 2203 + /* 2204 + * Bump the in memory generation count on the parent 2205 + * directory so that other can know that it has changed. 2206 + */ 2063 2207 dp->i_gen++; 2064 2208 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2065 2209 2066 - error = xfs_droplink(tp, ip); 2067 - if (error) { 2068 - REMOVE_DEBUG_TRACE(__LINE__); 2069 - goto error1; 2210 + if (is_dir) { 2211 + /* 2212 + * Drop the link from ip's "..". 2213 + */ 2214 + error = xfs_droplink(tp, dp); 2215 + if (error) 2216 + goto out_bmap_cancel; 2217 + 2218 + /* 2219 + * Drop the link from dp to ip. 
2220 + */ 2221 + error = xfs_droplink(tp, ip); 2222 + if (error) 2223 + goto out_bmap_cancel; 2224 + } else { 2225 + /* 2226 + * When removing a non-directory we need to log the parent 2227 + * inode here for the i_gen update. For a directory this is 2228 + * done implicitly by the xfs_droplink call for the ".." entry. 2229 + */ 2230 + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2070 2231 } 2071 2232 2072 - /* Determine if this is the last link while 2233 + /* 2234 + * Drop the "." link from ip to self. 2235 + */ 2236 + error = xfs_droplink(tp, ip); 2237 + if (error) 2238 + goto out_bmap_cancel; 2239 + 2240 + /* 2241 + * Determine if this is the last link while 2073 2242 * we are in the transaction. 2074 2243 */ 2075 - link_zero = (ip)->i_d.di_nlink==0; 2244 + link_zero = (ip->i_d.di_nlink == 0); 2076 2245 2077 2246 /* 2078 2247 * If this is a synchronous mount, make sure that the 2079 2248 * remove transaction goes to disk before returning to 2080 2249 * the user. 2081 2250 */ 2082 - if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 2251 + if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2083 2252 xfs_trans_set_sync(tp); 2084 - } 2085 2253 2086 2254 error = xfs_bmap_finish(&tp, &free_list, &committed); 2087 - if (error) { 2088 - REMOVE_DEBUG_TRACE(__LINE__); 2089 - goto error_rele; 2090 - } 2255 + if (error) 2256 + goto out_bmap_cancel; 2091 2257 2092 2258 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 2093 2259 if (error) ··· 2124 2240 * will get killed on last close in xfs_close() so we don't 2125 2241 * have to worry about that. 
2126 2242 */ 2127 - if (link_zero && xfs_inode_is_filestream(ip)) 2243 + if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) 2128 2244 xfs_filestream_deassociate(ip); 2129 2245 2130 2246 xfs_itrace_exit(ip); 2247 + xfs_itrace_exit(dp); 2131 2248 2132 - /* Fall through to std_return with error = 0 */ 2133 2249 std_return: 2134 2250 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { 2135 - (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, 2136 - dp, DM_RIGHT_NULL, 2137 - NULL, DM_RIGHT_NULL, 2138 - name->name, NULL, ip->i_d.di_mode, error, 0); 2251 + XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL, 2252 + NULL, DM_RIGHT_NULL, name->name, NULL, 2253 + ip->i_d.di_mode, error, 0); 2139 2254 } 2255 + 2140 2256 return error; 2141 2257 2142 - error1: 2258 + out_bmap_cancel: 2143 2259 xfs_bmap_cancel(&free_list); 2144 2260 cancel_flags |= XFS_TRANS_ABORT; 2261 + out_trans_cancel: 2145 2262 xfs_trans_cancel(tp, cancel_flags); 2146 - goto std_return; 2147 - 2148 - error_rele: 2149 - /* 2150 - * In this case make sure to not release the inode until after 2151 - * the current transaction is aborted. Releasing it beforehand 2152 - * can cause us to go to xfs_inactive and start a recursive 2153 - * transaction which can easily deadlock with the current one. 
2154 - */ 2155 - xfs_bmap_cancel(&free_list); 2156 - cancel_flags |= XFS_TRANS_ABORT; 2157 - xfs_trans_cancel(tp, cancel_flags); 2158 - 2159 2263 goto std_return; 2160 2264 } 2161 2265 ··· 2506 2634 if (unlock_dp_on_error) 2507 2635 xfs_iunlock(dp, XFS_ILOCK_EXCL); 2508 2636 2509 - goto std_return; 2510 - } 2511 - 2512 - int 2513 - xfs_rmdir( 2514 - xfs_inode_t *dp, 2515 - struct xfs_name *name, 2516 - xfs_inode_t *cdp) 2517 - { 2518 - xfs_mount_t *mp = dp->i_mount; 2519 - xfs_trans_t *tp; 2520 - int error; 2521 - xfs_bmap_free_t free_list; 2522 - xfs_fsblock_t first_block; 2523 - int cancel_flags; 2524 - int committed; 2525 - int last_cdp_link; 2526 - uint resblks; 2527 - 2528 - xfs_itrace_entry(dp); 2529 - 2530 - if (XFS_FORCED_SHUTDOWN(mp)) 2531 - return XFS_ERROR(EIO); 2532 - 2533 - if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { 2534 - error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, 2535 - dp, DM_RIGHT_NULL, 2536 - NULL, DM_RIGHT_NULL, name->name, 2537 - NULL, cdp->i_d.di_mode, 0, 0); 2538 - if (error) 2539 - return XFS_ERROR(error); 2540 - } 2541 - 2542 - /* 2543 - * Get the dquots for the inodes. 2544 - */ 2545 - error = XFS_QM_DQATTACH(mp, dp, 0); 2546 - if (!error) 2547 - error = XFS_QM_DQATTACH(mp, cdp, 0); 2548 - if (error) { 2549 - REMOVE_DEBUG_TRACE(__LINE__); 2550 - goto std_return; 2551 - } 2552 - 2553 - tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); 2554 - cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 2555 - /* 2556 - * We try to get the real space reservation first, 2557 - * allowing for directory btree deletion(s) implying 2558 - * possible bmap insert(s). If we can't get the space 2559 - * reservation then we use 0 instead, and avoid the bmap 2560 - * btree insert(s) in the directory code by, if the bmap 2561 - * insert tries to happen, instead trimming the LAST 2562 - * block from the directory. 
2563 - */ 2564 - resblks = XFS_REMOVE_SPACE_RES(mp); 2565 - error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, 2566 - XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); 2567 - if (error == ENOSPC) { 2568 - resblks = 0; 2569 - error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, 2570 - XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); 2571 - } 2572 - if (error) { 2573 - ASSERT(error != ENOSPC); 2574 - cancel_flags = 0; 2575 - goto error_return; 2576 - } 2577 - XFS_BMAP_INIT(&free_list, &first_block); 2578 - 2579 - /* 2580 - * Now lock the child directory inode and the parent directory 2581 - * inode in the proper order. This will take care of validating 2582 - * that the directory entry for the child directory inode has 2583 - * not changed while we were obtaining a log reservation. 2584 - */ 2585 - error = xfs_lock_dir_and_entry(dp, cdp); 2586 - if (error) { 2587 - xfs_trans_cancel(tp, cancel_flags); 2588 - goto std_return; 2589 - } 2590 - 2591 - IHOLD(dp); 2592 - xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 2593 - 2594 - IHOLD(cdp); 2595 - xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); 2596 - 2597 - ASSERT(cdp->i_d.di_nlink >= 2); 2598 - if (cdp->i_d.di_nlink != 2) { 2599 - error = XFS_ERROR(ENOTEMPTY); 2600 - goto error_return; 2601 - } 2602 - if (!xfs_dir_isempty(cdp)) { 2603 - error = XFS_ERROR(ENOTEMPTY); 2604 - goto error_return; 2605 - } 2606 - 2607 - error = xfs_dir_removename(tp, dp, name, cdp->i_ino, 2608 - &first_block, &free_list, resblks); 2609 - if (error) 2610 - goto error1; 2611 - 2612 - xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2613 - 2614 - /* 2615 - * Bump the in memory generation count on the parent 2616 - * directory so that other can know that it has changed. 2617 - */ 2618 - dp->i_gen++; 2619 - 2620 - /* 2621 - * Drop the link from cdp's "..". 2622 - */ 2623 - error = xfs_droplink(tp, dp); 2624 - if (error) { 2625 - goto error1; 2626 - } 2627 - 2628 - /* 2629 - * Drop the link from dp to cdp. 
2630 - */ 2631 - error = xfs_droplink(tp, cdp); 2632 - if (error) { 2633 - goto error1; 2634 - } 2635 - 2636 - /* 2637 - * Drop the "." link from cdp to self. 2638 - */ 2639 - error = xfs_droplink(tp, cdp); 2640 - if (error) { 2641 - goto error1; 2642 - } 2643 - 2644 - /* Determine these before committing transaction */ 2645 - last_cdp_link = (cdp)->i_d.di_nlink==0; 2646 - 2647 - /* 2648 - * If this is a synchronous mount, make sure that the 2649 - * rmdir transaction goes to disk before returning to 2650 - * the user. 2651 - */ 2652 - if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 2653 - xfs_trans_set_sync(tp); 2654 - } 2655 - 2656 - error = xfs_bmap_finish (&tp, &free_list, &committed); 2657 - if (error) { 2658 - xfs_bmap_cancel(&free_list); 2659 - xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | 2660 - XFS_TRANS_ABORT)); 2661 - goto std_return; 2662 - } 2663 - 2664 - error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 2665 - if (error) { 2666 - goto std_return; 2667 - } 2668 - 2669 - 2670 - /* Fall through to std_return with error = 0 or the errno 2671 - * from xfs_trans_commit. 
*/ 2672 - std_return: 2673 - if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { 2674 - (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, 2675 - dp, DM_RIGHT_NULL, 2676 - NULL, DM_RIGHT_NULL, 2677 - name->name, NULL, cdp->i_d.di_mode, 2678 - error, 0); 2679 - } 2680 - return error; 2681 - 2682 - error1: 2683 - xfs_bmap_cancel(&free_list); 2684 - cancel_flags |= XFS_TRANS_ABORT; 2685 - /* FALLTHROUGH */ 2686 - 2687 - error_return: 2688 - xfs_trans_cancel(tp, cancel_flags); 2689 2637 goto std_return; 2690 2638 } 2691 2639 ··· 2934 3242 { 2935 3243 xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); 2936 3244 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2937 - int error; 2938 3245 2939 3246 if (vp && VN_BAD(vp)) 2940 3247 goto reclaim; ··· 2976 3285 xfs_iflock(ip); 2977 3286 } 2978 3287 2979 - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 2980 - if (ip->i_update_core || 2981 - ((ip->i_itemp != NULL) && 2982 - (ip->i_itemp->ili_format.ilf_fields != 0))) { 2983 - error = xfs_iflush(ip, sync_mode); 2984 - /* 2985 - * If we hit an error, typically because of filesystem 2986 - * shutdown, we don't need to let vn_reclaim to know 2987 - * because we're gonna reclaim the inode anyway. 2988 - */ 2989 - if (error) { 2990 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 2991 - goto reclaim; 2992 - } 2993 - xfs_iflock(ip); /* synchronize with xfs_iflush_done */ 2994 - } 2995 - 2996 - ASSERT(ip->i_update_core == 0); 2997 - ASSERT(ip->i_itemp == NULL || 2998 - ip->i_itemp->ili_format.ilf_fields == 0); 3288 + /* 3289 + * In the case of a forced shutdown we rely on xfs_iflush() to 3290 + * wait for the inode to be unpinned before returning an error. 3291 + */ 3292 + if (xfs_iflush(ip, sync_mode) == 0) { 3293 + /* synchronize with xfs_iflush_done */ 3294 + xfs_iflock(ip); 3295 + xfs_ifunlock(ip); 2999 3296 } 3000 3297 3001 - xfs_ifunlock(ip); 3002 3298 xfs_iunlock(ip, XFS_ILOCK_EXCL); 3003 3299 3004 3300 reclaim: ··· 3096 3418 3097 3419 /* Generate a DMAPI event if needed. 
*/ 3098 3420 if (alloc_type != 0 && offset < ip->i_size && 3099 - (attr_flags&ATTR_DMI) == 0 && 3421 + (attr_flags & XFS_ATTR_DMI) == 0 && 3100 3422 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { 3101 3423 xfs_off_t end_dmi_offset; 3102 3424 ··· 3210 3532 allocatesize_fsb -= allocated_fsb; 3211 3533 } 3212 3534 dmapi_enospc_check: 3213 - if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 && 3535 + if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 && 3214 3536 DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) { 3215 3537 error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, 3216 3538 ip, DM_RIGHT_NULL, ··· 3357 3679 end_dmi_offset = offset + len; 3358 3680 endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); 3359 3681 3360 - if (offset < ip->i_size && (attr_flags & ATTR_DMI) == 0 && 3682 + if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 && 3361 3683 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { 3362 3684 if (end_dmi_offset > ip->i_size) 3363 3685 end_dmi_offset = ip->i_size; ··· 3368 3690 return error; 3369 3691 } 3370 3692 3371 - if (attr_flags & ATTR_NOLOCK) 3693 + if (attr_flags & XFS_ATTR_NOLOCK) 3372 3694 need_iolock = 0; 3373 3695 if (need_iolock) { 3374 3696 xfs_ilock(ip, XFS_IOLOCK_EXCL); ··· 3545 3867 xfs_off_t startoffset; 3546 3868 xfs_off_t llen; 3547 3869 xfs_trans_t *tp; 3548 - bhv_vattr_t va; 3870 + struct iattr iattr; 3549 3871 3550 3872 xfs_itrace_entry(ip); 3551 3873 ··· 3619 3941 break; 3620 3942 } 3621 3943 3622 - va.va_mask = XFS_AT_SIZE; 3623 - va.va_size = startoffset; 3944 + iattr.ia_valid = ATTR_SIZE; 3945 + iattr.ia_size = startoffset; 3624 3946 3625 - error = xfs_setattr(ip, &va, attr_flags, credp); 3947 + error = xfs_setattr(ip, &iattr, attr_flags, credp); 3626 3948 3627 3949 if (error) 3628 3950 return error; ··· 3652 3974 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 3653 3975 xfs_trans_ihold(tp, ip); 3654 3976 3655 - if ((attr_flags & ATTR_DMI) == 0) { 3977 + if ((attr_flags & XFS_ATTR_DMI) == 0) { 3656 3978 ip->i_d.di_mode &= ~S_ISUID; 3657 3979 3658 
3980 /*
+7 -5
fs/xfs/xfs_vnodeops.h
··· 2 2 #define _XFS_VNODEOPS_H 1 3 3 4 4 struct attrlist_cursor_kern; 5 - struct bhv_vattr; 6 5 struct cred; 7 6 struct file; 7 + struct iattr; 8 8 struct inode; 9 9 struct iovec; 10 10 struct kiocb; ··· 15 15 16 16 17 17 int xfs_open(struct xfs_inode *ip); 18 - int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, 18 + int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags, 19 19 struct cred *credp); 20 + #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ 21 + #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ 22 + #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ 23 + 20 24 int xfs_readlink(struct xfs_inode *ip, char *link); 21 25 int xfs_fsync(struct xfs_inode *ip); 22 26 int xfs_release(struct xfs_inode *ip); 23 27 int xfs_inactive(struct xfs_inode *ip); 24 28 int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, 25 - struct xfs_inode **ipp); 29 + struct xfs_inode **ipp, struct xfs_name *ci_name); 26 30 int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, 27 31 xfs_dev_t rdev, struct xfs_inode **ipp, struct cred *credp); 28 32 int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, ··· 35 31 struct xfs_name *target_name); 36 32 int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name, 37 33 mode_t mode, struct xfs_inode **ipp, struct cred *credp); 38 - int xfs_rmdir(struct xfs_inode *dp, struct xfs_name *name, 39 - struct xfs_inode *cdp); 40 34 int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, 41 35 xfs_off_t *offset, filldir_t filldir); 42 36 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
+1
include/linux/dcache.h
··· 230 230 extern struct dentry * d_alloc(struct dentry *, const struct qstr *); 231 231 extern struct dentry * d_alloc_anon(struct inode *); 232 232 extern struct dentry * d_splice_alias(struct inode *, struct dentry *); 233 + extern struct dentry * d_add_ci(struct inode *, struct dentry *, struct qstr *); 233 234 extern void shrink_dcache_sb(struct super_block *); 234 235 extern void shrink_dcache_parent(struct dentry *); 235 236 extern void shrink_dcache_for_umount(struct super_block *);