Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
[XFS] Update maintainers
[XFS] use scalable vmap API
[XFS] remove old vmap cache
[XFS] make xfs_ino_t an unsigned long long
[XFS] truncate readdir offsets to signed 32 bit values
[XFS] fix compile of xfs_btree_readahead_lblock on m68k
[XFS] Remove macro-to-function indirections in the mask code
[XFS] Remove macro-to-function indirections in attr code
[XFS] Remove several unused typedefs.
[XFS] pass XFS_IGET_BULKSTAT to xfs_iget for handle operations

+122 -203
+2 -2
MAINTAINERS
··· 4842 4842 4843 4843 XFS FILESYSTEM 4844 4844 P: Silicon Graphics Inc 4845 - P: Tim Shimmin 4845 + P: Felix Blyakher 4846 4846 M: xfs-masters@oss.sgi.com 4847 4847 L: xfs@oss.sgi.com 4848 4848 W: http://oss.sgi.com/projects/xfs 4849 - T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git 4849 + T: git git://oss.sgi.com/xfs/xfs.git 4850 4850 S: Supported 4851 4851 4852 4852 XILINX SYSTEMACE DRIVER
-2
fs/xfs/linux-2.6/xfs_aops.h
··· 21 21 extern struct workqueue_struct *xfsdatad_workqueue; 22 22 extern mempool_t *xfs_ioend_pool; 23 23 24 - typedef void (*xfs_ioend_func_t)(void *); 25 - 26 24 /* 27 25 * xfs_ioend struct manages large extent writes for XFS. 28 26 * It can manage several multi-page bio's at once.
+3 -76
fs/xfs/linux-2.6/xfs_buf.c
··· 166 166 } 167 167 168 168 /* 169 - * Mapping of multi-page buffers into contiguous virtual space 170 - */ 171 - 172 - typedef struct a_list { 173 - void *vm_addr; 174 - struct a_list *next; 175 - } a_list_t; 176 - 177 - static a_list_t *as_free_head; 178 - static int as_list_len; 179 - static DEFINE_SPINLOCK(as_lock); 180 - 181 - /* 182 - * Try to batch vunmaps because they are costly. 183 - */ 184 - STATIC void 185 - free_address( 186 - void *addr) 187 - { 188 - a_list_t *aentry; 189 - 190 - #ifdef CONFIG_XEN 191 - /* 192 - * Xen needs to be able to make sure it can get an exclusive 193 - * RO mapping of pages it wants to turn into a pagetable. If 194 - * a newly allocated page is also still being vmap()ed by xfs, 195 - * it will cause pagetable construction to fail. This is a 196 - * quick workaround to always eagerly unmap pages so that Xen 197 - * is happy. 198 - */ 199 - vunmap(addr); 200 - return; 201 - #endif 202 - 203 - aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT); 204 - if (likely(aentry)) { 205 - spin_lock(&as_lock); 206 - aentry->next = as_free_head; 207 - aentry->vm_addr = addr; 208 - as_free_head = aentry; 209 - as_list_len++; 210 - spin_unlock(&as_lock); 211 - } else { 212 - vunmap(addr); 213 - } 214 - } 215 - 216 - STATIC void 217 - purge_addresses(void) 218 - { 219 - a_list_t *aentry, *old; 220 - 221 - if (as_free_head == NULL) 222 - return; 223 - 224 - spin_lock(&as_lock); 225 - aentry = as_free_head; 226 - as_free_head = NULL; 227 - as_list_len = 0; 228 - spin_unlock(&as_lock); 229 - 230 - while ((old = aentry) != NULL) { 231 - vunmap(aentry->vm_addr); 232 - aentry = aentry->next; 233 - kfree(old); 234 - } 235 - } 236 - 237 - /* 238 169 * Internal xfs_buf_t object manipulation 239 170 */ 240 171 ··· 264 333 uint i; 265 334 266 335 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1)) 267 - free_address(bp->b_addr - bp->b_offset); 336 + vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count); 268 337 269 338 for (i = 0; i < 
bp->b_page_count; i++) { 270 339 struct page *page = bp->b_pages[i]; ··· 386 455 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 387 456 bp->b_flags |= XBF_MAPPED; 388 457 } else if (flags & XBF_MAPPED) { 389 - if (as_list_len > 64) 390 - purge_addresses(); 391 - bp->b_addr = vmap(bp->b_pages, bp->b_page_count, 392 - VM_MAP, PAGE_KERNEL); 458 + bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, 459 + -1, PAGE_KERNEL); 393 460 if (unlikely(bp->b_addr == NULL)) 394 461 return -ENOMEM; 395 462 bp->b_addr += bp->b_offset; ··· 1672 1743 count++; 1673 1744 } 1674 1745 1675 - if (as_list_len > 0) 1676 - purge_addresses(); 1677 1746 if (count) 1678 1747 blk_run_address_space(target->bt_mapping); 1679 1748
+19 -4
fs/xfs/linux-2.6/xfs_export.c
··· 126 126 if (ino == 0) 127 127 return ERR_PTR(-ESTALE); 128 128 129 - error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); 130 - if (error) 129 + /* 130 + * The XFS_IGET_BULKSTAT means that an invalid inode number is just 131 + * fine and not an indication of a corrupted filesystem. Because 132 + * clients can send any kind of invalid file handle, e.g. after 133 + * a restore on the server we have to deal with this case gracefully. 134 + */ 135 + error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, 136 + XFS_ILOCK_SHARED, &ip, 0); 137 + if (error) { 138 + /* 139 + * EINVAL means the inode cluster doesn't exist anymore. 140 + * This implies the filehandle is stale, so we should 141 + * translate it here. 142 + * We don't use ESTALE directly down the chain to not 143 + * confuse applications using bulkstat that expect EINVAL. 144 + */ 145 + if (error == EINVAL) 146 + error = ESTALE; 131 147 return ERR_PTR(-error); 132 - if (!ip) 133 - return ERR_PTR(-EIO); 148 + } 134 149 135 150 if (ip->i_d.di_gen != generation) { 136 151 xfs_iput_new(ip, XFS_ILOCK_SHARED);
-1
fs/xfs/xfs_acl.h
··· 22 22 * Access Control Lists 23 23 */ 24 24 typedef __uint16_t xfs_acl_perm_t; 25 - typedef __int32_t xfs_acl_type_t; 26 25 typedef __int32_t xfs_acl_tag_t; 27 26 typedef __int32_t xfs_acl_id_t; 28 27
+1 -1
fs/xfs/xfs_ag.h
··· 231 231 #define XFS_FSB_TO_AGNO(mp,fsbno) \ 232 232 ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog)) 233 233 #define XFS_FSB_TO_AGBNO(mp,fsbno) \ 234 - ((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog))) 234 + ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog))) 235 235 #define XFS_AGB_TO_DADDR(mp,agno,agbno) \ 236 236 ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \ 237 237 (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
+36 -36
fs/xfs/xfs_attr_leaf.c
··· 736 736 continue; /* don't copy partial entries */ 737 737 if (!(entry->flags & XFS_ATTR_LOCAL)) 738 738 return(0); 739 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 739 + name_loc = xfs_attr_leaf_name_local(leaf, i); 740 740 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) 741 741 return(0); 742 742 if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) ··· 823 823 if (!entry->nameidx) 824 824 continue; 825 825 ASSERT(entry->flags & XFS_ATTR_LOCAL); 826 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 826 + name_loc = xfs_attr_leaf_name_local(leaf, i); 827 827 nargs.name = (char *)name_loc->nameval; 828 828 nargs.namelen = name_loc->namelen; 829 829 nargs.value = (char *)&name_loc->nameval[nargs.namelen]; ··· 1141 1141 * as part of this transaction (a split operation for example). 1142 1142 */ 1143 1143 if (entry->flags & XFS_ATTR_LOCAL) { 1144 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); 1144 + name_loc = xfs_attr_leaf_name_local(leaf, args->index); 1145 1145 name_loc->namelen = args->namelen; 1146 1146 name_loc->valuelen = cpu_to_be16(args->valuelen); 1147 1147 memcpy((char *)name_loc->nameval, args->name, args->namelen); 1148 1148 memcpy((char *)&name_loc->nameval[args->namelen], args->value, 1149 1149 be16_to_cpu(name_loc->valuelen)); 1150 1150 } else { 1151 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 1151 + name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 1152 1152 name_rmt->namelen = args->namelen; 1153 1153 memcpy((char *)name_rmt->name, args->name, args->namelen); 1154 1154 entry->flags |= XFS_ATTR_INCOMPLETE; ··· 1159 1159 args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen); 1160 1160 } 1161 1161 xfs_da_log_buf(args->trans, bp, 1162 - XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), 1162 + XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index), 1163 1163 xfs_attr_leaf_entsize(leaf, args->index))); 1164 1164 1165 1165 /* ··· 1749 1749 /* 1750 1750 * Compress the remaining entries and zero 
out the removed stuff. 1751 1751 */ 1752 - memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); 1752 + memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize); 1753 1753 be16_add_cpu(&hdr->usedbytes, -entsize); 1754 1754 xfs_da_log_buf(args->trans, bp, 1755 - XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), 1755 + XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index), 1756 1756 entsize)); 1757 1757 1758 1758 tmp = (be16_to_cpu(hdr->count) - args->index) ··· 1985 1985 continue; 1986 1986 } 1987 1987 if (entry->flags & XFS_ATTR_LOCAL) { 1988 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe); 1988 + name_loc = xfs_attr_leaf_name_local(leaf, probe); 1989 1989 if (name_loc->namelen != args->namelen) 1990 1990 continue; 1991 1991 if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0) ··· 1995 1995 args->index = probe; 1996 1996 return(XFS_ERROR(EEXIST)); 1997 1997 } else { 1998 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe); 1998 + name_rmt = xfs_attr_leaf_name_remote(leaf, probe); 1999 1999 if (name_rmt->namelen != args->namelen) 2000 2000 continue; 2001 2001 if (memcmp(args->name, (char *)name_rmt->name, ··· 2035 2035 2036 2036 entry = &leaf->entries[args->index]; 2037 2037 if (entry->flags & XFS_ATTR_LOCAL) { 2038 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); 2038 + name_loc = xfs_attr_leaf_name_local(leaf, args->index); 2039 2039 ASSERT(name_loc->namelen == args->namelen); 2040 2040 ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); 2041 2041 valuelen = be16_to_cpu(name_loc->valuelen); ··· 2050 2050 args->valuelen = valuelen; 2051 2051 memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); 2052 2052 } else { 2053 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 2053 + name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2054 2054 ASSERT(name_rmt->namelen == args->namelen); 2055 2055 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); 2056 2056 valuelen = 
be32_to_cpu(name_rmt->valuelen); ··· 2143 2143 * off for 6.2, should be revisited later. 2144 2144 */ 2145 2145 if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */ 2146 - memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); 2146 + memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp); 2147 2147 be16_add_cpu(&hdr_s->usedbytes, -tmp); 2148 2148 be16_add_cpu(&hdr_s->count, -1); 2149 2149 entry_d--; /* to compensate for ++ in loop hdr */ ··· 2160 2160 entry_d->flags = entry_s->flags; 2161 2161 ASSERT(be16_to_cpu(entry_d->nameidx) + tmp 2162 2162 <= XFS_LBSIZE(mp)); 2163 - memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti), 2164 - XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp); 2163 + memmove(xfs_attr_leaf_name(leaf_d, desti), 2164 + xfs_attr_leaf_name(leaf_s, start_s + i), tmp); 2165 2165 ASSERT(be16_to_cpu(entry_s->nameidx) + tmp 2166 2166 <= XFS_LBSIZE(mp)); 2167 - memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); 2167 + memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp); 2168 2168 be16_add_cpu(&hdr_s->usedbytes, -tmp); 2169 2169 be16_add_cpu(&hdr_d->usedbytes, tmp); 2170 2170 be16_add_cpu(&hdr_s->count, -1); ··· 2276 2276 2277 2277 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); 2278 2278 if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { 2279 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); 2280 - size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, 2279 + name_loc = xfs_attr_leaf_name_local(leaf, index); 2280 + size = xfs_attr_leaf_entsize_local(name_loc->namelen, 2281 2281 be16_to_cpu(name_loc->valuelen)); 2282 2282 } else { 2283 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); 2284 - size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); 2283 + name_rmt = xfs_attr_leaf_name_remote(leaf, index); 2284 + size = xfs_attr_leaf_entsize_remote(name_rmt->namelen); 2285 2285 } 2286 2286 return(size); 2287 2287 } ··· 2297 2297 { 2298 2298 int size; 2299 2299 2300 - size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(namelen, valuelen); 2301 
- if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) { 2300 + size = xfs_attr_leaf_entsize_local(namelen, valuelen); 2301 + if (size < xfs_attr_leaf_entsize_local_max(blocksize)) { 2302 2302 if (local) { 2303 2303 *local = 1; 2304 2304 } 2305 2305 } else { 2306 - size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(namelen); 2306 + size = xfs_attr_leaf_entsize_remote(namelen); 2307 2307 if (local) { 2308 2308 *local = 0; 2309 2309 } ··· 2372 2372 2373 2373 if (entry->flags & XFS_ATTR_LOCAL) { 2374 2374 xfs_attr_leaf_name_local_t *name_loc = 2375 - XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 2375 + xfs_attr_leaf_name_local(leaf, i); 2376 2376 2377 2377 retval = context->put_listent(context, 2378 2378 entry->flags, ··· 2384 2384 return retval; 2385 2385 } else { 2386 2386 xfs_attr_leaf_name_remote_t *name_rmt = 2387 - XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2387 + xfs_attr_leaf_name_remote(leaf, i); 2388 2388 2389 2389 int valuelen = be32_to_cpu(name_rmt->valuelen); 2390 2390 ··· 2468 2468 2469 2469 #ifdef DEBUG 2470 2470 if (entry->flags & XFS_ATTR_LOCAL) { 2471 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); 2471 + name_loc = xfs_attr_leaf_name_local(leaf, args->index); 2472 2472 namelen = name_loc->namelen; 2473 2473 name = (char *)name_loc->nameval; 2474 2474 } else { 2475 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 2475 + name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2476 2476 namelen = name_rmt->namelen; 2477 2477 name = (char *)name_rmt->name; 2478 2478 } ··· 2487 2487 2488 2488 if (args->rmtblkno) { 2489 2489 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); 2490 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 2490 + name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2491 2491 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2492 2492 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2493 2493 xfs_da_log_buf(args->trans, bp, ··· 2534 2534 xfs_da_log_buf(args->trans, bp, 2535 2535 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); 2536 2536 
if ((entry->flags & XFS_ATTR_LOCAL) == 0) { 2537 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 2537 + name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2538 2538 name_rmt->valueblk = 0; 2539 2539 name_rmt->valuelen = 0; 2540 2540 xfs_da_log_buf(args->trans, bp, ··· 2607 2607 2608 2608 #ifdef DEBUG 2609 2609 if (entry1->flags & XFS_ATTR_LOCAL) { 2610 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index); 2610 + name_loc = xfs_attr_leaf_name_local(leaf1, args->index); 2611 2611 namelen1 = name_loc->namelen; 2612 2612 name1 = (char *)name_loc->nameval; 2613 2613 } else { 2614 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); 2614 + name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index); 2615 2615 namelen1 = name_rmt->namelen; 2616 2616 name1 = (char *)name_rmt->name; 2617 2617 } 2618 2618 if (entry2->flags & XFS_ATTR_LOCAL) { 2619 - name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2); 2619 + name_loc = xfs_attr_leaf_name_local(leaf2, args->index2); 2620 2620 namelen2 = name_loc->namelen; 2621 2621 name2 = (char *)name_loc->nameval; 2622 2622 } else { 2623 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2); 2623 + name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2); 2624 2624 namelen2 = name_rmt->namelen; 2625 2625 name2 = (char *)name_rmt->name; 2626 2626 } ··· 2637 2637 XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1))); 2638 2638 if (args->rmtblkno) { 2639 2639 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); 2640 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); 2640 + name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index); 2641 2641 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2642 2642 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2643 2643 xfs_da_log_buf(args->trans, bp1, ··· 2648 2648 xfs_da_log_buf(args->trans, bp2, 2649 2649 XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2))); 2650 2650 if ((entry2->flags & XFS_ATTR_LOCAL) == 0) { 2651 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, 
args->index2); 2651 + name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2); 2652 2652 name_rmt->valueblk = 0; 2653 2653 name_rmt->valuelen = 0; 2654 2654 xfs_da_log_buf(args->trans, bp2, ··· 2855 2855 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { 2856 2856 if (be16_to_cpu(entry->nameidx) && 2857 2857 ((entry->flags & XFS_ATTR_LOCAL) == 0)) { 2858 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2858 + name_rmt = xfs_attr_leaf_name_remote(leaf, i); 2859 2859 if (name_rmt->valueblk) 2860 2860 count++; 2861 2861 } ··· 2883 2883 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { 2884 2884 if (be16_to_cpu(entry->nameidx) && 2885 2885 ((entry->flags & XFS_ATTR_LOCAL) == 0)) { 2886 - name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2886 + name_rmt = xfs_attr_leaf_name_remote(leaf, i); 2887 2887 if (name_rmt->valueblk) { 2888 2888 lp->valueblk = be32_to_cpu(name_rmt->valueblk); 2889 2889 lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
-12
fs/xfs/xfs_attr_leaf.h
··· 151 151 /* 152 152 * Cast typed pointers for "local" and "remote" name/value structs. 153 153 */ 154 - #define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) \ 155 - xfs_attr_leaf_name_remote(leafp,idx) 156 154 static inline xfs_attr_leaf_name_remote_t * 157 155 xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) 158 156 { ··· 158 160 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; 159 161 } 160 162 161 - #define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ 162 - xfs_attr_leaf_name_local(leafp,idx) 163 163 static inline xfs_attr_leaf_name_local_t * 164 164 xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) 165 165 { ··· 165 169 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; 166 170 } 167 171 168 - #define XFS_ATTR_LEAF_NAME(leafp,idx) \ 169 - xfs_attr_leaf_name(leafp,idx) 170 172 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) 171 173 { 172 174 return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; ··· 175 181 * a "local" name/value structure, a "remote" name/value structure, and 176 182 * a pointer which might be either. 
177 183 */ 178 - #define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) \ 179 - xfs_attr_leaf_entsize_remote(nlen) 180 184 static inline int xfs_attr_leaf_entsize_remote(int nlen) 181 185 { 182 186 return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \ 183 187 XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1); 184 188 } 185 189 186 - #define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) \ 187 - xfs_attr_leaf_entsize_local(nlen,vlen) 188 190 static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen) 189 191 { 190 192 return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) + 191 193 XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1); 192 194 } 193 195 194 - #define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) \ 195 - xfs_attr_leaf_entsize_local_max(bsize) 196 196 static inline int xfs_attr_leaf_entsize_local_max(int bsize) 197 197 { 198 198 return (((bsize) >> 1) + ((bsize) >> 2));
+1 -9
fs/xfs/xfs_bit.h
··· 23 23 */ 24 24 25 25 /* 26 - * masks with n high/low bits set, 32-bit values & 64-bit values 26 + * masks with n high/low bits set, 64-bit values 27 27 */ 28 - #define XFS_MASK32HI(n) xfs_mask32hi(n) 29 - static inline __uint32_t xfs_mask32hi(int n) 30 - { 31 - return (__uint32_t)-1 << (32 - (n)); 32 - } 33 - #define XFS_MASK64HI(n) xfs_mask64hi(n) 34 28 static inline __uint64_t xfs_mask64hi(int n) 35 29 { 36 30 return (__uint64_t)-1 << (64 - (n)); 37 31 } 38 - #define XFS_MASK32LO(n) xfs_mask32lo(n) 39 32 static inline __uint32_t xfs_mask32lo(int n) 40 33 { 41 34 return ((__uint32_t)1 << (n)) - 1; 42 35 } 43 - #define XFS_MASK64LO(n) xfs_mask64lo(n) 44 36 static inline __uint64_t xfs_mask64lo(int n) 45 37 { 46 38 return ((__uint64_t)1 << (n)) - 1;
+42 -42
fs/xfs/xfs_bmap_btree.c
··· 110 110 111 111 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); 112 112 s->br_startoff = ((xfs_fileoff_t)l0 & 113 - XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 113 + xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 114 114 #if XFS_BIG_BLKNOS 115 - s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) | 115 + s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) | 116 116 (((xfs_fsblock_t)l1) >> 21); 117 117 #else 118 118 #ifdef DEBUG 119 119 { 120 120 xfs_dfsbno_t b; 121 121 122 - b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) | 122 + b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | 123 123 (((xfs_dfsbno_t)l1) >> 21); 124 124 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); 125 125 s->br_startblock = (xfs_fsblock_t)b; ··· 128 128 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); 129 129 #endif /* DEBUG */ 130 130 #endif /* XFS_BIG_BLKNOS */ 131 - s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21)); 131 + s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21)); 132 132 /* This is xfs_extent_state() in-line */ 133 133 if (ext_flag) { 134 134 ASSERT(s->br_blockcount != 0); /* saved for DMIG */ ··· 153 153 xfs_bmbt_get_blockcount( 154 154 xfs_bmbt_rec_host_t *r) 155 155 { 156 - return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21)); 156 + return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21)); 157 157 } 158 158 159 159 /* ··· 164 164 xfs_bmbt_rec_host_t *r) 165 165 { 166 166 #if XFS_BIG_BLKNOS 167 - return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) | 167 + return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) | 168 168 (((xfs_fsblock_t)r->l1) >> 21); 169 169 #else 170 170 #ifdef DEBUG 171 171 xfs_dfsbno_t b; 172 172 173 - b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) | 173 + b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | 174 174 (((xfs_dfsbno_t)r->l1) >> 21); 175 175 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); 176 176 return (xfs_fsblock_t)b; ··· 188 188 xfs_bmbt_rec_host_t *r) 189 189 { 190 190 return 
((xfs_fileoff_t)r->l0 & 191 - XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 191 + xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 192 192 } 193 193 194 194 xfs_exntst_t ··· 219 219 xfs_bmbt_disk_get_blockcount( 220 220 xfs_bmbt_rec_t *r) 221 221 { 222 - return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21)); 222 + return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21)); 223 223 } 224 224 225 225 /* ··· 230 230 xfs_bmbt_rec_t *r) 231 231 { 232 232 return ((xfs_fileoff_t)be64_to_cpu(r->l0) & 233 - XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 233 + xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 234 234 } 235 235 236 236 ··· 248 248 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1; 249 249 250 250 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); 251 - ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); 252 - ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 251 + ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); 252 + ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 253 253 254 254 #if XFS_BIG_BLKNOS 255 - ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); 255 + ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); 256 256 257 257 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | 258 258 ((xfs_bmbt_rec_base_t)startoff << 9) | 259 259 ((xfs_bmbt_rec_base_t)startblock >> 43); 260 260 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | 261 261 ((xfs_bmbt_rec_base_t)blockcount & 262 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); 262 + (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 263 263 #else /* !XFS_BIG_BLKNOS */ 264 264 if (ISNULLSTARTBLOCK(startblock)) { 265 265 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | 266 266 ((xfs_bmbt_rec_base_t)startoff << 9) | 267 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); 268 - r->l1 = XFS_MASK64HI(11) | 267 + (xfs_bmbt_rec_base_t)xfs_mask64lo(9); 268 + r->l1 = xfs_mask64hi(11) | 269 269 ((xfs_bmbt_rec_base_t)startblock << 
21) | 270 270 ((xfs_bmbt_rec_base_t)blockcount & 271 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); 271 + (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 272 272 } else { 273 273 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | 274 274 ((xfs_bmbt_rec_base_t)startoff << 9); 275 275 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | 276 276 ((xfs_bmbt_rec_base_t)blockcount & 277 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); 277 + (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 278 278 } 279 279 #endif /* XFS_BIG_BLKNOS */ 280 280 } ··· 306 306 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1; 307 307 308 308 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); 309 - ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); 310 - ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 309 + ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); 310 + ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 311 311 312 312 #if XFS_BIG_BLKNOS 313 - ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); 313 + ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); 314 314 315 315 r->l0 = cpu_to_be64( 316 316 ((xfs_bmbt_rec_base_t)extent_flag << 63) | ··· 319 319 r->l1 = cpu_to_be64( 320 320 ((xfs_bmbt_rec_base_t)startblock << 21) | 321 321 ((xfs_bmbt_rec_base_t)blockcount & 322 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); 322 + (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 323 323 #else /* !XFS_BIG_BLKNOS */ 324 324 if (ISNULLSTARTBLOCK(startblock)) { 325 325 r->l0 = cpu_to_be64( 326 326 ((xfs_bmbt_rec_base_t)extent_flag << 63) | 327 327 ((xfs_bmbt_rec_base_t)startoff << 9) | 328 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); 329 - r->l1 = cpu_to_be64(XFS_MASK64HI(11) | 328 + (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); 329 + r->l1 = cpu_to_be64(xfs_mask64hi(11) | 330 330 ((xfs_bmbt_rec_base_t)startblock << 21) | 331 331 ((xfs_bmbt_rec_base_t)blockcount & 332 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); 332 + 
(xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 333 333 } else { 334 334 r->l0 = cpu_to_be64( 335 335 ((xfs_bmbt_rec_base_t)extent_flag << 63) | ··· 337 337 r->l1 = cpu_to_be64( 338 338 ((xfs_bmbt_rec_base_t)startblock << 21) | 339 339 ((xfs_bmbt_rec_base_t)blockcount & 340 - (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); 340 + (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 341 341 } 342 342 #endif /* XFS_BIG_BLKNOS */ 343 343 } ··· 362 362 xfs_bmbt_rec_host_t *r, 363 363 xfs_filblks_t v) 364 364 { 365 - ASSERT((v & XFS_MASK64HI(43)) == 0); 366 - r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) | 367 - (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21)); 365 + ASSERT((v & xfs_mask64hi(43)) == 0); 366 + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) | 367 + (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21)); 368 368 } 369 369 370 370 /* ··· 376 376 xfs_fsblock_t v) 377 377 { 378 378 #if XFS_BIG_BLKNOS 379 - ASSERT((v & XFS_MASK64HI(12)) == 0); 380 - r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) | 379 + ASSERT((v & xfs_mask64hi(12)) == 0); 380 + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) | 381 381 (xfs_bmbt_rec_base_t)(v >> 43); 382 - r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) | 382 + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | 383 383 (xfs_bmbt_rec_base_t)(v << 21); 384 384 #else /* !XFS_BIG_BLKNOS */ 385 385 if (ISNULLSTARTBLOCK(v)) { 386 - r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); 387 - r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) | 386 + r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); 387 + r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | 388 388 ((xfs_bmbt_rec_base_t)v << 21) | 389 - (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); 389 + (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 390 390 } else { 391 - r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9); 391 + r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9); 392 392 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | 393 - (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); 393 + (r->l1 & 
(xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 394 394 } 395 395 #endif /* XFS_BIG_BLKNOS */ 396 396 } ··· 403 403 xfs_bmbt_rec_host_t *r, 404 404 xfs_fileoff_t v) 405 405 { 406 - ASSERT((v & XFS_MASK64HI(9)) == 0); 407 - r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) | 406 + ASSERT((v & xfs_mask64hi(9)) == 0); 407 + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) | 408 408 ((xfs_bmbt_rec_base_t)v << 9) | 409 - (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); 409 + (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); 410 410 } 411 411 412 412 /* ··· 419 419 { 420 420 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN); 421 421 if (v == XFS_EXT_NORM) 422 - r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN); 422 + r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN); 423 423 else 424 - r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN); 424 + r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN); 425 425 } 426 426 427 427 /*
+2 -2
fs/xfs/xfs_btree.c
··· 730 730 struct xfs_btree_block *block) 731 731 { 732 732 int rval = 0; 733 - xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); 734 - xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); 733 + xfs_dfsbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); 734 + xfs_dfsbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); 735 735 736 736 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) { 737 737 xfs_btree_reada_bufl(cur->bc_mp, left, 1);
+4 -3
fs/xfs/xfs_dir2_block.c
··· 517 517 /* 518 518 * If it didn't fit, set the final offset to here & return. 519 519 */ 520 - if (filldir(dirent, dep->name, dep->namelen, cook, 520 + if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff, 521 521 ino, DT_UNKNOWN)) { 522 - *offset = cook; 522 + *offset = cook & 0x7fffffff; 523 523 xfs_da_brelse(NULL, bp); 524 524 return 0; 525 525 } ··· 529 529 * Reached the end of the block. 530 530 * Set the offset to a non-existent block 1 and return. 531 531 */ 532 - *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0); 532 + *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 533 + 0x7fffffff; 533 534 xfs_da_brelse(NULL, bp); 534 535 return 0; 535 536 }
+3 -3
fs/xfs/xfs_dir2_leaf.c
··· 1092 1092 * Won't fit. Return to caller. 1093 1093 */ 1094 1094 if (filldir(dirent, dep->name, dep->namelen, 1095 - xfs_dir2_byte_to_dataptr(mp, curoff), 1095 + xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff, 1096 1096 ino, DT_UNKNOWN)) 1097 1097 break; 1098 1098 ··· 1108 1108 * All done. Set output offset value to current offset. 1109 1109 */ 1110 1110 if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR)) 1111 - *offset = XFS_DIR2_MAX_DATAPTR; 1111 + *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; 1112 1112 else 1113 - *offset = xfs_dir2_byte_to_dataptr(mp, curoff); 1113 + *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; 1114 1114 kmem_free(map); 1115 1115 if (bp) 1116 1116 xfs_da_brelse(NULL, bp);
+8 -7
fs/xfs/xfs_dir2_sf.c
··· 752 752 #if XFS_BIG_INUMS 753 753 ino += mp->m_inoadd; 754 754 #endif 755 - if (filldir(dirent, ".", 1, dot_offset, ino, DT_DIR)) { 756 - *offset = dot_offset; 755 + if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) { 756 + *offset = dot_offset & 0x7fffffff; 757 757 return 0; 758 758 } 759 759 } ··· 766 766 #if XFS_BIG_INUMS 767 767 ino += mp->m_inoadd; 768 768 #endif 769 - if (filldir(dirent, "..", 2, dotdot_offset, ino, DT_DIR)) { 770 - *offset = dotdot_offset; 769 + if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) { 770 + *offset = dotdot_offset & 0x7fffffff; 771 771 return 0; 772 772 } 773 773 } ··· 791 791 #endif 792 792 793 793 if (filldir(dirent, sfep->name, sfep->namelen, 794 - off, ino, DT_UNKNOWN)) { 795 - *offset = off; 794 + off & 0x7fffffff, ino, DT_UNKNOWN)) { 795 + *offset = off & 0x7fffffff; 796 796 return 0; 797 797 } 798 798 sfep = xfs_dir2_sf_nextentry(sfp, sfep); 799 799 } 800 800 801 - *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0); 801 + *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 802 + 0x7fffffff; 802 803 return 0; 803 804 } 804 805
+1 -3
fs/xfs/xfs_types.h
··· 45 45 typedef __uint32_t inst_t; /* an instruction */ 46 46 47 47 typedef __s64 xfs_off_t; /* <file offset> type */ 48 - typedef __u64 xfs_ino_t; /* <inode> type */ 48 + typedef unsigned long long xfs_ino_t; /* <inode> type */ 49 49 typedef __s64 xfs_daddr_t; /* <disk address> type */ 50 50 typedef char * xfs_caddr_t; /* <core address> type */ 51 51 typedef __u32 xfs_dev_t; ··· 110 110 typedef __uint64_t xfs_fileoff_t; /* block number in a file */ 111 111 typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ 112 112 typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ 113 - 114 - typedef __uint8_t xfs_arch_t; /* architecture of an xfs fs */ 115 113 116 114 /* 117 115 * Null values for the types.