Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.15-rc6 2703 lines 78 kB view raw
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"
#include "xfs_dir2.h"


/*
 * xfs_attr_leaf.c
 *
 * Routines to implement leaf blocks of attributes as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args,
				 xfs_dablk_t which_block, struct xfs_buf **bpp);
STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer,
				   struct xfs_attr3_icleaf_hdr *ichdr,
				   struct xfs_da_args *args, int freemap_index);
STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args,
				   struct xfs_attr3_icleaf_hdr *ichdr,
				   struct xfs_buf *leaf_buffer);
STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
						   xfs_da_state_blk_t *blk1,
						   xfs_da_state_blk_t *blk2);
STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
			xfs_da_state_blk_t *leaf_blk_1,
			struct xfs_attr3_icleaf_hdr *ichdr1,
			xfs_da_state_blk_t *leaf_blk_2,
			struct xfs_attr3_icleaf_hdr *ichdr2,
			int *number_entries_in_blk1,
			int *number_usedbytes_in_blk1);

/*
 * Utility routines.
 */
STATIC void xfs_attr3_leaf_moveents(struct xfs_attr_leafblock *src_leaf,
			struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start,
			struct xfs_attr_leafblock *dst_leaf,
			struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start,
			int move_count, struct xfs_mount *mp);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);

/*
 * Unpack an on-disk attr leaf header (either the original v2 layout or the
 * v3/CRC xfs_attr3_leaf_hdr layout, distinguished by the magic number) into
 * the unified in-core xfs_attr3_icleaf_hdr, endian-converting each field.
 */
void
xfs_attr3_leaf_hdr_from_disk(
	struct xfs_attr3_icleaf_hdr	*to,
	struct xfs_attr_leafblock	*from)
{
	int	i;

	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
	       from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

	if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
		/* v3 blocks overlay a larger header at the same address */
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;

		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
		to->back = be32_to_cpu(hdr3->info.hdr.back);
		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
		to->count = be16_to_cpu(hdr3->count);
		to->usedbytes = be16_to_cpu(hdr3->usedbytes);
		to->firstused = be16_to_cpu(hdr3->firstused);
		to->holes = hdr3->holes;

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
			to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
		}
		return;
	}
	to->forw = be32_to_cpu(from->hdr.info.forw);
	to->back = be32_to_cpu(from->hdr.info.back);
	to->magic = be16_to_cpu(from->hdr.info.magic);
	to->count = be16_to_cpu(from->hdr.count);
	to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
	to->firstused = be16_to_cpu(from->hdr.firstused);
	to->holes = from->hdr.holes;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
		to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
	}
}

/*
 * Pack the in-core attr leaf header back into its on-disk representation,
 * choosing the v2 or v3 layout from the in-core magic.  Also zeroes the
 * on-disk pad byte so no stack garbage reaches the disk.
 */
void
xfs_attr3_leaf_hdr_to_disk(
	struct xfs_attr_leafblock	*to,
	struct xfs_attr3_icleaf_hdr	*from)
{
	int	i;

	ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
	       from->magic == XFS_ATTR3_LEAF_MAGIC);

	if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;

		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
		hdr3->info.hdr.back = cpu_to_be32(from->back);
		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
		hdr3->count = cpu_to_be16(from->count);
		hdr3->usedbytes = cpu_to_be16(from->usedbytes);
		hdr3->firstused = cpu_to_be16(from->firstused);
		hdr3->holes = from->holes;
		hdr3->pad1 = 0;

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
			hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
		}
		return;
	}
	to->hdr.info.forw = cpu_to_be32(from->forw);
	to->hdr.info.back = cpu_to_be32(from->back);
	to->hdr.info.magic = cpu_to_be16(from->magic);
	to->hdr.count = cpu_to_be16(from->count);
	to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
	to->hdr.firstused = cpu_to_be16(from->firstused);
	to->hdr.holes = from->holes;
	to->hdr.pad1 = 0;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
		to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
	}
}

/*
 * Structural sanity check for an attr leaf block, shared by the read and
 * write verifiers.  Returns false if the block is corrupt.  On CRC-enabled
 * filesystems it additionally checks the v3 magic, UUID and self-describing
 * block number.
 */
static bool
xfs_attr3_leaf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_attr_leafblock *leaf = bp->b_addr;
	struct xfs_attr3_icleaf_hdr ichdr;

	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
			return false;
	}
	if (ichdr.count == 0)
		return false;

	/* XXX: need to range check rest of attr header values */
	/* XXX: hash order check? */

	return true;
}

/*
 * Write verifier: reject structurally corrupt blocks, then (CRC filesystems
 * only) stamp the last-modification LSN from the buffer log item and
 * recompute the block checksum just before it goes to disk.
 */
static void
xfs_attr3_leaf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;

	if (!xfs_attr3_leaf_verify(bp)) {
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_attr3_leaf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	/* CRC mismatch takes precedence over structural corruption */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	     !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
		xfs_buf_ioerror(bp, EFSBADCRC);
	else if (!xfs_attr3_leaf_verify(bp))
		xfs_buf_ioerror(bp, EFSCORRUPTED);

	if (bp->b_error)
		xfs_verifier_error(bp);
}

/* Verifier dispatch table attached to every attr leaf buffer. */
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
	.verify_read = xfs_attr3_leaf_read_verify,
	.verify_write = xfs_attr3_leaf_write_verify,
};

/*
 * Read an attr leaf block at dablk 'bno', attaching the leaf verifier, and
 * (when inside a transaction) tag the buffer log item type for recovery.
 */
int
xfs_attr3_leaf_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
				XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
	if (!err && tp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
	return err;
}

/*========================================================================
 * Namespace helper routines
 *========================================================================*/

/*
 * If namespace bits don't match return 0.
 * If all match then return 1.
 */
STATIC int
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
{
	return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
}


/*========================================================================
 * External routines when attribute fork size < XFS_LITINO(mp).
 *========================================================================*/

/*
 * Query whether the requested number of additional bytes of extended
 * attribute space will be able to fit inline.
 *
 * Returns zero if not, else the di_forkoff fork offset to be used in the
 * literal area for attribute data once the new bytes have been added.
 *
 * di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
 * special case for dev/uuid inodes, they have fixed size data forks.
 */
int
xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
{
	int offset;
	int minforkoff;	/* lower limit on valid forkoff locations */
	int maxforkoff;	/* upper limit on valid forkoff locations */
	int dsize;
	xfs_mount_t *mp = dp->i_mount;

	/* rounded down */
	offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;

	switch (dp->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		/* dev inodes have a fixed-size data fork */
		minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		return (offset >= minforkoff) ? minforkoff : 0;
	case XFS_DINODE_FMT_UUID:
		minforkoff = roundup(sizeof(uuid_t), 8) >> 3;
		return (offset >= minforkoff) ? minforkoff : 0;
	}

	/*
	 * If the requested numbers of bytes is smaller or equal to the
	 * current attribute fork size we can always proceed.
	 *
	 * Note that if_bytes in the data fork might actually be larger than
	 * the current data fork size is due to delalloc extents. In that
	 * case either the extent count will go down when they are converted
	 * to real extents, or the delalloc conversion will take care of the
	 * literal area rebalancing.
	 */
	if (bytes <= XFS_IFORK_ASIZE(dp))
		return dp->i_d.di_forkoff;

	/*
	 * For attr2 we can try to move the forkoff if there is space in the
	 * literal area, but for the old format we are done if there is no
	 * space in the fixed attribute fork.
	 */
	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
		return 0;

	dsize = dp->i_df.if_bytes;

	switch (dp->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If there is no attr fork and the data fork is extents,
		 * determine if creating the default attr fork will result
		 * in the extents form migrating to btree. If so, the
		 * minimum offset only needs to be the space required for
		 * the btree root.
		 */
		if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
		    xfs_default_attroffset(dp))
			dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
		break;
	case XFS_DINODE_FMT_BTREE:
		/*
		 * If we have a data btree then keep forkoff if we have one,
		 * otherwise we are adding a new attr, so then we set
		 * minforkoff to where the btree root can finish so we have
		 * plenty of room for attrs
		 */
		if (dp->i_d.di_forkoff) {
			if (offset < dp->i_d.di_forkoff)
				return 0;
			return dp->i_d.di_forkoff;
		}
		dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
		break;
	}

	/*
	 * A data fork btree root must have space for at least
	 * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
	 */
	minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
	minforkoff = roundup(minforkoff, 8) >> 3;

	/* attr fork btree root can have at least this many key/ptr pairs */
	maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
			XFS_BMDR_SPACE_CALC(MINABTPTRS);
	maxforkoff = maxforkoff >> 3;	/* rounded down */

	if (offset >= maxforkoff)
		return maxforkoff;
	if (offset >= minforkoff)
		return offset;
	return 0;
}

/*
 * Switch on the ATTR2 superblock bit (implies also FEATURES2)
 */
STATIC void
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
	if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
	    !(xfs_sb_version_hasattr2(&mp->m_sb))) {
		spin_lock(&mp->m_sb_lock);
		/* re-check under the lock in case we raced another setter */
		if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
			xfs_sb_version_addattr2(&mp->m_sb);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
		} else
			spin_unlock(&mp->m_sb_lock);
	}
}

/*
 * Create the initial contents of a shortform attribute list.
 */
void
xfs_attr_shortform_create(xfs_da_args_t *args)
{
	xfs_attr_sf_hdr_t *hdr;
	xfs_inode_t *dp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_create(args);

	dp = args->dp;
	ASSERT(dp != NULL);
	ifp = dp->i_afp;
	ASSERT(ifp != NULL);
	ASSERT(ifp->if_bytes == 0);
	if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
		ifp->if_flags &= ~XFS_IFEXTENTS;	/* just in case */
		dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
		ifp->if_flags |= XFS_IFINLINE;
	} else {
		ASSERT(ifp->if_flags & XFS_IFINLINE);
	}
	/* allocate room for just the header; count starts at zero */
	xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
	hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
	hdr->count = 0;
	hdr->totsize = cpu_to_be16(sizeof(*hdr));
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}

/*
 * Add a name/value pair to the shortform attribute list.
 * Overflow from the inode has already been checked for.
 */
void
xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i, offset, size;
	xfs_mount_t *mp;
	xfs_inode_t *dp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_add(args);

	dp = args->dp;
	mp = dp->i_mount;
	dp->i_d.di_forkoff = forkoff;

	ifp = dp->i_afp;
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = &sf->list[0];
	/* debug-only duplicate check; loop also advances sfe past the end */
	for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
#ifdef DEBUG
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		ASSERT(0);
#endif
	}

	/*
	 * Remember the append offset before reallocating: the realloc may
	 * move the fork data, so sf/sfe must be recomputed afterwards.
	 */
	offset = (char *)sfe - (char *)sf;
	size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
	xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);

	sfe->namelen = args->namelen;
	sfe->valuelen = args->valuelen;
	sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	memcpy(sfe->nameval, args->name, args->namelen);
	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
	sf->hdr.count++;
	be16_add_cpu(&sf->hdr.totsize, size);
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);

	xfs_sbversion_add_attr2(mp, args->trans);
}

/*
 * After the last attribute is removed revert to original inode format,
 * making all literal area available to the data fork once more.
 */
STATIC void
xfs_attr_fork_reset(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp)
{
	xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	ip->i_d.di_forkoff = 0;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_afp == NULL);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

/*
 * Remove an attribute from the shortform attribute list structure.
 */
int
xfs_attr_shortform_remove(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int base, size=0, end, totsize, i;
	xfs_mount_t *mp;
	xfs_inode_t *dp;

	trace_xfs_attr_sf_remove(args);

	dp = args->dp;
	mp = dp->i_mount;
	base = sizeof(xfs_attr_sf_hdr_t);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	end = sf->hdr.count;
	/* walk entries tracking 'base', the byte offset of the match */
	for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
					base += size, i++) {
		size = XFS_ATTR_SF_ENTSIZE(sfe);
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		break;
	}
	if (i == end)
		return(XFS_ERROR(ENOATTR));

	/*
	 * Fix up the attribute fork data, covering the hole
	 */
	end = base + size;
	totsize = be16_to_cpu(sf->hdr.totsize);
	if (end != totsize)
		memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
	sf->hdr.count--;
	be16_add_cpu(&sf->hdr.totsize, -size);

	/*
	 * Fix up the start offset of the attribute fork
	 */
	totsize -= size;
	if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
	    (mp->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    !(args->op_flags & XFS_DA_OP_ADDNAME)) {
		/* last attr gone: give the literal area back to data fork */
		xfs_attr_fork_reset(dp, args->trans);
	} else {
		xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
		dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
		ASSERT(dp->i_d.di_forkoff);
		ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
				(args->op_flags & XFS_DA_OP_ADDNAME) ||
				!(mp->m_flags & XFS_MOUNT_ATTR2) ||
				dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
		xfs_trans_log_inode(args->trans, dp,
					XFS_ILOG_CORE | XFS_ILOG_ADATA);
	}

	xfs_sbversion_add_attr2(mp, args->trans);

	return(0);
}

/*
 * Look up a name in a shortform attribute list structure.
 * Returns EEXIST if found, ENOATTR if not.
 */
/*ARGSUSED*/
int
xfs_attr_shortform_lookup(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_lookup(args);

	ifp = args->dp->i_afp;
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		return(XFS_ERROR(EEXIST));
	}
	return(XFS_ERROR(ENOATTR));
}

/*
 * Look up a name in a shortform attribute list structure and copy the
 * value out into args->value (or just report its length for ATTR_KERNOVAL).
 */
/*ARGSUSED*/
int
xfs_attr_shortform_getvalue(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;

	ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		if (args->flags & ATTR_KERNOVAL) {
			/* caller only wants the length */
			args->valuelen = sfe->valuelen;
			return(XFS_ERROR(EEXIST));
		}
		if (args->valuelen < sfe->valuelen) {
			/* caller's buffer too small; report needed size */
			args->valuelen = sfe->valuelen;
			return(XFS_ERROR(ERANGE));
		}
		args->valuelen = sfe->valuelen;
		memcpy(args->value, &sfe->nameval[args->namelen],
						    args->valuelen);
		return(XFS_ERROR(EEXIST));
	}
	return(XFS_ERROR(ENOATTR));
}

/*
 * Convert from using the shortform to the leaf.
 */
int
xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
{
	xfs_inode_t *dp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_da_args_t nargs;
	char *tmpbuffer;
	int error, i, size;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_to_leaf(args);

	dp = args->dp;
	ifp = dp->i_afp;
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	size = be16_to_cpu(sf->hdr.totsize);
	/* snapshot the shortform data before tearing down the inline fork */
	tmpbuffer = kmem_alloc(size, KM_SLEEP);
	ASSERT(tmpbuffer != NULL);
	memcpy(tmpbuffer, ifp->if_u1.if_data, size);
	sf = (xfs_attr_shortform_t *)tmpbuffer;

	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
	xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);

	bp = NULL;
	error = xfs_da_grow_inode(args, &blkno);
	if (error) {
		/*
		 * If we hit an IO error middle of the transaction inside
		 * grow_inode(), we may have inconsistent data. Bail out.
		 */
		if (error == EIO)
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	ASSERT(blkno == 0);
	error = xfs_attr3_leaf_create(args, blkno, &bp);
	if (error) {
		error = xfs_da_shrink_inode(args, 0, bp);
		bp = NULL;
		if (error)
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	/* re-add each saved attr into the new leaf block */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.firstblock = args->firstblock;
	nargs.flist = args->flist;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count; i++) {
		nargs.name = sfe->nameval;
		nargs.namelen = sfe->namelen;
		nargs.value = &sfe->nameval[nargs.namelen];
		nargs.valuelen = sfe->valuelen;
		nargs.hashval = xfs_da_hashname(sfe->nameval,
						sfe->namelen);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
		error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
		ASSERT(error == ENOATTR);
		error = xfs_attr3_leaf_add(bp, &nargs);
		ASSERT(error != ENOSPC);
		if (error)
			goto out;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
	}
	error = 0;

out:
	kmem_free(tmpbuffer);
	return(error);
}

/*
 * Check a leaf attribute block to see if all the entries would fit into
 * a shortform attribute list.  Returns 0 if they won't fit, -1 if the
 * attr fork should be reset entirely, else the forkoff to use.
 */
int
xfs_attr_shortform_allfit(
	struct xfs_buf		*bp,
	struct xfs_inode	*dp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	struct xfs_attr3_icleaf_hdr leafhdr;
	int			bytes;
	int			i;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < leafhdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
			return(0);	/* remote values can never be inline */
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
			return(0);
		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
			return(0);
		bytes += sizeof(struct xfs_attr_sf_entry) - 1
				+ name_loc->namelen
				+ be16_to_cpu(name_loc->valuelen);
	}
	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return -1;
	return xfs_attr_shortform_bytesfit(dp, bytes);
}

/*
 * Convert a leaf attribute list to shortform attribute list
 */
int
xfs_attr3_leaf_to_shortform(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args,
	int			forkoff)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_local *name_loc;
	struct xfs_da_args	nargs;
	struct xfs_inode	*dp = args->dp;
	char			*tmpbuffer;
	int			error;
	int			i;

	trace_xfs_attr_leaf_to_sf(args);

	tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
	if (!tmpbuffer)
		return ENOMEM;

	/* work from a copy; the real buffer is freed below */
	memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(dp->i_mount));

	leaf = (xfs_attr_leafblock_t *)tmpbuffer;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	/* XXX (dgc): buffer is about to be marked stale - why zero it? */
	memset(bp->b_addr, 0, XFS_LBSIZE(dp->i_mount));

	/*
	 * Clean out the prior contents of the attribute list.
	 */
	error = xfs_da_shrink_inode(args, 0, bp);
	if (error)
		goto out;

	if (forkoff == -1) {
		ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
		ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
		xfs_attr_fork_reset(dp, args->trans);
		goto out;
	}

	xfs_attr_shortform_create(args);

	/*
	 * Copy the attributes
	 */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.firstblock = args->firstblock;
	nargs.flist = args->flist;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	for (i = 0; i < ichdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;	/* don't copy partial entries */
		if (!entry->nameidx)
			continue;
		ASSERT(entry->flags & XFS_ATTR_LOCAL);
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		nargs.name = name_loc->nameval;
		nargs.namelen = name_loc->namelen;
		nargs.value = &name_loc->nameval[nargs.namelen];
		nargs.valuelen = be16_to_cpu(name_loc->valuelen);
		nargs.hashval = be32_to_cpu(entry->hashval);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
		xfs_attr_shortform_add(&nargs, forkoff);
	}
	error = 0;

out:
	kmem_free(tmpbuffer);
	return error;
}

/*
 * Convert from using a single leaf to a root node and a leaf.
 */
int
xfs_attr3_leaf_to_node(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr icleafhdr;
	struct xfs_attr_leaf_entry *entries;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr icnodehdr;
	struct xfs_da_intnode	*node;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp1 = NULL;
	struct xfs_buf		*bp2 = NULL;
	xfs_dablk_t		blkno;
	int			error;

	trace_xfs_attr_leaf_to_node(args);

	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		goto out;
	/* the existing leaf lives at dablk 0 and moves to 'blkno' */
	error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
	if (error)
		goto out;

	error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
	if (error)
		goto out;

	/* copy leaf to new buffer, update identifiers */
	xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
	bp2->b_ops = bp1->b_ops;
	memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(mp));
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
		hdr3->blkno = cpu_to_be64(bp2->b_bn);
	}
	xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(mp) - 1);

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
	if (error)
		goto out;
	node = bp1->b_addr;
	dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	leaf = bp2->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	/* both on-disk, don't endian-flip twice */
	btree[0].hashval = entries[icleafhdr.count - 1].hashval;
	btree[0].before = cpu_to_be32(blkno);
	icnodehdr.count = 1;
	dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
	xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(mp) - 1);
	error = 0;
out:
	return error;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of a leaf attribute list
 * or a leaf in a node attribute list.
 */
STATIC int
xfs_attr3_leaf_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	struct xfs_buf		**bpp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	int			error;

	trace_xfs_attr_leaf_create(args);

	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
					    XFS_ATTR_FORK);
	if (error)
		return error;
	bp->b_ops = &xfs_attr3_leaf_buf_ops;
	xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF);
	leaf = bp->b_addr;
	memset(leaf, 0, XFS_LBSIZE(mp));

	/* empty block: names grow down from the end of the block */
	memset(&ichdr, 0, sizeof(ichdr));
	ichdr.firstused = XFS_LBSIZE(mp);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_blkinfo *hdr3 = bp->b_addr;

		ichdr.magic = XFS_ATTR3_LEAF_MAGIC;

		hdr3->blkno = cpu_to_be64(bp->b_bn);
		hdr3->owner = cpu_to_be64(dp->i_ino);
		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);

		ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
	} else {
		ichdr.magic = XFS_ATTR_LEAF_MAGIC;
		ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
	}
	ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;

	xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(mp) - 1);

	*bpp = bp;
	return 0;
}

/*
 * Split the leaf node, rebalance, then add the new entry.
 */
int
xfs_attr3_leaf_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	xfs_dablk_t blkno;
	int error;

	trace_xfs_attr_leaf_split(state->args);

	/*
	 * Allocate space for a new leaf node.
	 */
	ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
	error = xfs_da_grow_inode(state->args, &blkno);
	if (error)
		return(error);
	error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
	if (error)
		return(error);
	newblk->blkno = blkno;
	newblk->magic = XFS_ATTR_LEAF_MAGIC;

	/*
	 * Rebalance the entries across the two leaves.
	 * NOTE: rebalance() currently depends on the 2nd block being empty.
	 */
	xfs_attr3_leaf_rebalance(state, oldblk, newblk);
	error = xfs_da3_blk_link(state, oldblk, newblk);
	if (error)
		return(error);

	/*
	 * Save info on "old" attribute for "atomic rename" ops, leaf_add()
	 * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
	 * "new" attrs info.  Will need the "old" info to remove it later.
	 *
	 * Insert the "new" entry in the correct block.
	 */
	if (state->inleaf) {
		trace_xfs_attr_leaf_add_old(state->args);
		error = xfs_attr3_leaf_add(oldblk->bp, state->args);
	} else {
		trace_xfs_attr_leaf_add_new(state->args);
		error = xfs_attr3_leaf_add(newblk->bp, state->args);
	}

	/*
	 * Update last hashval in each block since we added the name.
	 */
	oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
	newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
	return(error);
}

/*
 * Add a name to the leaf attribute list structure.
 */
int
xfs_attr3_leaf_add(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	int			tablesize;
	int			entsize;
	int			sum;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add(args);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
	ASSERT(args->index >= 0 && args->index <= ichdr.count);
	entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
			   args->trans->t_mountp->m_sb.sb_blocksize, NULL);

	/*
	 * Search through freemap for first-fit on new name length.
	 * (may need to figure in size of entry struct too)
	 */
	tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);
	for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
		if (tablesize > ichdr.firstused) {
			sum += ichdr.freemap[i].size;
			continue;
		}
		if (!ichdr.freemap[i].size)
			continue;	/* no space in this map */
		tmp = entsize;
		if (ichdr.freemap[i].base < ichdr.firstused)
			tmp += sizeof(xfs_attr_leaf_entry_t);
		if (ichdr.freemap[i].size >= tmp) {
			tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
			goto out_log_hdr;
		}
		sum += ichdr.freemap[i].size;
	}

	/*
	 * If there are no holes in the address space of the block,
	 * and we don't have enough freespace, then compaction will do us
	 * no good and we should just give up.
	 */
	if (!ichdr.holes && sum < entsize)
		return XFS_ERROR(ENOSPC);

	/*
	 * Compact the entries to coalesce free space.
	 * This may change the hdr->count via dropping INCOMPLETE entries.
	 */
	xfs_attr3_leaf_compact(args, &ichdr, bp);

	/*
	 * After compaction, the block is guaranteed to have only one
	 * free region, in freemap[0].  If it is not big enough, give up.
	 */
	if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
		tmp = ENOSPC;
		goto out_log_hdr;
	}

	tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);

out_log_hdr:
	/* push the (possibly modified) in-core header back and log it */
	xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp,
		XFS_DA_LOGRANGE(leaf, &leaf->hdr,
				xfs_attr3_leaf_hdr_size(leaf)));
	return tmp;
}

/*
 * Add a name to a leaf attribute list structure.
 */
STATIC int
xfs_attr3_leaf_add_work(
	struct xfs_buf		*bp,
	struct xfs_attr3_icleaf_hdr *ichdr,
	struct xfs_da_args	*args,
	int			mapindex)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_local *name_loc;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_mount	*mp;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add_work(args);

	leaf = bp->b_addr;
	ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
	ASSERT(args->index >= 0 && args->index <= ichdr->count);

	/*
	 * Force open some space in the entry array and fill it in.
	 */
	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
	if (args->index < ichdr->count) {
		/* shift later entries up by one to open a slot */
		tmp  = ichdr->count - args->index;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		memmove(entry + 1, entry, tmp);
		xfs_trans_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
	}
	ichdr->count++;

	/*
	 * Allocate space for the new string (at the end of the run).
	 */
	mp = args->trans->t_mountp;
	ASSERT(ichdr->freemap[mapindex].base < XFS_LBSIZE(mp));
	ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
	ASSERT(ichdr->freemap[mapindex].size >=
		xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
					 mp->m_sb.sb_blocksize, NULL));
	ASSERT(ichdr->freemap[mapindex].size < XFS_LBSIZE(mp));
	ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);

	ichdr->freemap[mapindex].size -=
			xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
						 mp->m_sb.sb_blocksize, &tmp);

	/* name/value pair is carved from the high end of the free region */
	entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
				     ichdr->freemap[mapindex].size);
	entry->hashval = cpu_to_be32(args->hashval);
	/* tmp was set by xfs_attr_leaf_newentsize(): non-zero => local value */
	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
	entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	if (args->op_flags & XFS_DA_OP_RENAME) {
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* keep the "old" entry index valid after our insertion shift */
		if ((args->blkno2 == args->blkno) &&
		    (args->index2 <= args->index)) {
			args->index2++;
		}
	}
	xfs_trans_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	ASSERT((args->index == 0) ||
	       (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
	ASSERT((args->index == ichdr->count - 1) ||
	       (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));

	/*
	 * For "remote" attribute values, simply note that we need to
	 * allocate space for the "remote" value.  We can't actually
	 * allocate the extents in this transaction, and we can't decide
	 * which blocks they should be as we might allocate more blocks
	 * as part of this transaction (a split operation for example).
	 */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
		name_loc->namelen = args->namelen;
		name_loc->valuelen = cpu_to_be16(args->valuelen);
		memcpy((char *)name_loc->nameval, args->name, args->namelen);
		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
				   be16_to_cpu(name_loc->valuelen));
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		name_rmt->namelen = args->namelen;
		memcpy((char *)name_rmt->name, args->name, args->namelen);
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* just in case */
		name_rmt->valuelen = 0;
		name_rmt->valueblk = 0;
		args->rmtblkno = 1;
		args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
		args->rmtvaluelen = args->valuelen;
	}
	xfs_trans_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
				   xfs_attr_leaf_entsize(leaf, args->index)));

	/*
	 * Update the control info for this leaf node
	 */
	if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
		ichdr->firstused = be16_to_cpu(entry->nameidx);

	ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf));
	tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);

	/* any free region that bordered the entry table just lost one slot */
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		if (ichdr->freemap[i].base == tmp) {
			ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
			ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
		}
	}
	ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
	return 0;
}

/*
 * Garbage collect a leaf attribute list block by copying it to a new buffer.
 */
STATIC void
xfs_attr3_leaf_compact(
	struct xfs_da_args	*args,
	struct xfs_attr3_icleaf_hdr *ichdr_dst,
	struct xfs_buf		*bp)
{
	struct xfs_attr_leafblock *leaf_src;
	struct xfs_attr_leafblock *leaf_dst;
	struct xfs_attr3_icleaf_hdr ichdr_src;
	struct xfs_trans	*trans = args->trans;
	struct xfs_mount	*mp = trans->t_mountp;
	char			*tmpbuffer;

	trace_xfs_attr_leaf_compact(args);

	/* snapshot the whole block, then rebuild it in place from the copy */
	tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
	memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
	memset(bp->b_addr, 0, XFS_LBSIZE(mp));
	leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
	leaf_dst = bp->b_addr;

	/*
	 * Copy the on-disk header back into the destination buffer to ensure
	 * all the information in the header that is not part of the incore
	 * header structure is preserved.
	 */
	memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));

	/* Initialise the incore headers */
	ichdr_src = *ichdr_dst;	/* struct copy */
	ichdr_dst->firstused = XFS_LBSIZE(mp);
	ichdr_dst->usedbytes = 0;
	ichdr_dst->count = 0;
	ichdr_dst->holes = 0;
	ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
	ichdr_dst->freemap[0].size = ichdr_dst->firstused -
						ichdr_dst->freemap[0].base;

	/* write the header back to initialise the underlying buffer */
	xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate name/value pairs packed and in sequence.
	 */
	xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
				ichdr_src.count, mp);
	/*
	 * this logs the entire buffer, but the caller must write the header
	 * back to the buffer when it is finished modifying it.
	 */
	xfs_trans_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);

	kmem_free(tmpbuffer);
}

/*
 * Compare two leaf blocks "order".
 * Return 0 unless leaf2 should go before leaf1.
 */
static int
xfs_attr3_leaf_order(
	struct xfs_buf	*leaf1_bp,
	struct xfs_attr3_icleaf_hdr *leaf1hdr,
	struct xfs_buf	*leaf2_bp,
	struct xfs_attr3_icleaf_hdr *leaf2hdr)
{
	struct xfs_attr_leaf_entry *entries1;
	struct xfs_attr_leaf_entry *entries2;

	entries1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr);
	entries2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr);
	/*
	 * leaf2 sorts first if either its first or its last hashval is
	 * below the corresponding hashval of leaf1; empty leaves compare
	 * as already in order.
	 */
	if (leaf1hdr->count > 0 && leaf2hdr->count > 0 &&
	    ((be32_to_cpu(entries2[0].hashval) <
	      be32_to_cpu(entries1[0].hashval)) ||
	     (be32_to_cpu(entries2[leaf2hdr->count - 1].hashval) <
	      be32_to_cpu(entries1[leaf1hdr->count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Public wrapper: decode both incore headers from the buffers, then
 * compare block order as above.
 */
int
xfs_attr_leaf_order(
	struct xfs_buf	*leaf1_bp,
	struct xfs_buf	*leaf2_bp)
{
	struct xfs_attr3_icleaf_hdr ichdr1;
	struct xfs_attr3_icleaf_hdr ichdr2;

	xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1_bp->b_addr);
	xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2_bp->b_addr);
	return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
}

/*
 * Redistribute the attribute list entries between two leaf nodes,
 * taking into account the size of the new entry.
 *
 * NOTE: if new block is empty, then it will get the upper half of the
 * old block.  At present, all (one) callers pass in an empty second block.
 *
 * This code adjusts the args->index/blkno and args->index2/blkno2 fields
 * to match what it is doing in splitting the attribute leaf block.  Those
 * values are used in "atomic rename" operations on attributes.  Note that
 * the "new" and "old" values can end up in different blocks.
 */
STATIC void
xfs_attr3_leaf_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_args	*args;
	struct xfs_attr_leafblock *leaf1;
	struct xfs_attr_leafblock *leaf2;
	struct xfs_attr3_icleaf_hdr ichdr1;
	struct xfs_attr3_icleaf_hdr ichdr2;
	struct xfs_attr_leaf_entry *entries1;
	struct xfs_attr_leaf_entry *entries2;
	int			count;
	int			totallen;
	int			max;
	int			space;
	int			swap;

	/*
	 * Set up environment.
	 */
	ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
	leaf1 = blk1->bp->b_addr;
	leaf2 = blk2->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
	xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
	ASSERT(ichdr2.count == 0);
	args = state->args;

	trace_xfs_attr_leaf_rebalance(args);

	/*
	 * Check ordering of blocks, reverse if it makes things simpler.
	 *
	 * NOTE: Given that all (current) callers pass in an empty
	 * second block, this code should never set "swap".
	 */
	swap = 0;
	if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
		struct xfs_da_state_blk	*tmp_blk;
		struct xfs_attr3_icleaf_hdr tmp_ichdr;

		tmp_blk = blk1;
		blk1 = blk2;
		blk2 = tmp_blk;

		/* struct copies to swap them rather than reconverting */
		tmp_ichdr = ichdr1;
		ichdr1 = ichdr2;
		ichdr2 = tmp_ichdr;

		leaf1 = blk1->bp->b_addr;
		leaf2 = blk2->bp->b_addr;
		swap = 1;
	}

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.  Then get
	 * the direction to copy and the number of elements to move.
	 *
	 * "inleaf" is true if the new entry should be inserted into blk1.
	 * If "swap" is also true, then reverse the sense of "inleaf".
	 */
	state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
						      blk2, &ichdr2,
						      &count, &totallen);
	if (swap)
		state->inleaf = !state->inleaf;

	/*
	 * Move any entries required from leaf to leaf:
	 */
	if (count < ichdr1.count) {
		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count = ichdr1.count - count;
		space  = ichdr1.usedbytes - totallen;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf2 is the destination, compact it if it looks tight.
		 */
		/* NOTE(review): hdr size taken from leaf1; both leaves share
		 * the same format on one fs, so the sizes are equal. */
		max  = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);

		/*
		 * Move high entries from leaf1 to low end of leaf2.
		 */
		xfs_attr3_leaf_moveents(leaf1, &ichdr1, ichdr1.count - count,
				leaf2, &ichdr2, 0, count, state->mp);

	} else if (count > ichdr1.count) {
		/*
		 * I assert that since all callers pass in an empty
		 * second buffer, this code should never execute.
		 */
		ASSERT(0);

		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count -= ichdr1.count;
		space  = totallen - ichdr1.usedbytes;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf1 is the destination, compact it if it looks tight.
		 */
		max  = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);

		/*
		 * Move low entries from leaf2 to high end of leaf1.
		 */
		xfs_attr3_leaf_moveents(leaf2, &ichdr2, 0, leaf1, &ichdr1,
					ichdr1.count, count, state->mp);
	}

	xfs_attr3_leaf_hdr_to_disk(leaf1, &ichdr1);
	xfs_attr3_leaf_hdr_to_disk(leaf2, &ichdr2);
	xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
	xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	entries1 = xfs_attr3_leaf_entryp(leaf1);
	entries2 = xfs_attr3_leaf_entryp(leaf2);
	blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 * NOTE: this code depends on the (current) situation that the
	 * second block was originally empty.
	 *
	 * If the insertion point moved to the 2nd block, we must adjust
	 * the index.  We must also track the entry just following the
	 * new entry for use in an "atomic rename" operation, that entry
	 * is always the "old" entry and the "new" entry is what we are
	 * inserting.  The index/blkno fields refer to the "old" entry,
	 * while the index2/blkno2 fields refer to the "new" entry.
	 */
	if (blk1->index > ichdr1.count) {
		ASSERT(state->inleaf == 0);
		blk2->index = blk1->index - ichdr1.count;
		args->index = args->index2 = blk2->index;
		args->blkno = args->blkno2 = blk2->blkno;
	} else if (blk1->index == ichdr1.count) {
		if (state->inleaf) {
			args->index = blk1->index;
			args->blkno = blk1->blkno;
			args->index2 = 0;
			args->blkno2 = blk2->blkno;
		} else {
			/*
			 * On a double leaf split, the original attr location
			 * is already stored in blkno2/index2, so don't
			 * overwrite it overwise we corrupt the tree.
			 */
			blk2->index = blk1->index - ichdr1.count;
			args->index = blk2->index;
			args->blkno = blk2->blkno;
			if (!state->extravalid) {
				/*
				 * set the new attr location to match the old
				 * one and let the higher level split code
				 * decide where in the leaf to place it.
				 */
				args->index2 = blk2->index;
				args->blkno2 = blk2->blkno;
			}
		}
	} else {
		ASSERT(state->inleaf == 1);
		args->index = args->index2 = blk1->index;
		args->blkno = args->blkno2 = blk1->blkno;
	}
}

/*
 * Examine entries until we reduce the absolute difference in
 * byte usage between the two blocks to a minimum.
 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
 * GROT: there will always be enough room in either block for a new entry.
 * GROT: Do a double-split for this case?
 */
STATIC int
xfs_attr3_leaf_figure_balance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_attr3_icleaf_hdr *ichdr1,
	struct xfs_da_state_blk	*blk2,
	struct xfs_attr3_icleaf_hdr *ichdr2,
	int			*countarg,
	int			*usedbytesarg)
{
	struct xfs_attr_leafblock *leaf1 = blk1->bp->b_addr;
	struct xfs_attr_leafblock *leaf2 = blk2->bp->b_addr;
	struct xfs_attr_leaf_entry *entry;
	int			count;
	int			max;
	int			index;
	int			totallen = 0;
	int			half;
	int			lastdelta;
	int			foundit = 0;
	int			tmp;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.
	 */
	/* "half" is half the combined usage including the new entry */
	max = ichdr1->count + ichdr2->count;
	half = (max + 1) * sizeof(*entry);
	half += ichdr1->usedbytes + ichdr2->usedbytes +
			xfs_attr_leaf_newentsize(state->args->namelen,
						 state->args->valuelen,
						 state->blocksize, NULL);
	half /= 2;
	lastdelta = state->blocksize;
	entry = xfs_attr3_leaf_entryp(leaf1);
	for (count = index = 0; count < max; entry++, index++, count++) {

#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
		/*
		 * The new entry is in the first block, account for it.
		 */
		if (count == blk1->index) {
			tmp = totallen + sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
				break;
			lastdelta = XFS_ATTR_ABS(half - tmp);
			totallen = tmp;
			foundit = 1;
		}

		/*
		 * Wrap around into the second block if necessary.
		 */
		if (count == ichdr1->count) {
			leaf1 = leaf2;
			entry = xfs_attr3_leaf_entryp(leaf1);
			index = 0;
		}

		/*
		 * Figure out if next leaf entry would be too much.
		 */
		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
									index);
		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
			break;
		lastdelta = XFS_ATTR_ABS(half - tmp);
		totallen = tmp;
#undef XFS_ATTR_ABS
	}

	/*
	 * Calculate the number of usedbytes that will end up in lower block.
	 * If new entry not in lower block, fix up the count.
	 */
	totallen -= count * sizeof(*entry);
	if (foundit) {
		totallen -= sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
	}

	*countarg = count;
	*usedbytesarg = totallen;
	return foundit;
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Check a leaf block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 *
 * GROT: allow for INCOMPLETE entries in calculation.
 */
int
xfs_attr3_leaf_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_da_state_blk	*blk;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno;
	int			bytes;
	int			forward;
	int			error;
	int			retval;
	int			i;

	trace_xfs_attr_leaf_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	leaf = blk->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
	bytes = xfs_attr3_leaf_hdr_size(leaf) +
		ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
		ichdr.usedbytes;
	if (bytes > (state->blocksize >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (ichdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (ichdr.forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink an attribute list over time.
	 */
	/* start with smaller blk num */
	forward = ichdr.forw < ichdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_attr3_icleaf_hdr ichdr2;
		if (forward)
			blkno = ichdr.forw;
		else
			blkno = ichdr.back;
		if (blkno == 0)
			continue;
		error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
					blkno, -1, &bp);
		if (error)
			return(error);

		xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr);

		/* bytes >= 0 means both blocks fit in one with 25% slack */
		bytes = state->blocksize - (state->blocksize >> 2) -
			ichdr.usedbytes - ichdr2.usedbytes -
			((ichdr.count + ichdr2.count) *
					sizeof(xfs_attr_leaf_entry_t)) -
			xfs_attr3_leaf_hdr_size(leaf);

		xfs_trans_brelse(state->args->trans, bp);
		if (bytes >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	if (retval) {
		*action = 0;
	} else {
		*action = 1;
	}
	return(0);
}

/*
 * Remove a name from the leaf attribute list structure.
 *
 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
 * If two leaves are 37% full, when combined they will leave 25% free.
 */
int
xfs_attr3_leaf_remove(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_mount	*mp = args->trans->t_mountp;
	int			before;
	int			after;
	int			smallest;
	int			entsize;
	int			tablesize;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_remove(args);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);

	ASSERT(ichdr.count > 0 && ichdr.count < XFS_LBSIZE(mp) / 8);
	ASSERT(args->index >= 0 && args->index < ichdr.count);
	ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) +
					xfs_attr3_leaf_hdr_size(leaf));

	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];

	ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
	ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));

	/*
	 * Scan through free region table:
	 *    check for adjacency of free'd entry with an existing one,
	 *    find smallest free region in case we need to replace it,
	 *    adjust any map that borders the entry table,
	 */
	tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);
	tmp = ichdr.freemap[0].size;
	before = after = -1;
	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
	entsize = xfs_attr_leaf_entsize(leaf, args->index);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		ASSERT(ichdr.freemap[i].base < XFS_LBSIZE(mp));
		ASSERT(ichdr.freemap[i].size < XFS_LBSIZE(mp));
		/* entry table shrinks by one slot; grow bordering region */
		if (ichdr.freemap[i].base == tablesize) {
			ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t);
			ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t);
		}

		if (ichdr.freemap[i].base + ichdr.freemap[i].size ==
				be16_to_cpu(entry->nameidx)) {
			before = i;
		} else if (ichdr.freemap[i].base ==
				(be16_to_cpu(entry->nameidx) + entsize)) {
			after = i;
		} else if (ichdr.freemap[i].size < tmp) {
			tmp = ichdr.freemap[i].size;
			smallest = i;
		}
	}

	/*
	 * Coalesce adjacent freemap regions,
	 * or replace the smallest region.
	 */
	if ((before >= 0) || (after >= 0)) {
		if ((before >= 0) && (after >= 0)) {
			ichdr.freemap[before].size += entsize;
			ichdr.freemap[before].size += ichdr.freemap[after].size;
			ichdr.freemap[after].base = 0;
			ichdr.freemap[after].size = 0;
		} else if (before >= 0) {
			ichdr.freemap[before].size += entsize;
		} else {
			ichdr.freemap[after].base = be16_to_cpu(entry->nameidx);
			ichdr.freemap[after].size += entsize;
		}
	} else {
		/*
		 * Replace smallest region (if it is smaller than free'd entry)
		 */
		if (ichdr.freemap[smallest].size < entsize) {
			ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx);
			ichdr.freemap[smallest].size = entsize;
		}
	}

	/*
	 * Did we remove the first entry?
	 */
	/* "smallest" is reused below as a did-we-remove-firstused flag */
	if (be16_to_cpu(entry->nameidx) == ichdr.firstused)
		smallest = 1;
	else
		smallest = 0;

	/*
	 * Compress the remaining entries and zero out the removed stuff.
	 */
	memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize);
	ichdr.usedbytes -= entsize;
	xfs_trans_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
				   entsize));

	tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t);
	memmove(entry, entry + 1, tmp);
	ichdr.count--;
	xfs_trans_log_buf(args->trans, bp,
	    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t)));

	entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count];
	memset(entry, 0, sizeof(xfs_attr_leaf_entry_t));

	/*
	 * If we removed the first entry, re-find the first used byte
	 * in the name area.  Note that if the entry was the "firstused",
	 * then we don't have a "hole" in our block resulting from
	 * removing the name.
	 */
	if (smallest) {
		tmp = XFS_LBSIZE(mp);
		entry = xfs_attr3_leaf_entryp(leaf);
		for (i = ichdr.count - 1; i >= 0; entry++, i--) {
			ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
			ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));

			if (be16_to_cpu(entry->nameidx) < tmp)
				tmp = be16_to_cpu(entry->nameidx);
		}
		ichdr.firstused = tmp;
		/* NOTE(review): firstused of 0 is not representable on disk;
		 * this fallback should be unreachable given nameidx > 0. */
		if (!ichdr.firstused)
			ichdr.firstused = tmp - XFS_ATTR_LEAF_NAME_ALIGN;
	} else {
		ichdr.holes = 1;	/* mark as needing compaction */
	}
	xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, &leaf->hdr,
					  xfs_attr3_leaf_hdr_size(leaf)));

	/*
	 * Check if leaf is less than 50% full, caller may want to
	 * "join" the leaf with a sibling if so.
	 */
	tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) +
	      ichdr.count * sizeof(xfs_attr_leaf_entry_t);

	return tmp < mp->m_attr_magicpct; /* leaf is < 37% full */
}

/*
 * Move all the attribute list entries from drop_leaf into save_leaf.
 */
void
xfs_attr3_leaf_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr;
	struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr;
	struct xfs_attr3_icleaf_hdr drophdr;
	struct xfs_attr3_icleaf_hdr savehdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_mount	*mp = state->mp;

	trace_xfs_attr_leaf_unbalance(state->args);

	/* NOTE(review): redundant with the initialisers above */
	drop_leaf = drop_blk->bp->b_addr;
	save_leaf = save_blk->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&drophdr, drop_leaf);
	xfs_attr3_leaf_hdr_from_disk(&savehdr, save_leaf);
	entry = xfs_attr3_leaf_entryp(drop_leaf);

	/*
	 * Save last hashval from dying block for later Btree fixup.
	 */
	drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval);

	/*
	 * Check if we need a temp buffer, or can we do it in place.
	 * Note that we don't check "leaf" for holes because we will
	 * always be dropping it, toosmall() decided that for us already.
	 */
	if (savehdr.holes == 0) {
		/*
		 * dest leaf has no holes, so we add there.  May need
		 * to make some room in the entry array.
		 */
		if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
					 drop_blk->bp, &drophdr)) {
			/* dropped entries sort first: insert at index 0 */
			xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
						save_leaf, &savehdr, 0,
						drophdr.count, mp);
		} else {
			/* dropped entries sort last: append after existing */
			xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
						save_leaf, &savehdr,
						savehdr.count, drophdr.count, mp);
		}
	} else {
		/*
		 * Destination has holes, so we make a temporary copy
		 * of the leaf and add them both to that.
		 */
		struct xfs_attr_leafblock *tmp_leaf;
		struct xfs_attr3_icleaf_hdr tmphdr;

		tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);

		/*
		 * Copy the header into the temp leaf so that all the stuff
		 * not in the incore header is present and gets copied back in
		 * once we've moved all the entries.
		 */
		memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));

		memset(&tmphdr, 0, sizeof(tmphdr));
		tmphdr.magic = savehdr.magic;
		tmphdr.forw = savehdr.forw;
		tmphdr.back = savehdr.back;
		tmphdr.firstused = state->blocksize;

		/* write the header to the temp buffer to initialise it */
		xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);

		if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
					 drop_blk->bp, &drophdr)) {
			xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
						tmp_leaf, &tmphdr, 0,
						drophdr.count, mp);
			xfs_attr3_leaf_moveents(save_leaf, &savehdr, 0,
						tmp_leaf, &tmphdr, tmphdr.count,
						savehdr.count, mp);
		} else {
			xfs_attr3_leaf_moveents(save_leaf, &savehdr, 0,
						tmp_leaf, &tmphdr, 0,
						savehdr.count, mp);
			xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
						tmp_leaf, &tmphdr, tmphdr.count,
						drophdr.count, mp);
		}
		/* copy the merged result back over the save leaf */
		memcpy(save_leaf, tmp_leaf, state->blocksize);
		savehdr = tmphdr; /* struct copy */
		kmem_free(tmp_leaf);
	}

	xfs_attr3_leaf_hdr_to_disk(save_leaf, &savehdr);
	xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
			  state->blocksize - 1);

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	entry = xfs_attr3_leaf_entryp(save_leaf);
	save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Look up a name in a leaf attribute list structure.
 * This is the internal routine, it uses the caller's buffer.
 *
 * Note that duplicate keys are allowed, but only check within the
 * current leaf node.  The Btree code must check in adjacent leaf nodes.
 *
 * Return in args->index the index into the entry[] array of either
 * the found entry, or where the entry should have been (insert before
 * that entry).
 *
 * Don't change the args->value unless we find the attribute.
 *
 * Returns XFS_ERROR(EEXIST) when the name is found (and, for remote
 * attrs, fills in args->rmtvaluelen/rmtblkno/rmtblkcnt), or
 * XFS_ERROR(ENOATTR) with args->index set to the insertion point.
 */
int
xfs_attr3_leaf_lookup_int(
    struct xfs_buf      *bp,
    struct xfs_da_args  *args)
{
    struct xfs_attr_leafblock *leaf;
    struct xfs_attr3_icleaf_hdr ichdr;
    struct xfs_attr_leaf_entry *entry;
    struct xfs_attr_leaf_entry *entries;
    struct xfs_attr_leaf_name_local *name_loc;
    struct xfs_attr_leaf_name_remote *name_rmt;
    xfs_dahash_t        hashval;
    int                 probe;
    int                 span;

    trace_xfs_attr_leaf_lookup(args);

    leaf = bp->b_addr;
    xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
    entries = xfs_attr3_leaf_entryp(leaf);
    ASSERT(ichdr.count < XFS_LBSIZE(args->dp->i_mount) / 8);

    /*
     * Binary search.  (note: small blocks will skip this loop)
     * Narrows "probe" to within 4 entries of the target hashval;
     * the linear scans below finish the job.
     */
    hashval = args->hashval;
    probe = span = ichdr.count / 2;
    for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {
        span /= 2;
        if (be32_to_cpu(entry->hashval) < hashval)
            probe += span;
        else if (be32_to_cpu(entry->hashval) > hashval)
            probe -= span;
        else
            break;
    }
    ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count));
    ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval);

    /*
     * Since we may have duplicate hashval's, find the first matching
     * hashval in the leaf.
     */
    /* back up while the current entry hashes >= the target... */
    while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) {
        entry--;
        probe--;
    }
    /* ...then step forward past entries that hash lower */
    while (probe < ichdr.count &&
           be32_to_cpu(entry->hashval) < hashval) {
        entry++;
        probe++;
    }
    if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) {
        /* no entry with this hashval: probe is the insertion point */
        args->index = probe;
        return XFS_ERROR(ENOATTR);
    }

    /*
     * Duplicate keys may be present, so search all of them for a match.
     */
    for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval);
            entry++, probe++) {
/*
 * GROT: Add code to remove incomplete entries.
 */
        /*
         * If we are looking for INCOMPLETE entries, show only those.
         * If we are looking for complete entries, show only those.
         */
        if ((args->flags & XFS_ATTR_INCOMPLETE) !=
            (entry->flags & XFS_ATTR_INCOMPLETE)) {
            continue;
        }
        if (entry->flags & XFS_ATTR_LOCAL) {
            /* name and value are stored inline in this leaf block */
            name_loc = xfs_attr3_leaf_name_local(leaf, probe);
            if (name_loc->namelen != args->namelen)
                continue;
            if (memcmp(args->name, name_loc->nameval,
                       args->namelen) != 0)
                continue;
            if (!xfs_attr_namesp_match(args->flags, entry->flags))
                continue;
            args->index = probe;
            return XFS_ERROR(EEXIST);
        } else {
            /* value lives in remote blocks: record its location/size */
            name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
            if (name_rmt->namelen != args->namelen)
                continue;
            if (memcmp(args->name, name_rmt->name,
                       args->namelen) != 0)
                continue;
            if (!xfs_attr_namesp_match(args->flags, entry->flags))
                continue;
            args->index = probe;
            args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
            args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
            args->rmtblkcnt = xfs_attr3_rmt_blocks(
                                    args->dp->i_mount,
                                    args->rmtvaluelen);
            return XFS_ERROR(EEXIST);
        }
    }
    /* hashval matched but no name did: probe is the insertion point */
    args->index = probe;
    return XFS_ERROR(ENOATTR);
}

/*
 * Get the value associated with an attribute name from a leaf attribute
 *
list structure.
 *
 * For a local attr the value bytes are copied into args->value; for a
 * remote attr only the remote block location/length are recorded here.
 * If ATTR_KERNOVAL is set, only args->valuelen is reported.  Returns
 * XFS_ERROR(ERANGE) when the caller's buffer is too small (and sets
 * args->valuelen to the required size).
 */
int
xfs_attr3_leaf_getvalue(
    struct xfs_buf      *bp,
    struct xfs_da_args  *args)
{
    struct xfs_attr_leafblock *leaf;
    struct xfs_attr3_icleaf_hdr ichdr;
    struct xfs_attr_leaf_entry *entry;
    struct xfs_attr_leaf_name_local *name_loc;
    struct xfs_attr_leaf_name_remote *name_rmt;
    int                 valuelen;

    leaf = bp->b_addr;
    xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
    ASSERT(ichdr.count < XFS_LBSIZE(args->dp->i_mount) / 8);
    ASSERT(args->index < ichdr.count);

    entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
    if (entry->flags & XFS_ATTR_LOCAL) {
        /* value stored inline: copy it out directly */
        name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
        ASSERT(name_loc->namelen == args->namelen);
        ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
        valuelen = be16_to_cpu(name_loc->valuelen);
        if (args->flags & ATTR_KERNOVAL) {
            /* size query only, no copy */
            args->valuelen = valuelen;
            return 0;
        }
        if (args->valuelen < valuelen) {
            /* caller's buffer too small: report needed size */
            args->valuelen = valuelen;
            return XFS_ERROR(ERANGE);
        }
        args->valuelen = valuelen;
        memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
    } else {
        /*
         * Remote value: record where it lives; the actual bytes are
         * fetched by the remote-attr code, not here.
         */
        name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
        ASSERT(name_rmt->namelen == args->namelen);
        ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
        args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
        args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
                                               args->rmtvaluelen);
        if (args->flags & ATTR_KERNOVAL) {
            args->valuelen = args->rmtvaluelen;
            return 0;
        }
        if (args->valuelen < args->rmtvaluelen) {
            args->valuelen = args->rmtvaluelen;
            return XFS_ERROR(ERANGE);
        }
        args->valuelen = args->rmtvaluelen;
    }
    return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Move the indicated entries from one leaf to another.
 * NOTE: this routine modifies both source and destination leaves.
 *
 * Moves "count" entries starting at index start_s in leaf_s to index
 * start_d in leaf_d.  Entry records are kept in sorted order; the
 * name/value data is repacked downward from ichdr_d->firstused.  The
 * incore headers (counts, usedbytes, firstused, freemap, holes) are
 * updated to match; the caller writes them back to disk.
 */
/*ARGSUSED*/
STATIC void
xfs_attr3_leaf_moveents(
    struct xfs_attr_leafblock   *leaf_s,
    struct xfs_attr3_icleaf_hdr *ichdr_s,
    int                         start_s,
    struct xfs_attr_leafblock   *leaf_d,
    struct xfs_attr3_icleaf_hdr *ichdr_d,
    int                         start_d,
    int                         count,
    struct xfs_mount            *mp)
{
    struct xfs_attr_leaf_entry  *entry_s;
    struct xfs_attr_leaf_entry  *entry_d;
    int                         desti;
    int                         tmp;
    int                         i;

    /*
     * Check for nothing to do.
     */
    if (count == 0)
        return;

    /*
     * Set up environment.
     */
    ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
           ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
    ASSERT(ichdr_s->magic == ichdr_d->magic);
    ASSERT(ichdr_s->count > 0 && ichdr_s->count < XFS_LBSIZE(mp) / 8);
    ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
                                + xfs_attr3_leaf_hdr_size(leaf_s));
    ASSERT(ichdr_d->count < XFS_LBSIZE(mp) / 8);
    ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
                                + xfs_attr3_leaf_hdr_size(leaf_d));

    ASSERT(start_s < ichdr_s->count);
    ASSERT(start_d <= ichdr_d->count);
    ASSERT(count <= ichdr_s->count);


    /*
     * Move the entries in the destination leaf up to make a hole?
     */
    if (start_d < ichdr_d->count) {
        tmp  = ichdr_d->count - start_d;
        tmp *= sizeof(xfs_attr_leaf_entry_t);
        entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
        entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
        memmove(entry_d, entry_s, tmp);
    }

    /*
     * Copy all entry's in the same (sorted) order,
     * but allocate attribute info packed and in sequence.
     */
    entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
    entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
    desti = start_d;
    for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
        ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
        tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
#ifdef GROT
        /*
         * Code to drop INCOMPLETE entries.  Difficult to use as we
         * may also need to change the insertion index.  Code turned
         * off for 6.2, should be revisited later.
         */
        if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
            memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
            ichdr_s->usedbytes -= tmp;
            ichdr_s->count -= 1;
            entry_d--;      /* to compensate for ++ in loop hdr */
            desti--;
            if ((start_s + i) < offset)
                result++;   /* insertion index adjustment */
        } else {
#endif /* GROT */
            /* carve space for the name/value data below firstused */
            ichdr_d->firstused -= tmp;
            /* both on-disk, don't endian flip twice */
            entry_d->hashval = entry_s->hashval;
            entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
            entry_d->flags = entry_s->flags;
            ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
                                <= XFS_LBSIZE(mp));
            memmove(xfs_attr3_leaf_name(leaf_d, desti),
                    xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
            ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
                                <= XFS_LBSIZE(mp));
            /* scrub the vacated source name/value bytes */
            memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
            ichdr_s->usedbytes -= tmp;
            ichdr_d->usedbytes += tmp;
            ichdr_s->count -= 1;
            ichdr_d->count += 1;
            tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
                    + xfs_attr3_leaf_hdr_size(leaf_d);
            ASSERT(ichdr_d->firstused >= tmp);
#ifdef GROT
        }
#endif /* GROT */
    }

    /*
     * Zero out the entries we just copied.
     * (ichdr_s->count was decremented in the loop above, so this first
     * test is true when the moved entries were at the end of the array.)
     */
    if (start_s == ichdr_s->count) {
        tmp = count * sizeof(xfs_attr_leaf_entry_t);
        entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
        ASSERT(((char *)entry_s + tmp) <=
               ((char *)leaf_s + XFS_LBSIZE(mp)));
        memset(entry_s, 0, tmp);
    } else {
        /*
         * Move the remaining entries down to fill the hole,
         * then zero the entries at the top.
         */
        tmp = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
        entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
        entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
        memmove(entry_d, entry_s, tmp);

        tmp = count * sizeof(xfs_attr_leaf_entry_t);
        entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
        ASSERT(((char *)entry_s + tmp) <=
               ((char *)leaf_s + XFS_LBSIZE(mp)));
        memset(entry_s, 0, tmp);
    }

    /*
     * Fill in the freemap information: one free region between the
     * entry array and firstused; the other two map slots are cleared.
     */
    ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
    ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
    ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
    ichdr_d->freemap[1].base = 0;
    ichdr_d->freemap[2].base = 0;
    ichdr_d->freemap[1].size = 0;
    ichdr_d->freemap[2].size = 0;
    ichdr_s->holes = 1;     /* leaf may not be compact */
}

/*
 * Pick up the last hashvalue from a leaf block.
 * Optionally reports the entry count through *count; returns 0 for an
 * empty leaf.
 */
xfs_dahash_t
xfs_attr_leaf_lasthash(
    struct xfs_buf  *bp,
    int             *count)
{
    struct xfs_attr3_icleaf_hdr ichdr;
    struct xfs_attr_leaf_entry *entries;

    xfs_attr3_leaf_hdr_from_disk(&ichdr, bp->b_addr);
    entries = xfs_attr3_leaf_entryp(bp->b_addr);
    if (count)
        *count = ichdr.count;
    if (!ichdr.count)
        return 0;
    return be32_to_cpu(entries[ichdr.count - 1].hashval);
}

/*
 * Calculate the number of bytes used to store the indicated attribute
 * (whether local or remote only calculate bytes in this block).
2414 */ 2415STATIC int 2416xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) 2417{ 2418 struct xfs_attr_leaf_entry *entries; 2419 xfs_attr_leaf_name_local_t *name_loc; 2420 xfs_attr_leaf_name_remote_t *name_rmt; 2421 int size; 2422 2423 entries = xfs_attr3_leaf_entryp(leaf); 2424 if (entries[index].flags & XFS_ATTR_LOCAL) { 2425 name_loc = xfs_attr3_leaf_name_local(leaf, index); 2426 size = xfs_attr_leaf_entsize_local(name_loc->namelen, 2427 be16_to_cpu(name_loc->valuelen)); 2428 } else { 2429 name_rmt = xfs_attr3_leaf_name_remote(leaf, index); 2430 size = xfs_attr_leaf_entsize_remote(name_rmt->namelen); 2431 } 2432 return size; 2433} 2434 2435/* 2436 * Calculate the number of bytes that would be required to store the new 2437 * attribute (whether local or remote only calculate bytes in this block). 2438 * This routine decides as a side effect whether the attribute will be 2439 * a "local" or a "remote" attribute. 2440 */ 2441int 2442xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local) 2443{ 2444 int size; 2445 2446 size = xfs_attr_leaf_entsize_local(namelen, valuelen); 2447 if (size < xfs_attr_leaf_entsize_local_max(blocksize)) { 2448 if (local) { 2449 *local = 1; 2450 } 2451 } else { 2452 size = xfs_attr_leaf_entsize_remote(namelen); 2453 if (local) { 2454 *local = 0; 2455 } 2456 } 2457 return size; 2458} 2459 2460 2461/*======================================================================== 2462 * Manage the INCOMPLETE flag in a leaf entry 2463 *========================================================================*/ 2464 2465/* 2466 * Clear the INCOMPLETE flag on an entry in a leaf block. 
 */
int
xfs_attr3_leaf_clearflag(
    struct xfs_da_args  *args)
{
    struct xfs_attr_leafblock *leaf;
    struct xfs_attr_leaf_entry *entry;
    struct xfs_attr_leaf_name_remote *name_rmt;
    struct xfs_buf      *bp;
    int                 error;
#ifdef DEBUG
    struct xfs_attr3_icleaf_hdr ichdr;
    xfs_attr_leaf_name_local_t *name_loc;
    int                 namelen;
    char                *name;
#endif /* DEBUG */

    trace_xfs_attr_leaf_clearflag(args);
    /*
     * Set up the operation.
     */
    error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
    if (error)
        return(error);

    leaf = bp->b_addr;
    entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
    ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);

#ifdef DEBUG
    /* cross-check that args really describes the entry at args->index */
    xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
    ASSERT(args->index < ichdr.count);
    ASSERT(args->index >= 0);

    if (entry->flags & XFS_ATTR_LOCAL) {
        name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
        namelen = name_loc->namelen;
        name = (char *)name_loc->nameval;
    } else {
        name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
        namelen = name_rmt->namelen;
        name = (char *)name_rmt->name;
    }
    ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
    ASSERT(namelen == args->namelen);
    ASSERT(memcmp(name, args->name, namelen) == 0);
#endif /* DEBUG */

    /* mark the attr complete and log the changed entry record */
    entry->flags &= ~XFS_ATTR_INCOMPLETE;
    xfs_trans_log_buf(args->trans, bp,
            XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));

    if (args->rmtblkno) {
        /* remote attr: also record the final value location/length */
        ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
        name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
        name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
        name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
        xfs_trans_log_buf(args->trans, bp,
                XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
    }

    /*
     * Commit the flag value change and start the next trans in series.
     */
    return xfs_trans_roll(&args->trans, args->dp);
}

/*
 * Set the INCOMPLETE flag on an entry in a leaf block.
 */
int
xfs_attr3_leaf_setflag(
    struct xfs_da_args  *args)
{
    struct xfs_attr_leafblock *leaf;
    struct xfs_attr_leaf_entry *entry;
    struct xfs_attr_leaf_name_remote *name_rmt;
    struct xfs_buf      *bp;
    int                 error;
#ifdef DEBUG
    struct xfs_attr3_icleaf_hdr ichdr;
#endif

    trace_xfs_attr_leaf_setflag(args);

    /*
     * Set up the operation.
     */
    error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
    if (error)
        return(error);

    leaf = bp->b_addr;
#ifdef DEBUG
    xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
    ASSERT(args->index < ichdr.count);
    ASSERT(args->index >= 0);
#endif
    entry = &xfs_attr3_leaf_entryp(leaf)[args->index];

    /* mark the attr in-flight and log the changed entry record */
    ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
    entry->flags |= XFS_ATTR_INCOMPLETE;
    xfs_trans_log_buf(args->trans, bp,
            XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
    if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
        /* remote attr: invalidate the old value pointer while incomplete */
        name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
        name_rmt->valueblk = 0;
        name_rmt->valuelen = 0;
        xfs_trans_log_buf(args->trans, bp,
                XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
    }

    /*
     * Commit the flag value change and start the next trans in series.
     */
    return xfs_trans_roll(&args->trans, args->dp);
}

/*
 * In a single transaction, clear the INCOMPLETE flag on the leaf entry
 * given by args->blkno/index and set the INCOMPLETE flag on the leaf
 * entry given by args->blkno2/index2.
 *
 * Note that they could be in different blocks, or in the same block.
 */
int
xfs_attr3_leaf_flipflags(
    struct xfs_da_args  *args)
{
    struct xfs_attr_leafblock *leaf1;
    struct xfs_attr_leafblock *leaf2;
    struct xfs_attr_leaf_entry *entry1;
    struct xfs_attr_leaf_entry *entry2;
    struct xfs_attr_leaf_name_remote *name_rmt;
    struct xfs_buf      *bp1;
    struct xfs_buf      *bp2;
    int                 error;
#ifdef DEBUG
    struct xfs_attr3_icleaf_hdr ichdr1;
    struct xfs_attr3_icleaf_hdr ichdr2;
    xfs_attr_leaf_name_local_t *name_loc;
    int                 namelen1, namelen2;
    char                *name1, *name2;
#endif /* DEBUG */

    trace_xfs_attr_leaf_flipflags(args);

    /*
     * Read the block containing the "old" attr
     */
    error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
    if (error)
        return error;

    /*
     * Read the block containing the "new" attr, if it is different
     */
    if (args->blkno2 != args->blkno) {
        error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
                                    -1, &bp2);
        if (error)
            return error;
    } else {
        /* same block: share the buffer rather than reading twice */
        bp2 = bp1;
    }

    leaf1 = bp1->b_addr;
    entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];

    leaf2 = bp2->b_addr;
    entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];

#ifdef DEBUG
    /* both entries must name the same attribute with the same hash */
    xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
    ASSERT(args->index < ichdr1.count);
    ASSERT(args->index >= 0);

    xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
    ASSERT(args->index2 < ichdr2.count);
    ASSERT(args->index2 >= 0);

    if (entry1->flags & XFS_ATTR_LOCAL) {
        name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
        namelen1 = name_loc->namelen;
        name1 = (char *)name_loc->nameval;
    } else {
        name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
        namelen1 = name_rmt->namelen;
        name1 = (char *)name_rmt->name;
    }
    if (entry2->flags & XFS_ATTR_LOCAL) {
        name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
        namelen2 = name_loc->namelen;
        name2 = (char *)name_loc->nameval;
    } else {
        name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
        namelen2 = name_rmt->namelen;
        name2 = (char *)name_rmt->name;
    }
    ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
    ASSERT(namelen1 == namelen2);
    ASSERT(memcmp(name1, name2, namelen1) == 0);
#endif /* DEBUG */

    ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
    ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);

    /* entry1 becomes the live attr: clear its flag and log it */
    entry1->flags &= ~XFS_ATTR_INCOMPLETE;
    xfs_trans_log_buf(args->trans, bp1,
            XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
    if (args->rmtblkno) {
        /* record the final remote value location for the live attr */
        ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
        name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
        name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
        name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
        xfs_trans_log_buf(args->trans, bp1,
                XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
    }

    /* entry2 becomes the dying attr: set its flag and log it */
    entry2->flags |= XFS_ATTR_INCOMPLETE;
    xfs_trans_log_buf(args->trans, bp2,
            XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
    if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
        /* invalidate the old remote value pointer on the dying attr */
        name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
        name_rmt->valueblk = 0;
        name_rmt->valuelen = 0;
        xfs_trans_log_buf(args->trans, bp2,
                XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
    }

    /*
     * Commit the flag value change and start the next trans in series.
     */
    error = xfs_trans_roll(&args->trans, args->dp);

    return error;
}