Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for_v5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf and quota updates from Jan Kara:

- some ext2 fixes and cleanups

- a fix of udf bug when extending files

- a fix of quota Q_XGETQSTAT[V] handling

* tag 'for_v5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
udf: Fix incorrect final NOT_ALLOCATED (hole) extent length
ext2: Use kmemdup rather than duplicating its implementation
quota: honor quota type in Q_XGETQSTAT[V] calls
ext2: Always brelse bh on failure in ext2_iget()
ext2: add missing brelse() in ext2_iget()
ext2: Fix a typo in ext2_getattr argument
ext2: fix a typo in comment
ext2: add missing brelse() in ext2_new_inode()
ext2: optimize ext2_xattr_get()
ext2: introduce new helper for xattr entry comparison
ext2: merge xattr next entry check to ext2_xattr_entry_valid()
ext2: code cleanup for ext2_preread_inode()
ext2: code cleanup by using test_opt() and clear_opt()
doc: ext2: update description of quota options for ext2
ext2: Strengthen xattr block checks
ext2: Merge loops in ext2_xattr_set()
ext2: introduce helper for xattr entry validation
ext2: introduce helper for xattr header validation
quota: add dqi_dirty_list description to comment of Dquot List Management

+195 -151
+7 -1
Documentation/filesystems/ext2.txt
··· 57 57 58 58 nobh Do not attach buffer_heads to file pagecache. 59 59 60 - grpquota,noquota,quota,usrquota Quota options are silently ignored by ext2. 60 + quota, usrquota Enable user disk quota support 61 + (requires CONFIG_QUOTA). 62 + 63 + grpquota Enable group disk quota support 64 + (requires CONFIG_QUOTA). 65 + 66 + noquota option is silently ignored by ext2. 61 67 62 68 63 69 Specification
+1 -2
fs/ext2/balloc.c
··· 1197 1197 1198 1198 /* 1199 1199 * Returns 1 if the passed-in block region is valid; 0 if some part overlaps 1200 - * with filesystem metadata blocksi. 1200 + * with filesystem metadata blocks. 1201 1201 */ 1202 1202 int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk, 1203 1203 unsigned int count) ··· 1211 1211 if ((start_blk <= sbi->s_sb_block) && 1212 1212 (start_blk + count >= sbi->s_sb_block)) 1213 1213 return 0; 1214 - 1215 1214 1216 1215 return 1; 1217 1216 }
+2 -3
fs/ext2/ialloc.c
··· 172 172 struct backing_dev_info *bdi; 173 173 174 174 bdi = inode_to_bdi(inode); 175 - if (bdi_read_congested(bdi)) 176 - return; 177 - if (bdi_write_congested(bdi)) 175 + if (bdi_rw_congested(bdi)) 178 176 return; 179 177 180 178 block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb); ··· 509 511 /* 510 512 * Scanned all blockgroups. 511 513 */ 514 + brelse(bitmap_bh); 512 515 err = -ENOSPC; 513 516 goto fail; 514 517 got:
+3 -4
fs/ext2/inode.c
··· 1400 1400 struct inode *ext2_iget (struct super_block *sb, unsigned long ino) 1401 1401 { 1402 1402 struct ext2_inode_info *ei; 1403 - struct buffer_head * bh; 1403 + struct buffer_head * bh = NULL; 1404 1404 struct ext2_inode *raw_inode; 1405 1405 struct inode *inode; 1406 1406 long ret = -EIO; ··· 1446 1446 */ 1447 1447 if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { 1448 1448 /* this inode is deleted */ 1449 - brelse (bh); 1450 1449 ret = -ESTALE; 1451 1450 goto bad_inode; 1452 1451 } ··· 1462 1463 !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) { 1463 1464 ext2_error(sb, "ext2_iget", "bad extended attribute block %u", 1464 1465 ei->i_file_acl); 1465 - brelse(bh); 1466 1466 ret = -EFSCORRUPTED; 1467 1467 goto bad_inode; 1468 1468 } ··· 1524 1526 return inode; 1525 1527 1526 1528 bad_inode: 1529 + brelse(bh); 1527 1530 iget_failed(inode); 1528 1531 return ERR_PTR(ret); 1529 1532 } ··· 1639 1640 } 1640 1641 1641 1642 int ext2_getattr(const struct path *path, struct kstat *stat, 1642 - u32 request_mask, unsigned int query_falgs) 1643 + u32 request_mask, unsigned int query_flags) 1643 1644 { 1644 1645 struct inode *inode = d_inode(path->dentry); 1645 1646 struct ext2_inode_info *ei = EXT2_I(inode);
+8 -9
fs/ext2/super.c
··· 303 303 if (test_opt(sb, NOBH)) 304 304 seq_puts(seq, ",nobh"); 305 305 306 - if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA) 306 + if (test_opt(sb, USRQUOTA)) 307 307 seq_puts(seq, ",usrquota"); 308 308 309 - if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA) 309 + if (test_opt(sb, GRPQUOTA)) 310 310 seq_puts(seq, ",grpquota"); 311 311 312 - if (sbi->s_mount_opt & EXT2_MOUNT_XIP) 312 + if (test_opt(sb, XIP)) 313 313 seq_puts(seq, ",xip"); 314 314 315 - if (sbi->s_mount_opt & EXT2_MOUNT_DAX) 315 + if (test_opt(sb, DAX)) 316 316 seq_puts(seq, ",dax"); 317 317 318 318 if (!test_opt(sb, RESERVATION)) ··· 935 935 sbi->s_resgid = opts.s_resgid; 936 936 937 937 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 938 - ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? 939 - SB_POSIXACL : 0); 938 + (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 940 939 sb->s_iflags |= SB_I_CGROUPWB; 941 940 942 941 if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV && ··· 966 967 967 968 blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 968 969 969 - if (sbi->s_mount_opt & EXT2_MOUNT_DAX) { 970 + if (test_opt(sb, DAX)) { 970 971 if (!bdev_dax_supported(sb->s_bdev, blocksize)) { 971 972 ext2_msg(sb, KERN_ERR, 972 973 "DAX unsupported by block device. Turning off DAX."); 973 - sbi->s_mount_opt &= ~EXT2_MOUNT_DAX; 974 + clear_opt(sbi->s_mount_opt, DAX); 974 975 } 975 976 } 976 977 ··· 1403 1404 sbi->s_resuid = new_opts.s_resuid; 1404 1405 sbi->s_resgid = new_opts.s_resgid; 1405 1406 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 1406 - ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0); 1407 + (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 1407 1408 spin_unlock(&sbi->s_lock); 1408 1409 1409 1410 return 0;
+94 -70
fs/ext2/xattr.c
··· 134 134 return handler; 135 135 } 136 136 137 + static bool 138 + ext2_xattr_header_valid(struct ext2_xattr_header *header) 139 + { 140 + if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || 141 + header->h_blocks != cpu_to_le32(1)) 142 + return false; 143 + 144 + return true; 145 + } 146 + 147 + static bool 148 + ext2_xattr_entry_valid(struct ext2_xattr_entry *entry, 149 + char *end, size_t end_offs) 150 + { 151 + struct ext2_xattr_entry *next; 152 + size_t size; 153 + 154 + next = EXT2_XATTR_NEXT(entry); 155 + if ((char *)next >= end) 156 + return false; 157 + 158 + if (entry->e_value_block != 0) 159 + return false; 160 + 161 + size = le32_to_cpu(entry->e_value_size); 162 + if (size > end_offs || 163 + le16_to_cpu(entry->e_value_offs) + size > end_offs) 164 + return false; 165 + 166 + return true; 167 + } 168 + 169 + static int 170 + ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name, 171 + struct ext2_xattr_entry *entry) 172 + { 173 + int cmp; 174 + 175 + cmp = name_index - entry->e_name_index; 176 + if (!cmp) 177 + cmp = name_len - entry->e_name_len; 178 + if (!cmp) 179 + cmp = memcmp(name, entry->e_name, name_len); 180 + 181 + return cmp; 182 + } 183 + 137 184 /* 138 185 * ext2_xattr_get() 139 186 * ··· 199 152 struct ext2_xattr_entry *entry; 200 153 size_t name_len, size; 201 154 char *end; 202 - int error; 155 + int error, not_found; 203 156 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 204 157 205 158 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", ··· 223 176 ea_bdebug(bh, "b_count=%d, refcount=%d", 224 177 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); 225 178 end = bh->b_data + bh->b_size; 226 - if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || 227 - HDR(bh)->h_blocks != cpu_to_le32(1)) { 228 - bad_block: ext2_error(inode->i_sb, "ext2_xattr_get", 179 + if (!ext2_xattr_header_valid(HDR(bh))) { 180 + bad_block: 181 + ext2_error(inode->i_sb, "ext2_xattr_get", 229 182 "inode %ld: bad 
block %d", inode->i_ino, 230 183 EXT2_I(inode)->i_file_acl); 231 184 error = -EIO; ··· 235 188 /* find named attribute */ 236 189 entry = FIRST_ENTRY(bh); 237 190 while (!IS_LAST_ENTRY(entry)) { 238 - struct ext2_xattr_entry *next = 239 - EXT2_XATTR_NEXT(entry); 240 - if ((char *)next >= end) 191 + if (!ext2_xattr_entry_valid(entry, end, 192 + inode->i_sb->s_blocksize)) 241 193 goto bad_block; 242 - if (name_index == entry->e_name_index && 243 - name_len == entry->e_name_len && 244 - memcmp(name, entry->e_name, name_len) == 0) 194 + 195 + not_found = ext2_xattr_cmp_entry(name_index, name_len, name, 196 + entry); 197 + if (!not_found) 245 198 goto found; 246 - entry = next; 199 + if (not_found < 0) 200 + break; 201 + 202 + entry = EXT2_XATTR_NEXT(entry); 247 203 } 248 204 if (ext2_xattr_cache_insert(ea_block_cache, bh)) 249 205 ea_idebug(inode, "cache insert failed"); 250 206 error = -ENODATA; 251 207 goto cleanup; 252 208 found: 253 - /* check the buffer size */ 254 - if (entry->e_value_block != 0) 255 - goto bad_block; 256 209 size = le32_to_cpu(entry->e_value_size); 257 - if (size > inode->i_sb->s_blocksize || 258 - le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize) 259 - goto bad_block; 260 - 261 210 if (ext2_xattr_cache_insert(ea_block_cache, bh)) 262 211 ea_idebug(inode, "cache insert failed"); 263 212 if (buffer) { ··· 309 266 ea_bdebug(bh, "b_count=%d, refcount=%d", 310 267 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); 311 268 end = bh->b_data + bh->b_size; 312 - if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || 313 - HDR(bh)->h_blocks != cpu_to_le32(1)) { 314 - bad_block: ext2_error(inode->i_sb, "ext2_xattr_list", 269 + if (!ext2_xattr_header_valid(HDR(bh))) { 270 + bad_block: 271 + ext2_error(inode->i_sb, "ext2_xattr_list", 315 272 "inode %ld: bad block %d", inode->i_ino, 316 273 EXT2_I(inode)->i_file_acl); 317 274 error = -EIO; ··· 321 278 /* check the on-disk data structure */ 322 279 entry = FIRST_ENTRY(bh); 323 
280 while (!IS_LAST_ENTRY(entry)) { 324 - struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry); 325 - 326 - if ((char *)next >= end) 281 + if (!ext2_xattr_entry_valid(entry, end, 282 + inode->i_sb->s_blocksize)) 327 283 goto bad_block; 328 - entry = next; 284 + entry = EXT2_XATTR_NEXT(entry); 329 285 } 330 286 if (ext2_xattr_cache_insert(ea_block_cache, bh)) 331 287 ea_idebug(inode, "cache insert failed"); ··· 409 367 struct super_block *sb = inode->i_sb; 410 368 struct buffer_head *bh = NULL; 411 369 struct ext2_xattr_header *header = NULL; 412 - struct ext2_xattr_entry *here, *last; 370 + struct ext2_xattr_entry *here = NULL, *last = NULL; 413 371 size_t name_len, free, min_offs = sb->s_blocksize; 414 372 int not_found = 1, error; 415 373 char *end; ··· 448 406 le32_to_cpu(HDR(bh)->h_refcount)); 449 407 header = HDR(bh); 450 408 end = bh->b_data + bh->b_size; 451 - if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || 452 - header->h_blocks != cpu_to_le32(1)) { 453 - bad_block: ext2_error(sb, "ext2_xattr_set", 409 + if (!ext2_xattr_header_valid(header)) { 410 + bad_block: 411 + ext2_error(sb, "ext2_xattr_set", 454 412 "inode %ld: bad block %d", inode->i_ino, 455 413 EXT2_I(inode)->i_file_acl); 456 414 error = -EIO; 457 415 goto cleanup; 458 416 } 459 - /* Find the named attribute. */ 460 - here = FIRST_ENTRY(bh); 461 - while (!IS_LAST_ENTRY(here)) { 462 - struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here); 463 - if ((char *)next >= end) 464 - goto bad_block; 465 - if (!here->e_value_block && here->e_value_size) { 466 - size_t offs = le16_to_cpu(here->e_value_offs); 467 - if (offs < min_offs) 468 - min_offs = offs; 469 - } 470 - not_found = name_index - here->e_name_index; 471 - if (!not_found) 472 - not_found = name_len - here->e_name_len; 473 - if (!not_found) 474 - not_found = memcmp(name, here->e_name,name_len); 475 - if (not_found <= 0) 476 - break; 477 - here = next; 478 - } 479 - last = here; 480 - /* We still need to compute min_offs and last. 
*/ 417 + /* 418 + * Find the named attribute. If not found, 'here' will point 419 + * to entry where the new attribute should be inserted to 420 + * maintain sorting. 421 + */ 422 + last = FIRST_ENTRY(bh); 481 423 while (!IS_LAST_ENTRY(last)) { 482 - struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last); 483 - if ((char *)next >= end) 424 + if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize)) 484 425 goto bad_block; 485 - if (!last->e_value_block && last->e_value_size) { 426 + if (last->e_value_size) { 486 427 size_t offs = le16_to_cpu(last->e_value_offs); 487 428 if (offs < min_offs) 488 429 min_offs = offs; 489 430 } 490 - last = next; 431 + if (not_found > 0) { 432 + not_found = ext2_xattr_cmp_entry(name_index, 433 + name_len, 434 + name, last); 435 + if (not_found <= 0) 436 + here = last; 437 + } 438 + last = EXT2_XATTR_NEXT(last); 491 439 } 440 + if (not_found > 0) 441 + here = last; 492 442 493 443 /* Check whether we have enough space left. */ 494 444 free = min_offs - ((char*)last - (char*)header) - sizeof(__u32); ··· 488 454 /* We will use a new extended attribute block. */ 489 455 free = sb->s_blocksize - 490 456 sizeof(struct ext2_xattr_header) - sizeof(__u32); 491 - here = last = NULL; /* avoid gcc uninitialized warning. 
*/ 492 457 } 493 458 494 459 if (not_found) { ··· 503 470 error = -EEXIST; 504 471 if (flags & XATTR_CREATE) 505 472 goto cleanup; 506 - if (!here->e_value_block && here->e_value_size) { 507 - size_t size = le32_to_cpu(here->e_value_size); 508 - 509 - if (le16_to_cpu(here->e_value_offs) + size > 510 - sb->s_blocksize || size > sb->s_blocksize) 511 - goto bad_block; 512 - free += EXT2_XATTR_SIZE(size); 513 - } 473 + free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size)); 514 474 free += EXT2_XATTR_LEN(name_len); 515 475 } 516 476 error = -ENOSPC; ··· 532 506 533 507 unlock_buffer(bh); 534 508 ea_bdebug(bh, "cloning"); 535 - header = kmalloc(bh->b_size, GFP_KERNEL); 509 + header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL); 536 510 error = -ENOMEM; 537 511 if (header == NULL) 538 512 goto cleanup; 539 - memcpy(header, HDR(bh), bh->b_size); 540 513 header->h_refcount = cpu_to_le32(1); 541 514 542 515 offset = (char *)here - bh->b_data; ··· 567 542 here->e_name_len = name_len; 568 543 memcpy(here->e_name, name, name_len); 569 544 } else { 570 - if (!here->e_value_block && here->e_value_size) { 545 + if (here->e_value_size) { 571 546 char *first_val = (char *)header + min_offs; 572 547 size_t offs = le16_to_cpu(here->e_value_offs); 573 548 char *val = (char *)header + offs; ··· 594 569 last = ENTRY(header+1); 595 570 while (!IS_LAST_ENTRY(last)) { 596 571 size_t o = le16_to_cpu(last->e_value_offs); 597 - if (!last->e_value_block && o < offs) 572 + if (o < offs) 598 573 last->e_value_offs = 599 574 cpu_to_le16(o + size); 600 575 last = EXT2_XATTR_NEXT(last); ··· 809 784 goto cleanup; 810 785 } 811 786 ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); 812 - if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || 813 - HDR(bh)->h_blocks != cpu_to_le32(1)) { 787 + if (!ext2_xattr_header_valid(HDR(bh))) { 814 788 ext2_error(inode->i_sb, "ext2_xattr_delete_inode", 815 789 "inode %ld: bad block %d", inode->i_ino, 816 790 EXT2_I(inode)->i_file_acl);
+8 -3
fs/quota/dquot.c
··· 223 223 224 224 /* 225 225 * Dquot List Management: 226 - * The quota code uses three lists for dquot management: the inuse_list, 227 - * free_dquots, and dquot_hash[] array. A single dquot structure may be 228 - * on all three lists, depending on its current state. 226 + * The quota code uses four lists for dquot management: the inuse_list, 227 + * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot 228 + * structure may be on some of those lists, depending on its current state. 229 229 * 230 230 * All dquots are placed to the end of inuse_list when first created, and this 231 231 * list is used for invalidate operation, which must look at every dquot. ··· 235 235 * removed from the list as soon as they are used again, and 236 236 * dqstats.free_dquots gives the number of dquots on the list. When 237 237 * dquot is invalidated it's completely released from memory. 238 + * 239 + * Dirty dquots are added to the dqi_dirty_list of quota_info when mark 240 + * dirtied, and this list is searched when writing dirty dquots back to 241 + * quota file. Note that some filesystems do dirty dquot tracking on their 242 + * own (e.g. in a journal) and thus don't use dqi_dirty_list. 238 243 * 239 244 * Dquots with a specific identity (device, type and id) are placed on 240 245 * one of the dquot_hash[] hash chains. The provides an efficient search
+12 -26
fs/quota/quota.c
··· 331 331 return flags; 332 332 } 333 333 334 - static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) 334 + static int quota_getstate(struct super_block *sb, int type, 335 + struct fs_quota_stat *fqs) 335 336 { 336 - int type; 337 337 struct qc_state state; 338 338 int ret; 339 339 ··· 349 349 if (!fqs->qs_flags) 350 350 return -ENOSYS; 351 351 fqs->qs_incoredqs = state.s_incoredqs; 352 - /* 353 - * GETXSTATE quotactl has space for just one set of time limits so 354 - * report them for the first enabled quota type 355 - */ 356 - for (type = 0; type < MAXQUOTAS; type++) 357 - if (state.s_state[type].flags & QCI_ACCT_ENABLED) 358 - break; 359 - BUG_ON(type == MAXQUOTAS); 352 + 360 353 fqs->qs_btimelimit = state.s_state[type].spc_timelimit; 361 354 fqs->qs_itimelimit = state.s_state[type].ino_timelimit; 362 355 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; ··· 384 391 return 0; 385 392 } 386 393 387 - static int quota_getxstate(struct super_block *sb, void __user *addr) 394 + static int quota_getxstate(struct super_block *sb, int type, void __user *addr) 388 395 { 389 396 struct fs_quota_stat fqs; 390 397 int ret; 391 398 392 399 if (!sb->s_qcop->get_state) 393 400 return -ENOSYS; 394 - ret = quota_getstate(sb, &fqs); 401 + ret = quota_getstate(sb, type, &fqs); 395 402 if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) 396 403 return -EFAULT; 397 404 return ret; 398 405 } 399 406 400 - static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) 407 + static int quota_getstatev(struct super_block *sb, int type, 408 + struct fs_quota_statv *fqs) 401 409 { 402 - int type; 403 410 struct qc_state state; 404 411 int ret; 405 412 ··· 415 422 if (!fqs->qs_flags) 416 423 return -ENOSYS; 417 424 fqs->qs_incoredqs = state.s_incoredqs; 418 - /* 419 - * GETXSTATV quotactl has space for just one set of time limits so 420 - * report them for the first enabled quota type 421 - */ 422 - for (type = 0; type < MAXQUOTAS; 
type++) 423 - if (state.s_state[type].flags & QCI_ACCT_ENABLED) 424 - break; 425 - BUG_ON(type == MAXQUOTAS); 425 + 426 426 fqs->qs_btimelimit = state.s_state[type].spc_timelimit; 427 427 fqs->qs_itimelimit = state.s_state[type].ino_timelimit; 428 428 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; ··· 441 455 return 0; 442 456 } 443 457 444 - static int quota_getxstatev(struct super_block *sb, void __user *addr) 458 + static int quota_getxstatev(struct super_block *sb, int type, void __user *addr) 445 459 { 446 460 struct fs_quota_statv fqs; 447 461 int ret; ··· 460 474 default: 461 475 return -EINVAL; 462 476 } 463 - ret = quota_getstatev(sb, &fqs); 477 + ret = quota_getstatev(sb, type, &fqs); 464 478 if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) 465 479 return -EFAULT; 466 480 return ret; ··· 730 744 case Q_XQUOTARM: 731 745 return quota_rmxquota(sb, addr); 732 746 case Q_XGETQSTAT: 733 - return quota_getxstate(sb, addr); 747 + return quota_getxstate(sb, type, addr); 734 748 case Q_XGETQSTATV: 735 - return quota_getxstatev(sb, addr); 749 + return quota_getxstatev(sb, type, addr); 736 750 case Q_XSETQLIM: 737 751 return quota_setxquota(sb, type, id, addr); 738 752 case Q_XGETQUOTA:
+60 -33
fs/udf/inode.c
··· 470 470 return NULL; 471 471 } 472 472 473 - /* Extend the file by 'blocks' blocks, return the number of extents added */ 473 + /* Extend the file with new blocks totaling 'new_block_bytes', 474 + * return the number of extents added 475 + */ 474 476 static int udf_do_extend_file(struct inode *inode, 475 477 struct extent_position *last_pos, 476 478 struct kernel_long_ad *last_ext, 477 - sector_t blocks) 479 + loff_t new_block_bytes) 478 480 { 479 - sector_t add; 481 + uint32_t add; 480 482 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 481 483 struct super_block *sb = inode->i_sb; 482 484 struct kernel_lb_addr prealloc_loc = {}; ··· 488 486 489 487 /* The previous extent is fake and we should not extend by anything 490 488 * - there's nothing to do... */ 491 - if (!blocks && fake) 489 + if (!new_block_bytes && fake) 492 490 return 0; 493 491 494 492 iinfo = UDF_I(inode); ··· 519 517 /* Can we merge with the previous extent? */ 520 518 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == 521 519 EXT_NOT_RECORDED_NOT_ALLOCATED) { 522 - add = ((1 << 30) - sb->s_blocksize - 523 - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> 524 - sb->s_blocksize_bits; 525 - if (add > blocks) 526 - add = blocks; 527 - blocks -= add; 528 - last_ext->extLength += add << sb->s_blocksize_bits; 520 + add = (1 << 30) - sb->s_blocksize - 521 + (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 522 + if (add > new_block_bytes) 523 + add = new_block_bytes; 524 + new_block_bytes -= add; 525 + last_ext->extLength += add; 529 526 } 530 527 531 528 if (fake) { ··· 545 544 } 546 545 547 546 /* Managed to do everything necessary? 
*/ 548 - if (!blocks) 547 + if (!new_block_bytes) 549 548 goto out; 550 549 551 550 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ 552 551 last_ext->extLocation.logicalBlockNum = 0; 553 552 last_ext->extLocation.partitionReferenceNum = 0; 554 - add = (1 << (30-sb->s_blocksize_bits)) - 1; 555 - last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 556 - (add << sb->s_blocksize_bits); 553 + add = (1 << 30) - sb->s_blocksize; 554 + last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add; 557 555 558 556 /* Create enough extents to cover the whole hole */ 559 - while (blocks > add) { 560 - blocks -= add; 557 + while (new_block_bytes > add) { 558 + new_block_bytes -= add; 561 559 err = udf_add_aext(inode, last_pos, &last_ext->extLocation, 562 560 last_ext->extLength, 1); 563 561 if (err) 564 562 return err; 565 563 count++; 566 564 } 567 - if (blocks) { 565 + if (new_block_bytes) { 568 566 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 569 - (blocks << sb->s_blocksize_bits); 567 + new_block_bytes; 570 568 err = udf_add_aext(inode, last_pos, &last_ext->extLocation, 571 569 last_ext->extLength, 1); 572 570 if (err) ··· 596 596 return count; 597 597 } 598 598 599 + /* Extend the final block of the file to final_block_len bytes */ 600 + static void udf_do_extend_final_block(struct inode *inode, 601 + struct extent_position *last_pos, 602 + struct kernel_long_ad *last_ext, 603 + uint32_t final_block_len) 604 + { 605 + struct super_block *sb = inode->i_sb; 606 + uint32_t added_bytes; 607 + 608 + added_bytes = final_block_len - 609 + (last_ext->extLength & (sb->s_blocksize - 1)); 610 + last_ext->extLength += added_bytes; 611 + UDF_I(inode)->i_lenExtents += added_bytes; 612 + 613 + udf_write_aext(inode, last_pos, &last_ext->extLocation, 614 + last_ext->extLength, 1); 615 + } 616 + 599 617 static int udf_extend_file(struct inode *inode, loff_t newsize) 600 618 { 601 619 ··· 623 605 int8_t etype; 624 606 struct super_block *sb = inode->i_sb; 625 607 
sector_t first_block = newsize >> sb->s_blocksize_bits, offset; 608 + unsigned long partial_final_block; 626 609 int adsize; 627 610 struct udf_inode_info *iinfo = UDF_I(inode); 628 611 struct kernel_long_ad extent; 629 - int err; 612 + int err = 0; 613 + int within_final_block; 630 614 631 615 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 632 616 adsize = sizeof(struct short_ad); ··· 638 618 BUG(); 639 619 640 620 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); 621 + within_final_block = (etype != -1); 641 622 642 - /* File has extent covering the new size (could happen when extending 643 - * inside a block)? */ 644 - if (etype != -1) 645 - return 0; 646 - if (newsize & (sb->s_blocksize - 1)) 647 - offset++; 648 - /* Extended file just to the boundary of the last file block? */ 649 - if (offset == 0) 650 - return 0; 651 - 652 - /* Truncate is extending the file by 'offset' blocks */ 653 623 if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || 654 624 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { 655 625 /* File has no extents at all or has empty last ··· 653 643 &extent.extLength, 0); 654 644 extent.extLength |= etype << 30; 655 645 } 656 - err = udf_do_extend_file(inode, &epos, &extent, offset); 646 + 647 + partial_final_block = newsize & (sb->s_blocksize - 1); 648 + 649 + /* File has extent covering the new size (could happen when extending 650 + * inside a block)? 651 + */ 652 + if (within_final_block) { 653 + /* Extending file within the last file block */ 654 + udf_do_extend_final_block(inode, &epos, &extent, 655 + partial_final_block); 656 + } else { 657 + loff_t add = ((loff_t)offset << sb->s_blocksize_bits) | 658 + partial_final_block; 659 + err = udf_do_extend_file(inode, &epos, &extent, add); 660 + } 661 + 657 662 if (err < 0) 658 663 goto out; 659 664 err = 0; ··· 770 745 /* Are we beyond EOF? 
*/ 771 746 if (etype == -1) { 772 747 int ret; 748 + loff_t hole_len; 773 749 isBeyondEOF = true; 774 750 if (count) { 775 751 if (c) ··· 786 760 startnum = (offset > 0); 787 761 } 788 762 /* Create extents for the hole between EOF and offset */ 789 - ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); 763 + hole_len = (loff_t)offset << inode->i_blkbits; 764 + ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len); 790 765 if (ret < 0) { 791 766 *err = ret; 792 767 newblock = 0;