Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull misc filesystem updates from Jan Kara:
"udf, ext2, quota, fsnotify fixes & cleanups:

- udf fixes for handling of media without uid/gid

- udf fixes for some corner cases in parsing of volume recognition
sequence

- improvements of fsnotify handling of ENOMEM

- new ioctl to allow setting of watch descriptor id for inotify (for
checkpoint-restart)

- small ext2, reiserfs, quota cleanups"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
quota: Kill an unused extern entry from quota.h
reiserfs: Remove VLA from fs/reiserfs/reiserfs.h
udf: fix potential refcnt problem of nls module
ext2: change return code to -ENOMEM when failing memory allocation
udf: Do not mark possibly inconsistent filesystems as closed
fsnotify: Let userspace know about lost events due to ENOMEM
fanotify: Avoid lost events due to ENOMEM for unlimited queues
udf: Remove never implemented mount options
udf: Update mount option documentation
udf: Provide saner default for invalid uid / gid
udf: Clean up handling of invalid uid/gid
udf: Apply uid/gid mount options also to new inodes & chown
udf: Ignore [ug]id=ignore mount options
udf: Fix handling of Partition Descriptors
udf: Unify common handling of descriptors
udf: Convert descriptor index definitions to enum
udf: Allow volume descriptor sequence to be terminated by unrecorded block
udf: Simplify handling of Volume Descriptor Pointers
udf: Fix off-by-one in volume descriptor sequence length
inotify: Extend ioctl to allow to request id of new watch descriptor

+239 -178
+7 -19
Documentation/filesystems/udf.txt
··· 36 36 iocharset= Set the NLS character set 37 37 38 38 The uid= and gid= options need a bit more explaining. They will accept a 39 - decimal numeric value which will be used as the default ID for that mount. 40 - They will also accept the string "ignore" and "forget". For files on the disk 41 - that are owned by nobody ( -1 ), they will instead look as if they are owned 42 - by the default ID. The ignore option causes the default ID to override all 43 - IDs on the disk, not just -1. The forget option causes all IDs to be written 44 - to disk as -1, so when the media is later remounted, they will appear to be 45 - owned by whatever default ID it is mounted with at that time. 39 + decimal numeric value and all inodes on that mount will then appear as 40 + belonging to that uid and gid. Mount options also accept the string "forget". 41 + The forget option causes all IDs to be written to disk as -1 which is a way 42 + of UDF standard to indicate that IDs are not supported for these files . 46 43 47 - For typical desktop use of removable media, you should set the ID to that 48 - of the interactively logged on user, and also specify both the forget and 49 - ignore options. This way the interactive user will always see the files 50 - on the disk as belonging to him. 44 + For typical desktop use of removable media, you should set the ID to that of 45 + the interactively logged on user, and also specify the forget option. This way 46 + the interactive user will always see the files on the disk as belonging to him. 51 47 52 48 The remaining are for debugging and disaster recovery: 53 49 ··· 53 57 54 58 session= Set the CDROM session (default= last session) 55 59 anchor= Override standard anchor location. (default= 256) 56 - volume= Override the VolumeDesc location. (unused) 57 - partition= Override the PartitionDesc location. (unused) 58 60 lastblock= Set the last block of the filesystem/ 59 61 60 - The following expect a offset from the partition root. 
61 - 62 - fileset= Override the fileset block location. (unused) 63 - rootdir= Override the root directory location. (unused) 64 - WARNING: overriding the rootdir to a non-directory may 65 - yield highly unpredictable results. 66 62 ------------------------------------------------------------------------------- 67 63 68 64
+2 -2
fs/ext2/super.c
··· 827 827 unsigned long logic_sb_block; 828 828 unsigned long offset = 0; 829 829 unsigned long def_mount_opts; 830 - long ret = -EINVAL; 830 + long ret = -ENOMEM; 831 831 int blocksize = BLOCK_SIZE; 832 832 int db_count; 833 833 int i, j; ··· 835 835 int err; 836 836 struct ext2_mount_options opts; 837 837 838 - err = -ENOMEM; 839 838 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 840 839 if (!sbi) 841 840 goto failed; ··· 850 851 sbi->s_daxdev = dax_dev; 851 852 852 853 spin_lock_init(&sbi->s_lock); 854 + ret = -EINVAL; 853 855 854 856 /* 855 857 * See what the current blocksize for the device is, and
+22 -6
fs/notify/fanotify/fanotify.c
··· 139 139 return false; 140 140 } 141 141 142 - struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, 142 + struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group, 143 + struct inode *inode, u32 mask, 143 144 const struct path *path) 144 145 { 145 146 struct fanotify_event_info *event; 147 + gfp_t gfp = GFP_KERNEL; 148 + 149 + /* 150 + * For queues with unlimited length lost events are not expected and 151 + * can possibly have security implications. Avoid losing events when 152 + * memory is short. 153 + */ 154 + if (group->max_events == UINT_MAX) 155 + gfp |= __GFP_NOFAIL; 146 156 147 157 if (fanotify_is_perm_event(mask)) { 148 158 struct fanotify_perm_event_info *pevent; 149 159 150 - pevent = kmem_cache_alloc(fanotify_perm_event_cachep, 151 - GFP_KERNEL); 160 + pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); 152 161 if (!pevent) 153 162 return NULL; 154 163 event = &pevent->fae; 155 164 pevent->response = 0; 156 165 goto init; 157 166 } 158 - event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); 167 + event = kmem_cache_alloc(fanotify_event_cachep, gfp); 159 168 if (!event) 160 169 return NULL; 161 170 init: __maybe_unused ··· 219 210 return 0; 220 211 } 221 212 222 - event = fanotify_alloc_event(inode, mask, data); 213 + event = fanotify_alloc_event(group, inode, mask, data); 223 214 ret = -ENOMEM; 224 - if (unlikely(!event)) 215 + if (unlikely(!event)) { 216 + /* 217 + * We don't queue overflow events for permission events as 218 + * there the access is denied and so no event is in fact lost. 219 + */ 220 + if (!fanotify_is_perm_event(mask)) 221 + fsnotify_queue_overflow(group); 225 222 goto finish; 223 + } 226 224 227 225 fsn_event = &event->fse; 228 226 ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
+2 -1
fs/notify/fanotify/fanotify.h
··· 52 52 return container_of(fse, struct fanotify_event_info, fse); 53 53 } 54 54 55 - struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, 55 + struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group, 56 + struct inode *inode, u32 mask, 56 57 const struct path *path);
+1 -1
fs/notify/fanotify/fanotify_user.c
··· 757 757 group->fanotify_data.user = user; 758 758 atomic_inc(&user->fanotify_listeners); 759 759 760 - oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL); 760 + oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL); 761 761 if (unlikely(!oevent)) { 762 762 fd = -ENOMEM; 763 763 goto out_destroy_group;
+7 -1
fs/notify/inotify/inotify_fsnotify.c
··· 99 99 fsn_mark); 100 100 101 101 event = kmalloc(alloc_len, GFP_KERNEL); 102 - if (unlikely(!event)) 102 + if (unlikely(!event)) { 103 + /* 104 + * Treat lost event due to ENOMEM the same way as queue 105 + * overflow to let userspace know event was lost. 106 + */ 107 + fsnotify_queue_overflow(group); 103 108 return -ENOMEM; 109 + } 104 110 105 111 fsn_event = &event->fse; 106 112 fsnotify_init_event(fsn_event, inode, mask);
+14
fs/notify/inotify/inotify_user.c
··· 307 307 spin_unlock(&group->notification_lock); 308 308 ret = put_user(send_len, (int __user *) p); 309 309 break; 310 + #ifdef CONFIG_CHECKPOINT_RESTORE 311 + case INOTIFY_IOC_SETNEXTWD: 312 + ret = -EINVAL; 313 + if (arg >= 1 && arg <= INT_MAX) { 314 + struct inotify_group_private_data *data; 315 + 316 + data = &group->inotify_data; 317 + spin_lock(&data->idr_lock); 318 + idr_set_cursor(&data->idr, (unsigned int)arg); 319 + spin_unlock(&data->idr_lock); 320 + ret = 0; 321 + } 322 + break; 323 + #endif /* CONFIG_CHECKPOINT_RESTORE */ 310 324 } 311 325 312 326 return ret;
+2 -1
fs/notify/notification.c
··· 111 111 return 2; 112 112 } 113 113 114 - if (group->q_len >= group->max_events) { 114 + if (event == group->overflow_event || 115 + group->q_len >= group->max_events) { 115 116 ret = 2; 116 117 /* Queue overflow event only if it isn't already queued */ 117 118 if (!list_empty(&group->overflow_event->list)) {
+1 -1
fs/reiserfs/reiserfs.h
··· 1916 1916 1917 1917 /* empty directory contains two entries "." and ".." and their headers */ 1918 1918 #define EMPTY_DIR_SIZE \ 1919 - (DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen (".."))) 1919 + (DEH_SIZE * 2 + ROUND_UP (sizeof(".") - 1) + ROUND_UP (sizeof("..") - 1)) 1920 1920 1921 1921 /* old format directories have this size when empty */ 1922 1922 #define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3)
+10
fs/udf/file.c
··· 257 257 static int udf_setattr(struct dentry *dentry, struct iattr *attr) 258 258 { 259 259 struct inode *inode = d_inode(dentry); 260 + struct super_block *sb = inode->i_sb; 260 261 int error; 261 262 262 263 error = setattr_prepare(dentry, attr); 263 264 if (error) 264 265 return error; 266 + 267 + if ((attr->ia_valid & ATTR_UID) && 268 + UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) && 269 + !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid)) 270 + return -EPERM; 271 + if ((attr->ia_valid & ATTR_GID) && 272 + UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) && 273 + !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid)) 274 + return -EPERM; 265 275 266 276 if ((attr->ia_valid & ATTR_SIZE) && 267 277 attr->ia_size != i_size_read(inode)) {
+4
fs/udf/ialloc.c
··· 104 104 } 105 105 106 106 inode_init_owner(inode, dir, mode); 107 + if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) 108 + inode->i_uid = sbi->s_uid; 109 + if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) 110 + inode->i_gid = sbi->s_gid; 107 111 108 112 iinfo->i_location.logicalBlockNum = block; 109 113 iinfo->i_location.partitionReferenceNum =
+13 -10
fs/udf/inode.c
··· 1275 1275 unsigned int indirections = 0; 1276 1276 int bs = inode->i_sb->s_blocksize; 1277 1277 int ret = -EIO; 1278 + uint32_t uid, gid; 1278 1279 1279 1280 reread: 1280 1281 if (iloc->partitionReferenceNum >= sbi->s_partitions) { ··· 1401 1400 1402 1401 ret = -EIO; 1403 1402 read_lock(&sbi->s_cred_lock); 1404 - i_uid_write(inode, le32_to_cpu(fe->uid)); 1405 - if (!uid_valid(inode->i_uid) || 1406 - UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || 1403 + uid = le32_to_cpu(fe->uid); 1404 + if (uid == UDF_INVALID_ID || 1407 1405 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) 1408 - inode->i_uid = UDF_SB(inode->i_sb)->s_uid; 1406 + inode->i_uid = sbi->s_uid; 1407 + else 1408 + i_uid_write(inode, uid); 1409 1409 1410 - i_gid_write(inode, le32_to_cpu(fe->gid)); 1411 - if (!gid_valid(inode->i_gid) || 1412 - UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || 1410 + gid = le32_to_cpu(fe->gid); 1411 + if (gid == UDF_INVALID_ID || 1413 1412 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) 1414 - inode->i_gid = UDF_SB(inode->i_sb)->s_gid; 1413 + inode->i_gid = sbi->s_gid; 1414 + else 1415 + i_gid_write(inode, gid); 1415 1416 1416 1417 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && 1417 1418 sbi->s_fmode != UDF_INVALID_MODE) ··· 1658 1655 } 1659 1656 1660 1657 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1661 - fe->uid = cpu_to_le32(-1); 1658 + fe->uid = cpu_to_le32(UDF_INVALID_ID); 1662 1659 else 1663 1660 fe->uid = cpu_to_le32(i_uid_read(inode)); 1664 1661 1665 1662 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET)) 1666 - fe->gid = cpu_to_le32(-1); 1663 + fe->gid = cpu_to_le32(UDF_INVALID_ID); 1667 1664 else 1668 1665 fe->gid = cpu_to_le32(i_gid_read(inode)); 1669 1666
+131 -127
fs/udf/super.c
··· 64 64 #include <linux/init.h> 65 65 #include <linux/uaccess.h> 66 66 67 - #define VDS_POS_PRIMARY_VOL_DESC 0 68 - #define VDS_POS_UNALLOC_SPACE_DESC 1 69 - #define VDS_POS_LOGICAL_VOL_DESC 2 70 - #define VDS_POS_PARTITION_DESC 3 71 - #define VDS_POS_IMP_USE_VOL_DESC 4 72 - #define VDS_POS_VOL_DESC_PTR 5 73 - #define VDS_POS_TERMINATING_DESC 6 74 - #define VDS_POS_LENGTH 7 67 + enum { 68 + VDS_POS_PRIMARY_VOL_DESC, 69 + VDS_POS_UNALLOC_SPACE_DESC, 70 + VDS_POS_LOGICAL_VOL_DESC, 71 + VDS_POS_IMP_USE_VOL_DESC, 72 + VDS_POS_LENGTH 73 + }; 75 74 76 75 #define VSD_FIRST_SECTOR_OFFSET 32768 77 76 #define VSD_MAX_SECTOR_OFFSET 0x800000 ··· 222 223 unsigned int session; 223 224 unsigned int lastblock; 224 225 unsigned int anchor; 225 - unsigned int volume; 226 - unsigned short partition; 227 - unsigned int fileset; 228 - unsigned int rootdir; 229 226 unsigned int flags; 230 227 umode_t umask; 231 228 kgid_t gid; ··· 344 349 seq_puts(seq, ",shortad"); 345 350 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET)) 346 351 seq_puts(seq, ",uid=forget"); 347 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE)) 348 - seq_puts(seq, ",uid=ignore"); 349 352 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET)) 350 353 seq_puts(seq, ",gid=forget"); 351 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE)) 352 - seq_puts(seq, ",gid=ignore"); 353 354 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) 354 355 seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid)); 355 356 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) ··· 362 371 seq_printf(seq, ",lastblock=%u", sbi->s_last_block); 363 372 if (sbi->s_anchor != 0) 364 373 seq_printf(seq, ",anchor=%u", sbi->s_anchor); 365 - /* 366 - * volume, partition, fileset and rootdir seem to be ignored 367 - * currently 368 - */ 369 374 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) 370 375 seq_puts(seq, ",utf8"); 371 376 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map) ··· 474 487 int option; 475 488 476 489 uopt->novrs = 0; 477 - uopt->partition = 0xFFFF; 478 490 
uopt->session = 0xFFFFFFFF; 479 491 uopt->lastblock = 0; 480 492 uopt->anchor = 0; 481 - uopt->volume = 0xFFFFFFFF; 482 - uopt->rootdir = 0xFFFFFFFF; 483 - uopt->fileset = 0xFFFFFFFF; 484 - uopt->nls_map = NULL; 485 493 486 494 if (!options) 487 495 return 1; ··· 564 582 uopt->anchor = option; 565 583 break; 566 584 case Opt_volume: 567 - if (match_int(args, &option)) 568 - return 0; 569 - uopt->volume = option; 570 - break; 571 585 case Opt_partition: 572 - if (match_int(args, &option)) 573 - return 0; 574 - uopt->partition = option; 575 - break; 576 586 case Opt_fileset: 577 - if (match_int(args, &option)) 578 - return 0; 579 - uopt->fileset = option; 580 - break; 581 587 case Opt_rootdir: 582 - if (match_int(args, &option)) 583 - return 0; 584 - uopt->rootdir = option; 588 + /* Ignored (never implemented properly) */ 585 589 break; 586 590 case Opt_utf8: 587 591 uopt->flags |= (1 << UDF_FLAG_UTF8); 588 592 break; 589 593 #ifdef CONFIG_UDF_NLS 590 594 case Opt_iocharset: 591 - uopt->nls_map = load_nls(args[0].from); 592 - uopt->flags |= (1 << UDF_FLAG_NLS_MAP); 595 + if (!remount) { 596 + if (uopt->nls_map) 597 + unload_nls(uopt->nls_map); 598 + uopt->nls_map = load_nls(args[0].from); 599 + uopt->flags |= (1 << UDF_FLAG_NLS_MAP); 600 + } 593 601 break; 594 602 #endif 595 - case Opt_uignore: 596 - uopt->flags |= (1 << UDF_FLAG_UID_IGNORE); 597 - break; 598 603 case Opt_uforget: 599 604 uopt->flags |= (1 << UDF_FLAG_UID_FORGET); 600 605 break; 606 + case Opt_uignore: 601 607 case Opt_gignore: 602 - uopt->flags |= (1 << UDF_FLAG_GID_IGNORE); 608 + /* These options are superseeded by uid=<number> */ 603 609 break; 604 610 case Opt_gforget: 605 611 uopt->flags |= (1 << UDF_FLAG_GID_FORGET); ··· 630 660 uopt.umask = sbi->s_umask; 631 661 uopt.fmode = sbi->s_fmode; 632 662 uopt.dmode = sbi->s_dmode; 663 + uopt.nls_map = NULL; 633 664 634 665 if (!udf_parse_options(options, &uopt, true)) 635 666 return -EINVAL; ··· 1563 1592 sbi->s_lvid_bh = NULL; 1564 1593 } 1565 1594 
1595 + /* 1596 + * Step for reallocation of table of partition descriptor sequence numbers. 1597 + * Must be power of 2. 1598 + */ 1599 + #define PART_DESC_ALLOC_STEP 32 1600 + 1601 + struct desc_seq_scan_data { 1602 + struct udf_vds_record vds[VDS_POS_LENGTH]; 1603 + unsigned int size_part_descs; 1604 + struct udf_vds_record *part_descs_loc; 1605 + }; 1606 + 1607 + static struct udf_vds_record *handle_partition_descriptor( 1608 + struct buffer_head *bh, 1609 + struct desc_seq_scan_data *data) 1610 + { 1611 + struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; 1612 + int partnum; 1613 + 1614 + partnum = le16_to_cpu(desc->partitionNumber); 1615 + if (partnum >= data->size_part_descs) { 1616 + struct udf_vds_record *new_loc; 1617 + unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); 1618 + 1619 + new_loc = kzalloc(sizeof(*new_loc) * new_size, GFP_KERNEL); 1620 + if (!new_loc) 1621 + return ERR_PTR(-ENOMEM); 1622 + memcpy(new_loc, data->part_descs_loc, 1623 + data->size_part_descs * sizeof(*new_loc)); 1624 + kfree(data->part_descs_loc); 1625 + data->part_descs_loc = new_loc; 1626 + data->size_part_descs = new_size; 1627 + } 1628 + return &(data->part_descs_loc[partnum]); 1629 + } 1630 + 1631 + 1632 + static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident, 1633 + struct buffer_head *bh, struct desc_seq_scan_data *data) 1634 + { 1635 + switch (ident) { 1636 + case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1637 + return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]); 1638 + case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1639 + return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]); 1640 + case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1641 + return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]); 1642 + case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1643 + return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]); 1644 + case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1645 + return handle_partition_descriptor(bh, data); 1646 + } 1647 + return NULL; 1648 + } 1566 1649 1567 1650 /* 1568 
1651 * Process a main/reserve volume descriptor sequence. ··· 1633 1608 struct kernel_lb_addr *fileset) 1634 1609 { 1635 1610 struct buffer_head *bh = NULL; 1636 - struct udf_vds_record vds[VDS_POS_LENGTH]; 1637 1611 struct udf_vds_record *curr; 1638 1612 struct generic_desc *gd; 1639 1613 struct volDescPtr *vdp; 1640 1614 bool done = false; 1641 1615 uint32_t vdsn; 1642 1616 uint16_t ident; 1643 - long next_s = 0, next_e = 0; 1644 1617 int ret; 1645 1618 unsigned int indirections = 0; 1619 + struct desc_seq_scan_data data; 1620 + unsigned int i; 1646 1621 1647 - memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1622 + memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1623 + data.size_part_descs = PART_DESC_ALLOC_STEP; 1624 + data.part_descs_loc = kzalloc(sizeof(*data.part_descs_loc) * 1625 + data.size_part_descs, GFP_KERNEL); 1626 + if (!data.part_descs_loc) 1627 + return -ENOMEM; 1648 1628 1649 1629 /* 1650 1630 * Read the main descriptor sequence and find which descriptors ··· 1658 1628 for (; (!done && block <= lastblock); block++) { 1659 1629 1660 1630 bh = udf_read_tagged(sb, block, block, &ident); 1661 - if (!bh) { 1662 - udf_err(sb, 1663 - "Block %llu of volume descriptor sequence is corrupted or we could not read it\n", 1664 - (unsigned long long)block); 1665 - return -EAGAIN; 1666 - } 1631 + if (!bh) 1632 + break; 1667 1633 1668 1634 /* Process each descriptor (ISO 13346 3/8.3-8.4) */ 1669 1635 gd = (struct generic_desc *)bh->b_data; 1670 1636 vdsn = le32_to_cpu(gd->volDescSeqNum); 1671 1637 switch (ident) { 1672 - case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1673 - curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; 1674 - if (vdsn >= curr->volDescSeqNum) { 1675 - curr->volDescSeqNum = vdsn; 1676 - curr->block = block; 1677 - } 1678 - break; 1679 1638 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ 1680 - curr = &vds[VDS_POS_VOL_DESC_PTR]; 1681 - if (vdsn >= curr->volDescSeqNum) { 1682 - curr->volDescSeqNum = vdsn; 1683 - curr->block = 
block; 1639 + if (++indirections > UDF_MAX_TD_NESTING) { 1640 + udf_err(sb, "too many Volume Descriptor " 1641 + "Pointers (max %u supported)\n", 1642 + UDF_MAX_TD_NESTING); 1643 + brelse(bh); 1644 + return -EIO; 1645 + } 1684 1646 1685 - vdp = (struct volDescPtr *)bh->b_data; 1686 - next_s = le32_to_cpu( 1687 - vdp->nextVolDescSeqExt.extLocation); 1688 - next_e = le32_to_cpu( 1689 - vdp->nextVolDescSeqExt.extLength); 1690 - next_e = next_e >> sb->s_blocksize_bits; 1691 - next_e += next_s; 1692 - } 1647 + vdp = (struct volDescPtr *)bh->b_data; 1648 + block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation); 1649 + lastblock = le32_to_cpu( 1650 + vdp->nextVolDescSeqExt.extLength) >> 1651 + sb->s_blocksize_bits; 1652 + lastblock += block - 1; 1653 + /* For loop is going to increment 'block' again */ 1654 + block--; 1693 1655 break; 1656 + case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1694 1657 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1695 - curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; 1696 - if (vdsn >= curr->volDescSeqNum) { 1697 - curr->volDescSeqNum = vdsn; 1698 - curr->block = block; 1699 - } 1700 - break; 1701 - case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1702 - curr = &vds[VDS_POS_PARTITION_DESC]; 1703 - if (!curr->block) 1704 - curr->block = block; 1705 - break; 1706 1658 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1707 - curr = &vds[VDS_POS_LOGICAL_VOL_DESC]; 1708 - if (vdsn >= curr->volDescSeqNum) { 1709 - curr->volDescSeqNum = vdsn; 1710 - curr->block = block; 1711 - } 1712 - break; 1713 1659 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1714 - curr = &vds[VDS_POS_UNALLOC_SPACE_DESC]; 1660 + case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1661 + curr = get_volume_descriptor_record(ident, bh, &data); 1662 + if (IS_ERR(curr)) { 1663 + brelse(bh); 1664 + return PTR_ERR(curr); 1665 + } 1666 + /* Descriptor we don't care about? 
*/ 1667 + if (!curr) 1668 + break; 1715 1669 if (vdsn >= curr->volDescSeqNum) { 1716 1670 curr->volDescSeqNum = vdsn; 1717 1671 curr->block = block; 1718 1672 } 1719 1673 break; 1720 1674 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ 1721 - if (++indirections > UDF_MAX_TD_NESTING) { 1722 - udf_err(sb, "too many TDs (max %u supported)\n", UDF_MAX_TD_NESTING); 1723 - brelse(bh); 1724 - return -EIO; 1725 - } 1726 - 1727 - vds[VDS_POS_TERMINATING_DESC].block = block; 1728 - if (next_e) { 1729 - block = next_s; 1730 - lastblock = next_e; 1731 - next_s = next_e = 0; 1732 - } else 1733 - done = true; 1675 + done = true; 1734 1676 break; 1735 1677 } 1736 1678 brelse(bh); ··· 1711 1709 * Now read interesting descriptors again and process them 1712 1710 * in a suitable order 1713 1711 */ 1714 - if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) { 1712 + if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) { 1715 1713 udf_err(sb, "Primary Volume Descriptor not found!\n"); 1716 1714 return -EAGAIN; 1717 1715 } 1718 - ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block); 1716 + ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block); 1719 1717 if (ret < 0) 1720 1718 return ret; 1721 1719 1722 - if (vds[VDS_POS_LOGICAL_VOL_DESC].block) { 1720 + if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) { 1723 1721 ret = udf_load_logicalvol(sb, 1724 - vds[VDS_POS_LOGICAL_VOL_DESC].block, 1725 - fileset); 1722 + data.vds[VDS_POS_LOGICAL_VOL_DESC].block, 1723 + fileset); 1726 1724 if (ret < 0) 1727 1725 return ret; 1728 1726 } 1729 1727 1730 - if (vds[VDS_POS_PARTITION_DESC].block) { 1731 - /* 1732 - * We rescan the whole descriptor sequence to find 1733 - * partition descriptor blocks and process them. 
1734 - */ 1735 - for (block = vds[VDS_POS_PARTITION_DESC].block; 1736 - block < vds[VDS_POS_TERMINATING_DESC].block; 1737 - block++) { 1738 - ret = udf_load_partdesc(sb, block); 1728 + /* Now handle prevailing Partition Descriptors */ 1729 + for (i = 0; i < data.size_part_descs; i++) { 1730 + if (data.part_descs_loc[i].block) { 1731 + ret = udf_load_partdesc(sb, 1732 + data.part_descs_loc[i].block); 1739 1733 if (ret < 0) 1740 1734 return ret; 1741 1735 } ··· 1758 1760 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); 1759 1761 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); 1760 1762 main_e = main_e >> sb->s_blocksize_bits; 1761 - main_e += main_s; 1763 + main_e += main_s - 1; 1762 1764 1763 1765 /* Locate the reserve sequence */ 1764 1766 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); 1765 1767 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); 1766 1768 reserve_e = reserve_e >> sb->s_blocksize_bits; 1767 - reserve_e += reserve_s; 1769 + reserve_e += reserve_s - 1; 1768 1770 1769 1771 /* Process the main & reserve sequences */ 1770 1772 /* responsible for finding the PartitionDesc(s) */ ··· 1992 1994 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1993 1995 ktime_get_real_ts(&ts); 1994 1996 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts); 1995 - lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); 1997 + if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE) 1998 + lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); 1999 + else 2000 + UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT); 1996 2001 1997 2002 lvid->descTag.descCRC = cpu_to_le16( 1998 2003 crc_itu_t(0, (char *)lvid + sizeof(struct tag), ··· 2035 2034 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); 2036 2035 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) 2037 2036 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); 2038 - lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); 2037 + if 
(!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT)) 2038 + lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); 2039 2039 2040 2040 lvid->descTag.descCRC = cpu_to_le16( 2041 2041 crc_itu_t(0, (char *)lvid + sizeof(struct tag), ··· 2093 2091 bool lvid_open = false; 2094 2092 2095 2093 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); 2096 - uopt.uid = INVALID_UID; 2097 - uopt.gid = INVALID_GID; 2094 + /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ 2095 + uopt.uid = make_kuid(current_user_ns(), overflowuid); 2096 + uopt.gid = make_kgid(current_user_ns(), overflowgid); 2098 2097 uopt.umask = 0; 2099 2098 uopt.fmode = UDF_INVALID_MODE; 2100 2099 uopt.dmode = UDF_INVALID_MODE; 2100 + uopt.nls_map = NULL; 2101 2101 2102 2102 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 2103 2103 if (!sbi) ··· 2280 2276 iput(sbi->s_vat_inode); 2281 2277 parse_options_failure: 2282 2278 #ifdef CONFIG_UDF_NLS 2283 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) 2284 - unload_nls(sbi->s_nls_map); 2279 + if (uopt.nls_map) 2280 + unload_nls(uopt.nls_map); 2285 2281 #endif 2286 2282 if (lvid_open) 2287 2283 udf_close_lvid(sb);
+7 -8
fs/udf/udf_sb.h
··· 23 23 #define UDF_FLAG_NLS_MAP 9 24 24 #define UDF_FLAG_UTF8 10 25 25 #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */ 26 - #define UDF_FLAG_UID_IGNORE 12 /* use sb uid instead of on disk uid */ 27 - #define UDF_FLAG_GID_FORGET 13 28 - #define UDF_FLAG_GID_IGNORE 14 29 - #define UDF_FLAG_UID_SET 15 30 - #define UDF_FLAG_GID_SET 16 31 - #define UDF_FLAG_SESSION_SET 17 32 - #define UDF_FLAG_LASTBLOCK_SET 18 33 - #define UDF_FLAG_BLOCKSIZE_SET 19 26 + #define UDF_FLAG_GID_FORGET 12 27 + #define UDF_FLAG_UID_SET 13 28 + #define UDF_FLAG_GID_SET 14 29 + #define UDF_FLAG_SESSION_SET 15 30 + #define UDF_FLAG_LASTBLOCK_SET 16 31 + #define UDF_FLAG_BLOCKSIZE_SET 17 32 + #define UDF_FLAG_INCONSISTENT 18 34 33 35 34 #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 36 35 #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
+2
fs/udf/udfdecl.h
··· 48 48 #define UDF_EXTENT_LENGTH_MASK 0x3FFFFFFF 49 49 #define UDF_EXTENT_FLAG_MASK 0xC0000000 50 50 51 + #define UDF_INVALID_ID ((uint32_t)-1) 52 + 51 53 #define UDF_NAME_PAD 4 52 54 #define UDF_NAME_LEN 254 53 55 #define UDF_NAME_LEN_CS0 255
+6
include/linux/fsnotify_backend.h
··· 331 331 struct fsnotify_event *event, 332 332 int (*merge)(struct list_head *, 333 333 struct fsnotify_event *)); 334 + /* Queue overflow event to a notification group */ 335 + static inline void fsnotify_queue_overflow(struct fsnotify_group *group) 336 + { 337 + fsnotify_add_event(group, group->overflow_event, NULL); 338 + } 339 + 334 340 /* true if the group notification queue is empty */ 335 341 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); 336 342 /* return, but do not dequeue the first event on the notification queue */
-1
include/linux/quota.h
··· 267 267 struct percpu_counter counter[_DQST_DQSTAT_LAST]; 268 268 }; 269 269 270 - extern struct dqstats *dqstats_pcpu; 271 270 extern struct dqstats dqstats; 272 271 273 272 static inline void dqstats_inc(unsigned int type)
+8
include/uapi/linux/inotify.h
··· 71 71 #define IN_CLOEXEC O_CLOEXEC 72 72 #define IN_NONBLOCK O_NONBLOCK 73 73 74 + /* 75 + * ioctl numbers: inotify uses 'I' prefix for all ioctls, 76 + * except historical FIONREAD, which is based on 'T'. 77 + * 78 + * INOTIFY_IOC_SETNEXTWD: set desired number of next created 79 + * watch descriptor. 80 + */ 81 + #define INOTIFY_IOC_SETNEXTWD _IOW('I', 0, __s32) 74 82 75 83 #endif /* _UAPI_LINUX_INOTIFY_H */