Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

userns: Convert struct dquot dq_id to be a struct kqid

Change struct dquot dq_id to a struct kqid and remove the now
unnecessary dq_type.

Make minimal changes to dquot, quota_tree, quota_v1, quota_v2, ext3,
ext4, and ocfs2 to deal with the change in quota structures and
signatures. The ocfs2 changes are larger than most because of the
extensive tracing throughout the ocfs2 quota code that prints out
dq_id.

quota_tree.c:get_index is modified to take a struct kqid instead of a
qid_t because all of its callers pass in dquot->dq_id and it allows
me to introduce only a single conversion.

The rest of the changes consist of replacing dq_type with dq_id.type,
adding conversions to deal with the change in type, and occasionally
adding qid_eq to allow quota id comparisons in a user namespace safe way.

Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Theodore Tso <tytso@mit.edu>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>

+102 -82
+1 -1
fs/ext3/super.c
··· 2814 2814 2815 2815 static inline struct inode *dquot_to_inode(struct dquot *dquot) 2816 2816 { 2817 - return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 2817 + return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 2818 2818 } 2819 2819 2820 2820 static int ext3_write_dquot(struct dquot *dquot)
+1 -1
fs/ext4/super.c
··· 4796 4796 4797 4797 static inline struct inode *dquot_to_inode(struct dquot *dquot) 4798 4798 { 4799 - return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 4799 + return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 4800 4800 } 4801 4801 4802 4802 static int ext4_write_dquot(struct dquot *dquot)
+26 -17
fs/ocfs2/quota_global.c
··· 95 95 struct ocfs2_global_disk_dqblk *d = dp; 96 96 struct mem_dqblk *m = &dquot->dq_dqb; 97 97 98 - d->dqb_id = cpu_to_le32(dquot->dq_id); 98 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 99 99 d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count); 100 100 d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); 101 101 d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); ··· 112 112 { 113 113 struct ocfs2_global_disk_dqblk *d = dp; 114 114 struct ocfs2_mem_dqinfo *oinfo = 115 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 115 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 116 116 117 117 if (qtree_entry_unused(&oinfo->dqi_gi, dp)) 118 118 return 0; 119 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 119 + 120 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 121 + le32_to_cpu(d->dqb_id)), 122 + dquot->dq_id); 120 123 } 121 124 122 125 struct qtree_fmt_operations ocfs2_global_ops = { ··· 478 475 { 479 476 int err, err2; 480 477 struct super_block *sb = dquot->dq_sb; 481 - int type = dquot->dq_type; 478 + int type = dquot->dq_id.type; 482 479 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; 483 480 struct ocfs2_global_disk_dqblk dqblk; 484 481 s64 spacechange, inodechange; ··· 507 504 olditime = dquot->dq_dqb.dqb_itime; 508 505 oldbtime = dquot->dq_dqb.dqb_btime; 509 506 ocfs2_global_disk2memdqb(dquot, &dqblk); 510 - trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace, 507 + trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id), 508 + dquot->dq_dqb.dqb_curspace, 511 509 (long long)spacechange, 512 510 dquot->dq_dqb.dqb_curinodes, 513 511 (long long)inodechange); ··· 559 555 err = ocfs2_qinfo_lock(info, freeing); 560 556 if (err < 0) { 561 557 mlog(ML_ERROR, "Failed to lock quota info, losing quota write" 562 - " (type=%d, id=%u)\n", dquot->dq_type, 563 - (unsigned)dquot->dq_id); 558 + " (type=%d, id=%u)\n", dquot->dq_id.type, 559 + (unsigned)from_kqid(&init_user_ns, 
dquot->dq_id)); 564 560 goto out; 565 561 } 566 562 if (freeing) ··· 595 591 struct ocfs2_super *osb = OCFS2_SB(sb); 596 592 int status = 0; 597 593 598 - trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type, 594 + trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id), 595 + dquot->dq_id.type, 599 596 type, sb->s_id); 600 - if (type != dquot->dq_type) 597 + if (type != dquot->dq_id.type) 601 598 goto out; 602 599 status = ocfs2_lock_global_qf(oinfo, 1); 603 600 if (status < 0) ··· 648 643 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 649 644 int status = 0; 650 645 651 - trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type); 646 + trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id), 647 + dquot->dq_id.type); 652 648 653 649 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); 654 650 if (IS_ERR(handle)) { ··· 683 677 { 684 678 handle_t *handle; 685 679 struct ocfs2_mem_dqinfo *oinfo = 686 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 680 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 687 681 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 688 682 int status = 0; 689 683 690 - trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type); 684 + trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id), 685 + dquot->dq_id.type); 691 686 692 687 mutex_lock(&dquot->dq_lock); 693 688 /* Check whether we are not racing with some other dqget() */ ··· 698 691 if (status < 0) 699 692 goto out; 700 693 handle = ocfs2_start_trans(osb, 701 - ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type)); 694 + ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); 702 695 if (IS_ERR(handle)) { 703 696 status = PTR_ERR(handle); 704 697 mlog_errno(status); ··· 740 733 int ex = 0; 741 734 struct super_block *sb = dquot->dq_sb; 742 735 struct ocfs2_super *osb = OCFS2_SB(sb); 743 - int type = dquot->dq_type; 736 + int type = dquot->dq_id.type; 744 737 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; 745 738 struct 
inode *gqinode = info->dqi_gqinode; 746 739 int need_alloc = ocfs2_global_qinit_alloc(sb, type); 747 740 handle_t *handle; 748 741 749 - trace_ocfs2_acquire_dquot(dquot->dq_id, type); 742 + trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id), 743 + type); 750 744 mutex_lock(&dquot->dq_lock); 751 745 /* 752 746 * We need an exclusive lock, because we're going to update use count ··· 829 821 int sync = 0; 830 822 int status; 831 823 struct super_block *sb = dquot->dq_sb; 832 - int type = dquot->dq_type; 824 + int type = dquot->dq_id.type; 833 825 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 834 826 handle_t *handle; 835 827 struct ocfs2_super *osb = OCFS2_SB(sb); 836 828 837 - trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type); 829 + trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id), 830 + type); 838 831 839 832 /* In case user set some limits, sync dquot immediately to global 840 833 * quota file so that information propagates quicker */
+6 -5
fs/ocfs2/quota_local.c
··· 883 883 dqblk = (struct ocfs2_local_disk_dqblk *)(bh->b_data 884 884 + ol_dqblk_block_offset(sb, od->dq_local_off)); 885 885 886 - dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id); 886 + dqblk->dqb_id = cpu_to_le64(from_kqid(&init_user_ns, 887 + od->dq_dquot.dq_id)); 887 888 spin_lock(&dq_data_lock); 888 889 dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace - 889 890 od->dq_origspace); ··· 894 893 trace_olq_set_dquot( 895 894 (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod), 896 895 (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod), 897 - od->dq_dquot.dq_id); 896 + from_kqid(&init_user_ns, od->dq_dquot.dq_id)); 898 897 } 899 898 900 899 /* Write dquot to local quota file */ ··· 903 902 struct super_block *sb = dquot->dq_sb; 904 903 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 905 904 struct buffer_head *bh; 906 - struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type]; 905 + struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_id.type]; 907 906 int status; 908 907 909 908 status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk, ··· 1224 1223 int ocfs2_create_local_dquot(struct dquot *dquot) 1225 1224 { 1226 1225 struct super_block *sb = dquot->dq_sb; 1227 - int type = dquot->dq_type; 1226 + int type = dquot->dq_id.type; 1228 1227 struct inode *lqinode = sb_dqopt(sb)->files[type]; 1229 1228 struct ocfs2_quota_chunk *chunk; 1230 1229 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); ··· 1278 1277 int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) 1279 1278 { 1280 1279 int status; 1281 - int type = dquot->dq_type; 1280 + int type = dquot->dq_id.type; 1282 1281 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 1283 1282 struct super_block *sb = dquot->dq_sb; 1284 1283 struct ocfs2_local_disk_chunk *dchunk;
+32 -31
fs/quota/dquot.c
··· 267 267 static inline void insert_dquot_hash(struct dquot *dquot) 268 268 { 269 269 struct hlist_head *head; 270 - head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); 270 + head = dquot_hash + hashfn(dquot->dq_sb, from_kqid(&init_user_ns, dquot->dq_id), dquot->dq_id.type); 271 271 hlist_add_head(&dquot->dq_hash, head); 272 272 } 273 273 ··· 279 279 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, 280 280 unsigned int id, int type) 281 281 { 282 + struct kqid qid = make_kqid(&init_user_ns, type, id); 282 283 struct hlist_node *node; 283 284 struct dquot *dquot; 284 285 285 286 hlist_for_each (node, dquot_hash+hashent) { 286 287 dquot = hlist_entry(node, struct dquot, dq_hash); 287 - if (dquot->dq_sb == sb && dquot->dq_id == id && 288 - dquot->dq_type == type) 288 + if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid)) 289 289 return dquot; 290 290 } 291 291 return NULL; ··· 351 351 spin_lock(&dq_list_lock); 352 352 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) { 353 353 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> 354 - info[dquot->dq_type].dqi_dirty_list); 354 + info[dquot->dq_id.type].dqi_dirty_list); 355 355 ret = 0; 356 356 } 357 357 spin_unlock(&dq_list_lock); ··· 410 410 mutex_lock(&dquot->dq_lock); 411 411 mutex_lock(&dqopt->dqio_mutex); 412 412 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) 413 - ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); 413 + ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); 414 414 if (ret < 0) 415 415 goto out_iolock; 416 416 set_bit(DQ_READ_B, &dquot->dq_flags); 417 417 /* Instantiate dquot if needed */ 418 418 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { 419 - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 419 + ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); 420 420 /* Write the info if needed */ 421 - if (info_dirty(&dqopt->info[dquot->dq_type])) { 422 - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( 423 - 
dquot->dq_sb, dquot->dq_type); 421 + if (info_dirty(&dqopt->info[dquot->dq_id.type])) { 422 + ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( 423 + dquot->dq_sb, dquot->dq_id.type); 424 424 } 425 425 if (ret < 0) 426 426 goto out_iolock; ··· 455 455 /* Inactive dquot can be only if there was error during read/init 456 456 * => we have better not writing it */ 457 457 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) 458 - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 458 + ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); 459 459 else 460 460 ret = -EIO; 461 461 out_sem: ··· 477 477 if (atomic_read(&dquot->dq_count) > 1) 478 478 goto out_dqlock; 479 479 mutex_lock(&dqopt->dqio_mutex); 480 - if (dqopt->ops[dquot->dq_type]->release_dqblk) { 481 - ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); 480 + if (dqopt->ops[dquot->dq_id.type]->release_dqblk) { 481 + ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot); 482 482 /* Write the info */ 483 - if (info_dirty(&dqopt->info[dquot->dq_type])) { 484 - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( 485 - dquot->dq_sb, dquot->dq_type); 483 + if (info_dirty(&dqopt->info[dquot->dq_id.type])) { 484 + ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( 485 + dquot->dq_sb, dquot->dq_id.type); 486 486 } 487 487 if (ret >= 0) 488 488 ret = ret2; ··· 521 521 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { 522 522 if (dquot->dq_sb != sb) 523 523 continue; 524 - if (dquot->dq_type != type) 524 + if (dquot->dq_id.type != type) 525 525 continue; 526 526 /* Wait for dquot users */ 527 527 if (atomic_read(&dquot->dq_count)) { ··· 741 741 #ifdef CONFIG_QUOTA_DEBUG 742 742 if (!atomic_read(&dquot->dq_count)) { 743 743 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d", 744 - quotatypes[dquot->dq_type], dquot->dq_id); 744 + quotatypes[dquot->dq_id.type], 745 + from_kqid(&init_user_ns, dquot->dq_id)); 745 746 BUG(); 746 747 } 747 748 #endif ··· 753 752 /* We have more than one 
user... nothing to do */ 754 753 atomic_dec(&dquot->dq_count); 755 754 /* Releasing dquot during quotaoff phase? */ 756 - if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && 755 + if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) && 757 756 atomic_read(&dquot->dq_count) == 1) 758 757 wake_up(&dquot->dq_wait_unused); 759 758 spin_unlock(&dq_list_lock); ··· 816 815 INIT_LIST_HEAD(&dquot->dq_dirty); 817 816 init_waitqueue_head(&dquot->dq_wait_unused); 818 817 dquot->dq_sb = sb; 819 - dquot->dq_type = type; 818 + dquot->dq_id.type = type; 820 819 atomic_set(&dquot->dq_count, 1); 821 820 822 821 return dquot; ··· 860 859 } 861 860 dquot = empty; 862 861 empty = NULL; 863 - dquot->dq_id = id; 862 + dquot->dq_id = qid; 864 863 /* all dquots go on the inuse_list */ 865 864 put_inuse(dquot); 866 865 /* hash it first so it can be found */ ··· 1220 1219 return; 1221 1220 warn->w_type = warntype; 1222 1221 warn->w_sb = dquot->dq_sb; 1223 - warn->w_dq_id = dquot->dq_id; 1224 - warn->w_dq_type = dquot->dq_type; 1222 + warn->w_dq_id = from_kqid(&init_user_ns, dquot->dq_id); 1223 + warn->w_dq_type = dquot->dq_id.type; 1225 1224 } 1226 1225 1227 1226 /* ··· 1246 1245 1247 1246 static int ignore_hardlimit(struct dquot *dquot) 1248 1247 { 1249 - struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 1248 + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 1250 1249 1251 1250 return capable(CAP_SYS_RESOURCE) && 1252 1251 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || ··· 1259 1258 { 1260 1259 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; 1261 1260 1262 - if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1261 + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) || 1263 1262 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1264 1263 return 0; 1265 1264 ··· 1284 1283 dquot->dq_dqb.dqb_itime == 0) { 1285 1284 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN); 1286 1285 dquot->dq_dqb.dqb_itime = 
get_seconds() + 1287 - sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1286 + sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace; 1288 1287 } 1289 1288 1290 1289 return 0; ··· 1297 1296 qsize_t tspace; 1298 1297 struct super_block *sb = dquot->dq_sb; 1299 1298 1300 - if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || 1299 + if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || 1301 1300 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1302 1301 return 0; 1303 1302 ··· 1328 1327 if (!prealloc) { 1329 1328 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN); 1330 1329 dquot->dq_dqb.dqb_btime = get_seconds() + 1331 - sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace; 1330 + sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace; 1332 1331 } 1333 1332 else 1334 1333 /* ··· 1347 1346 1348 1347 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || 1349 1348 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || 1350 - !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) 1349 + !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type)) 1351 1350 return QUOTA_NL_NOWARN; 1352 1351 1353 1352 newinodes = dquot->dq_dqb.dqb_curinodes - inodes; ··· 2363 2362 2364 2363 memset(di, 0, sizeof(*di)); 2365 2364 di->d_version = FS_DQUOT_VERSION; 2366 - di->d_flags = dquot->dq_type == USRQUOTA ? 2365 + di->d_flags = dquot->dq_id.type == USRQUOTA ? 2367 2366 FS_USER_QUOTA : FS_GROUP_QUOTA; 2368 - di->d_id = dquot->dq_id; 2367 + di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); 2369 2368 2370 2369 spin_lock(&dq_data_lock); 2371 2370 di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); ··· 2404 2403 { 2405 2404 struct mem_dqblk *dm = &dquot->dq_dqb; 2406 2405 int check_blim = 0, check_ilim = 0; 2407 - struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 2406 + struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 2408 2407 2409 2408 if (di->d_fieldmask & ~VFS_FS_DQ_MASK) 2410 2409 return -EINVAL;
+13 -9
fs/quota/quota_tree.c
··· 22 22 23 23 #define __QUOTA_QT_PARANOIA 24 24 25 - static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) 25 + static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth) 26 26 { 27 27 unsigned int epb = info->dqi_usable_bs >> 2; 28 + qid_t id = from_kqid(&init_user_ns, qid); 28 29 29 30 depth = info->dqi_qtree_depth - depth - 1; 30 31 while (depth--) ··· 245 244 /* This is enough as the block is already zeroed and the entry 246 245 * list is empty... */ 247 246 info->dqi_free_entry = blk; 248 - mark_info_dirty(dquot->dq_sb, dquot->dq_type); 247 + mark_info_dirty(dquot->dq_sb, dquot->dq_id.type); 249 248 } 250 249 /* Block will be full? */ 251 250 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { ··· 358 357 */ 359 358 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 360 359 { 361 - int type = dquot->dq_type; 360 + int type = dquot->dq_id.type; 362 361 struct super_block *sb = dquot->dq_sb; 363 362 ssize_t ret; 364 363 char *ddquot = getdqbuf(info->dqi_entry_size); ··· 539 538 ddquot += info->dqi_entry_size; 540 539 } 541 540 if (i == qtree_dqstr_in_blk(info)) { 542 - quota_error(dquot->dq_sb, "Quota for id %u referenced " 543 - "but not present", dquot->dq_id); 541 + quota_error(dquot->dq_sb, 542 + "Quota for id %u referenced but not present", 543 + from_kqid(&init_user_ns, dquot->dq_id)); 544 544 ret = -EIO; 545 545 goto out_buf; 546 546 } else { ··· 591 589 592 590 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 593 591 { 594 - int type = dquot->dq_type; 592 + int type = dquot->dq_id.type; 595 593 struct super_block *sb = dquot->dq_sb; 596 594 loff_t offset; 597 595 char *ddquot; ··· 609 607 offset = find_dqentry(info, dquot); 610 608 if (offset <= 0) { /* Entry not present? 
*/ 611 609 if (offset < 0) 612 - quota_error(sb, "Can't read quota structure " 613 - "for id %u", dquot->dq_id); 610 + quota_error(sb,"Can't read quota structure " 611 + "for id %u", 612 + from_kqid(&init_user_ns, 613 + dquot->dq_id)); 614 614 dquot->dq_off = 0; 615 615 set_bit(DQ_FAKE_B, &dquot->dq_flags); 616 616 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); ··· 630 626 if (ret >= 0) 631 627 ret = -EIO; 632 628 quota_error(sb, "Error while reading quota structure for id %u", 633 - dquot->dq_id); 629 + from_kqid(&init_user_ns, dquot->dq_id)); 634 630 set_bit(DQ_FAKE_B, &dquot->dq_flags); 635 631 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); 636 632 kfree(ddquot);
+7 -5
fs/quota/quota_v1.c
··· 54 54 55 55 static int v1_read_dqblk(struct dquot *dquot) 56 56 { 57 - int type = dquot->dq_type; 57 + int type = dquot->dq_id.type; 58 58 struct v1_disk_dqblk dqblk; 59 59 60 60 if (!sb_dqopt(dquot->dq_sb)->files[type]) ··· 63 63 /* Set structure to 0s in case read fails/is after end of file */ 64 64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); 65 65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, 66 - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 66 + sizeof(struct v1_disk_dqblk), 67 + v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id))); 67 68 68 69 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); 69 70 if (dquot->dq_dqb.dqb_bhardlimit == 0 && ··· 79 78 80 79 static int v1_commit_dqblk(struct dquot *dquot) 81 80 { 82 - short type = dquot->dq_type; 81 + short type = dquot->dq_id.type; 83 82 ssize_t ret; 84 83 struct v1_disk_dqblk dqblk; 85 84 86 85 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); 87 - if (dquot->dq_id == 0) { 86 + if (((type == USRQUOTA) && uid_eq(dquot->dq_id.uid, GLOBAL_ROOT_UID)) || 87 + ((type == GRPQUOTA) && gid_eq(dquot->dq_id.gid, GLOBAL_ROOT_GID))) { 88 88 dqblk.dqb_btime = 89 89 sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; 90 90 dqblk.dqb_itime = ··· 95 93 if (sb_dqopt(dquot->dq_sb)->files[type]) 96 94 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, 97 95 (char *)&dqblk, sizeof(struct v1_disk_dqblk), 98 - v1_dqoff(dquot->dq_id)); 96 + v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id))); 99 97 if (ret != sizeof(struct v1_disk_dqblk)) { 100 98 quota_error(dquot->dq_sb, "dquota write failed"); 101 99 if (ret >= 0)
+15 -11
fs/quota/quota_v2.c
··· 196 196 struct v2r0_disk_dqblk *d = dp; 197 197 struct mem_dqblk *m = &dquot->dq_dqb; 198 198 struct qtree_mem_dqinfo *info = 199 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 199 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 200 200 201 201 d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); 202 202 d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); ··· 206 206 d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); 207 207 d->dqb_curspace = cpu_to_le64(m->dqb_curspace); 208 208 d->dqb_btime = cpu_to_le64(m->dqb_btime); 209 - d->dqb_id = cpu_to_le32(dquot->dq_id); 209 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 210 210 if (qtree_entry_unused(info, dp)) 211 211 d->dqb_itime = cpu_to_le64(1); 212 212 } ··· 215 215 { 216 216 struct v2r0_disk_dqblk *d = dp; 217 217 struct qtree_mem_dqinfo *info = 218 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 218 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 219 219 220 220 if (qtree_entry_unused(info, dp)) 221 221 return 0; 222 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 222 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 223 + le32_to_cpu(d->dqb_id)), 224 + dquot->dq_id); 223 225 } 224 226 225 227 static void v2r1_disk2memdqb(struct dquot *dquot, void *dp) ··· 249 247 struct v2r1_disk_dqblk *d = dp; 250 248 struct mem_dqblk *m = &dquot->dq_dqb; 251 249 struct qtree_mem_dqinfo *info = 252 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 250 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 253 251 254 252 d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); 255 253 d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); ··· 259 257 d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit)); 260 258 d->dqb_curspace = cpu_to_le64(m->dqb_curspace); 261 259 d->dqb_btime = cpu_to_le64(m->dqb_btime); 262 - d->dqb_id = cpu_to_le32(dquot->dq_id); 260 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 263 261 if 
(qtree_entry_unused(info, dp)) 264 262 d->dqb_itime = cpu_to_le64(1); 265 263 } ··· 268 266 { 269 267 struct v2r1_disk_dqblk *d = dp; 270 268 struct qtree_mem_dqinfo *info = 271 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 269 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 272 270 273 271 if (qtree_entry_unused(info, dp)) 274 272 return 0; 275 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 273 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 274 + le32_to_cpu(d->dqb_id)), 275 + dquot->dq_id); 276 276 } 277 277 278 278 static int v2_read_dquot(struct dquot *dquot) 279 279 { 280 - return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 280 + return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 281 281 } 282 282 283 283 static int v2_write_dquot(struct dquot *dquot) 284 284 { 285 - return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 285 + return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 286 286 } 287 287 288 288 static int v2_release_dquot(struct dquot *dquot) 289 289 { 290 - return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 290 + return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 291 291 } 292 292 293 293 static int v2_free_file_info(struct super_block *sb, int type)
+1 -2
include/linux/quota.h
··· 419 419 atomic_t dq_count; /* Use count */ 420 420 wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ 421 421 struct super_block *dq_sb; /* superblock this applies to */ 422 - unsigned int dq_id; /* ID this applies to (uid, gid) */ 422 + struct kqid dq_id; /* ID this applies to (uid, gid, projid) */ 423 423 loff_t dq_off; /* Offset of dquot on disk */ 424 424 unsigned long dq_flags; /* See DQ_* */ 425 - short dq_type; /* Type of quota */ 426 425 struct mem_dqblk dq_dqb; /* Diskquota usage */ 427 426 }; 428 427