Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace

Pull user namespace changes from Eric Biederman:
"This is a mostly modest set of changes to enable basic user namespace
support. This allows the code to compile with user namespaces
enabled and removes the assumption there is only the initial user
namespace. Everything is converted except for the most complex of the
filesystems: autofs4, 9p, afs, ceph, cifs, coda, fuse, gfs2, ncpfs,
nfs, ocfs2 and xfs as those patches need a bit more review.

The strategy is to push kuid_t and kgid_t values as far down into
subsystems and filesystems as reasonable. Leaving the make_kuid and
from_kuid operations to happen at the edge of userspace, as the values
come off the disk, and as the values come in from the network.
Letting the type-incompatible compile errors (present when user
namespaces are enabled) guide me to find the issues.

The most tricky areas have been the places where we had an implicit
union of uid and gid values and were storing them in an unsigned int.
Those places were converted into explicit unions. I made certain to
handle those places with simple trivial patches.

Out of that work I discovered we have generic interfaces for storing
quota by projid. I had never heard of the project identifiers before.
Adding full user namespace support for project identifiers accounts
for most of the code size growth in my git tree.

Ultimately there will be work to relax privilege checks from
"capable(FOO)" to "ns_capable(user_ns, FOO)" where it is safe allowing
root in a user namespace to do those things that today we only forbid to
non-root users because it will confuse suid root applications.

While I was pushing kuid_t and kgid_t changes deep into the audit code
I made a few other cleanups. I capitalized on the fact we process
netlink messages in the context of the message sender. I removed
usage of NETLINK_CRED, and started directly using current->tty.

Some of these patches have also made it into maintainer trees, with no
problems from identical code from different trees showing up in
linux-next.

After reading through all of this code I feel like I might be able to
win a game of kernel trivial pursuit."

Fix up some fairly trivial conflicts in netfilter uid/gid logging code.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (107 commits)
userns: Convert the ufs filesystem to use kuid/kgid where appropriate
userns: Convert the udf filesystem to use kuid/kgid where appropriate
userns: Convert ubifs to use kuid/kgid
userns: Convert squashfs to use kuid/kgid where appropriate
userns: Convert reiserfs to use kuid and kgid where appropriate
userns: Convert jfs to use kuid/kgid where appropriate
userns: Convert jffs2 to use kuid and kgid where appropriate
userns: Convert hpfs to use kuid and kgid where appropriate
userns: Convert btrfs to use kuid/kgid where appropriate
userns: Convert bfs to use kuid/kgid where appropriate
userns: Convert affs to use kuid/kgid wherwe appropriate
userns: On alpha modify linux_to_osf_stat to use convert from kuids and kgids
userns: On ia64 deal with current_uid and current_gid being kuid and kgid
userns: On ppc convert current_uid from a kuid before printing.
userns: Convert s390 getting uid and gid system calls to use kuid and kgid
userns: Convert s390 hypfs to use kuid and kgid where appropriate
userns: Convert binder ipc to use kuids
userns: Teach security_path_chown to take kuids and kgids
userns: Add user namespace support to IMA
userns: Convert EVM to deal with kuids and kgids in it's hmac computation
...

+2471 -1304
+2 -2
arch/alpha/kernel/osf_sys.c
··· 278 278 tmp.st_dev = lstat->dev; 279 279 tmp.st_mode = lstat->mode; 280 280 tmp.st_nlink = lstat->nlink; 281 - tmp.st_uid = lstat->uid; 282 - tmp.st_gid = lstat->gid; 281 + tmp.st_uid = from_kuid_munged(current_user_ns(), lstat->uid); 282 + tmp.st_gid = from_kgid_munged(current_user_ns(), lstat->gid); 283 283 tmp.st_rdev = lstat->rdev; 284 284 tmp.st_ldev = lstat->rdev; 285 285 tmp.st_size = lstat->size;
+2 -1
arch/ia64/kernel/mca_drv.c
··· 158 158 ia64_mlogbuf_dump(); 159 159 printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, " 160 160 "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n", 161 - raw_smp_processor_id(), current->pid, current_uid(), 161 + raw_smp_processor_id(), current->pid, 162 + from_kuid(&init_user_ns, current_uid()), 162 163 iip, ipsr, paddr, current->comm); 163 164 164 165 spin_lock(&mca_bh_lock);
+15 -15
arch/ia64/kernel/perfmon.c
··· 2380 2380 pfm_bad_permissions(struct task_struct *task) 2381 2381 { 2382 2382 const struct cred *tcred; 2383 - uid_t uid = current_uid(); 2384 - gid_t gid = current_gid(); 2383 + kuid_t uid = current_uid(); 2384 + kgid_t gid = current_gid(); 2385 2385 int ret; 2386 2386 2387 2387 rcu_read_lock(); ··· 2389 2389 2390 2390 /* inspired by ptrace_attach() */ 2391 2391 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", 2392 - uid, 2393 - gid, 2394 - tcred->euid, 2395 - tcred->suid, 2396 - tcred->uid, 2397 - tcred->egid, 2398 - tcred->sgid)); 2392 + from_kuid(&init_user_ns, uid), 2393 + from_kgid(&init_user_ns, gid), 2394 + from_kuid(&init_user_ns, tcred->euid), 2395 + from_kuid(&init_user_ns, tcred->suid), 2396 + from_kuid(&init_user_ns, tcred->uid), 2397 + from_kgid(&init_user_ns, tcred->egid), 2398 + from_kgid(&init_user_ns, tcred->sgid))); 2399 2399 2400 - ret = ((uid != tcred->euid) 2401 - || (uid != tcred->suid) 2402 - || (uid != tcred->uid) 2403 - || (gid != tcred->egid) 2404 - || (gid != tcred->sgid) 2405 - || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE); 2400 + ret = ((!uid_eq(uid, tcred->euid)) 2401 + || (!uid_eq(uid, tcred->suid)) 2402 + || (!uid_eq(uid, tcred->uid)) 2403 + || (!gid_eq(gid, tcred->egid)) 2404 + || (!gid_eq(gid, tcred->sgid)) 2405 + || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE); 2406 2406 2407 2407 rcu_read_unlock(); 2408 2408 return ret;
+2 -2
arch/ia64/kernel/signal.c
··· 220 220 si.si_errno = 0; 221 221 si.si_code = SI_KERNEL; 222 222 si.si_pid = task_pid_vnr(current); 223 - si.si_uid = current_uid(); 223 + si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 224 224 si.si_addr = sc; 225 225 force_sig_info(SIGSEGV, &si, current); 226 226 return retval; ··· 317 317 si.si_errno = 0; 318 318 si.si_code = SI_KERNEL; 319 319 si.si_pid = task_pid_vnr(current); 320 - si.si_uid = current_uid(); 320 + si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 321 321 si.si_addr = addr; 322 322 force_sig_info(SIGSEGV, &si, current); 323 323 return 0;
+1 -1
arch/powerpc/mm/fault.c
··· 470 470 if (is_exec && (error_code & DSISR_PROTFAULT)) 471 471 printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected" 472 472 " page (%lx) - exploit attempt? (uid: %d)\n", 473 - address, current_uid()); 473 + address, from_kuid(&init_user_ns, current_uid())); 474 474 475 475 return SIGSEGV; 476 476
+14 -6
arch/s390/hypfs/inode.c
··· 31 31 struct dentry *dir); 32 32 33 33 struct hypfs_sb_info { 34 - uid_t uid; /* uid used for files and dirs */ 35 - gid_t gid; /* gid used for files and dirs */ 34 + kuid_t uid; /* uid used for files and dirs */ 35 + kgid_t gid; /* gid used for files and dirs */ 36 36 struct dentry *update_file; /* file to trigger update */ 37 37 time_t last_update; /* last update time in secs since 1970 */ 38 38 struct mutex lock; /* lock to protect update process */ ··· 229 229 { 230 230 char *str; 231 231 substring_t args[MAX_OPT_ARGS]; 232 + kuid_t uid; 233 + kgid_t gid; 232 234 233 235 if (!options) 234 236 return 0; ··· 245 243 case opt_uid: 246 244 if (match_int(&args[0], &option)) 247 245 return -EINVAL; 248 - hypfs_info->uid = option; 246 + uid = make_kuid(current_user_ns(), option); 247 + if (!uid_valid(uid)) 248 + return -EINVAL; 249 + hypfs_info->uid = uid; 249 250 break; 250 251 case opt_gid: 251 252 if (match_int(&args[0], &option)) 252 253 return -EINVAL; 253 - hypfs_info->gid = option; 254 + gid = make_kgid(current_user_ns(), option); 255 + if (!gid_valid(gid)) 256 + return -EINVAL; 257 + hypfs_info->gid = gid; 254 258 break; 255 259 case opt_err: 256 260 default: ··· 271 263 { 272 264 struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info; 273 265 274 - seq_printf(s, ",uid=%u", hypfs_info->uid); 275 - seq_printf(s, ",gid=%u", hypfs_info->gid); 266 + seq_printf(s, ",uid=%u", from_kuid_munged(&init_user_ns, hypfs_info->uid)); 267 + seq_printf(s, ",gid=%u", from_kgid_munged(&init_user_ns, hypfs_info->gid)); 276 268 return 0; 277 269 } 278 270
+24 -12
arch/s390/kernel/compat_linux.c
··· 131 131 low2highuid(suid)); 132 132 } 133 133 134 - asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid) 134 + asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp) 135 135 { 136 + const struct cred *cred = current_cred(); 136 137 int retval; 138 + u16 ruid, euid, suid; 137 139 138 - if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && 139 - !(retval = put_user(high2lowuid(current->cred->euid), euid))) 140 - retval = put_user(high2lowuid(current->cred->suid), suid); 140 + ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid)); 141 + euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid)); 142 + suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid)); 143 + 144 + if (!(retval = put_user(ruid, ruidp)) && 145 + !(retval = put_user(euid, euidp))) 146 + retval = put_user(suid, suidp); 141 147 142 148 return retval; 143 149 } ··· 154 148 low2highgid(sgid)); 155 149 } 156 150 157 - asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid) 151 + asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp) 158 152 { 153 + const struct cred *cred = current_cred(); 159 154 int retval; 155 + u16 rgid, egid, sgid; 160 156 161 - if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && 162 - !(retval = put_user(high2lowgid(current->cred->egid), egid))) 163 - retval = put_user(high2lowgid(current->cred->sgid), sgid); 157 + rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid)); 158 + egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid)); 159 + sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid)); 160 + 161 + if (!(retval = put_user(rgid, rgidp)) && 162 + !(retval = put_user(egid, egidp))) 163 + retval = put_user(sgid, sgidp); 164 164 165 165 return retval; 166 166 } ··· 270 258 271 259 asmlinkage long sys32_getuid16(void) 272 260 { 273 - return 
high2lowuid(current->cred->uid); 261 + return high2lowuid(from_kuid_munged(current_user_ns(), current_uid())); 274 262 } 275 263 276 264 asmlinkage long sys32_geteuid16(void) 277 265 { 278 - return high2lowuid(current->cred->euid); 266 + return high2lowuid(from_kuid_munged(current_user_ns(), current_euid())); 279 267 } 280 268 281 269 asmlinkage long sys32_getgid16(void) 282 270 { 283 - return high2lowgid(current->cred->gid); 271 + return high2lowgid(from_kgid_munged(current_user_ns(), current_gid())); 284 272 } 285 273 286 274 asmlinkage long sys32_getegid16(void) 287 275 { 288 - return high2lowgid(current->cred->egid); 276 + return high2lowgid(from_kgid_munged(current_user_ns(), current_egid())); 289 277 } 290 278 291 279 /*
+2 -2
drivers/base/devtmpfs.c
··· 309 309 * before unlinking this node, reset permissions 310 310 * of possible references like hardlinks 311 311 */ 312 - newattrs.ia_uid = 0; 313 - newattrs.ia_gid = 0; 312 + newattrs.ia_uid = GLOBAL_ROOT_UID; 313 + newattrs.ia_gid = GLOBAL_ROOT_GID; 314 314 newattrs.ia_mode = stat.mode & ~0777; 315 315 newattrs.ia_valid = 316 316 ATTR_UID|ATTR_GID|ATTR_MODE;
+2 -2
drivers/block/loop.c
··· 1038 1038 { 1039 1039 int err; 1040 1040 struct loop_func_table *xfer; 1041 - uid_t uid = current_uid(); 1041 + kuid_t uid = current_uid(); 1042 1042 1043 1043 if (lo->lo_encrypt_key_size && 1044 - lo->lo_key_owner != uid && 1044 + !uid_eq(lo->lo_key_owner, uid) && 1045 1045 !capable(CAP_SYS_ADMIN)) 1046 1046 return -EPERM; 1047 1047 if (lo->lo_state != Lo_bound)
+14 -4
drivers/connector/cn_proc.c
··· 30 30 #include <linux/gfp.h> 31 31 #include <linux/ptrace.h> 32 32 #include <linux/atomic.h> 33 + #include <linux/pid_namespace.h> 33 34 34 35 #include <asm/unaligned.h> 35 36 ··· 128 127 rcu_read_lock(); 129 128 cred = __task_cred(task); 130 129 if (which_id == PROC_EVENT_UID) { 131 - ev->event_data.id.r.ruid = cred->uid; 132 - ev->event_data.id.e.euid = cred->euid; 130 + ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid); 131 + ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid); 133 132 } else if (which_id == PROC_EVENT_GID) { 134 - ev->event_data.id.r.rgid = cred->gid; 135 - ev->event_data.id.e.egid = cred->egid; 133 + ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid); 134 + ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid); 136 135 } else { 137 136 rcu_read_unlock(); 138 137 return; ··· 302 301 int err = 0; 303 302 304 303 if (msg->len != sizeof(*mc_op)) 304 + return; 305 + 306 + /* 307 + * Events are reported with respect to the initial pid 308 + * and user namespaces so ignore requestors from 309 + * other namespaces. 310 + */ 311 + if ((current_user_ns() != &init_user_ns) || 312 + (task_active_pid_ns(current) != &init_pid_ns)) 305 313 return; 306 314 307 315 mc_op = (enum proc_cn_mcast_op *)msg->data;
+2 -1
drivers/gpu/drm/drm_fops.c
··· 251 251 filp->private_data = priv; 252 252 priv->filp = filp; 253 253 priv->uid = current_euid(); 254 - priv->pid = task_pid_nr(current); 254 + priv->pid = get_pid(task_pid(current)); 255 255 priv->minor = idr_find(&drm_minors_idr, minor_id); 256 256 priv->ioctl_count = 0; 257 257 /* for compatibility root is always authenticated */ ··· 524 524 if (drm_core_check_feature(dev, DRIVER_PRIME)) 525 525 drm_prime_destroy_file_private(&file_priv->prime); 526 526 527 + put_pid(file_priv->pid); 527 528 kfree(file_priv); 528 529 529 530 /* ========================================================
+3 -2
drivers/gpu/drm/drm_info.c
··· 191 191 seq_printf(m, "%c %3d %5d %5d %10u %10lu\n", 192 192 priv->authenticated ? 'y' : 'n', 193 193 priv->minor->index, 194 - priv->pid, 195 - priv->uid, priv->magic, priv->ioctl_count); 194 + pid_vnr(priv->pid), 195 + from_kuid_munged(seq_user_ns(m), priv->uid), 196 + priv->magic, priv->ioctl_count); 196 197 } 197 198 mutex_unlock(&dev->struct_mutex); 198 199 return 0;
+2 -2
drivers/gpu/drm/drm_ioctl.c
··· 215 215 list_for_each_entry(pt, &dev->filelist, lhead) { 216 216 if (i++ >= idx) { 217 217 client->auth = pt->authenticated; 218 - client->pid = pt->pid; 219 - client->uid = pt->uid; 218 + client->pid = pid_vnr(pt->pid); 219 + client->uid = from_kuid_munged(current_user_ns(), pt->uid); 220 220 client->magic = pt->magic; 221 221 client->iocs = pt->ioctl_count; 222 222 mutex_unlock(&dev->struct_mutex);
+2 -2
drivers/infiniband/hw/qib/qib_fs.c
··· 61 61 62 62 inode->i_ino = get_next_ino(); 63 63 inode->i_mode = mode; 64 - inode->i_uid = 0; 65 - inode->i_gid = 0; 64 + inode->i_uid = GLOBAL_ROOT_UID; 65 + inode->i_gid = GLOBAL_ROOT_GID; 66 66 inode->i_blocks = 0; 67 67 inode->i_atime = CURRENT_TIME; 68 68 inode->i_mtime = inode->i_atime;
+32 -14
drivers/net/tun.c
··· 121 121 struct tun_struct { 122 122 struct tun_file *tfile; 123 123 unsigned int flags; 124 - uid_t owner; 125 - gid_t group; 124 + kuid_t owner; 125 + kgid_t group; 126 126 127 127 struct net_device *dev; 128 128 netdev_features_t set_features; ··· 1032 1032 { 1033 1033 struct tun_struct *tun = netdev_priv(dev); 1034 1034 1035 - tun->owner = -1; 1036 - tun->group = -1; 1035 + tun->owner = INVALID_UID; 1036 + tun->group = INVALID_GID; 1037 1037 1038 1038 dev->ethtool_ops = &tun_ethtool_ops; 1039 1039 dev->destructor = tun_free_netdev; ··· 1156 1156 char *buf) 1157 1157 { 1158 1158 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 1159 - return sprintf(buf, "%d\n", tun->owner); 1159 + return uid_valid(tun->owner)? 1160 + sprintf(buf, "%u\n", 1161 + from_kuid_munged(current_user_ns(), tun->owner)): 1162 + sprintf(buf, "-1\n"); 1160 1163 } 1161 1164 1162 1165 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 1163 1166 char *buf) 1164 1167 { 1165 1168 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 1166 - return sprintf(buf, "%d\n", tun->group); 1169 + return gid_valid(tun->group) ? 
1170 + sprintf(buf, "%u\n", 1171 + from_kgid_munged(current_user_ns(), tun->group)): 1172 + sprintf(buf, "-1\n"); 1167 1173 } 1168 1174 1169 1175 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); ··· 1196 1190 else 1197 1191 return -EINVAL; 1198 1192 1199 - if (((tun->owner != -1 && cred->euid != tun->owner) || 1200 - (tun->group != -1 && !in_egroup_p(tun->group))) && 1193 + if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 1194 + (gid_valid(tun->group) && !in_egroup_p(tun->group))) && 1201 1195 !capable(CAP_NET_ADMIN)) 1202 1196 return -EPERM; 1203 1197 err = security_tun_dev_attach(tun->socket.sk); ··· 1381 1375 void __user* argp = (void __user*)arg; 1382 1376 struct sock_fprog fprog; 1383 1377 struct ifreq ifr; 1378 + kuid_t owner; 1379 + kgid_t group; 1384 1380 int sndbuf; 1385 1381 int vnet_hdr_sz; 1386 1382 int ret; ··· 1456 1448 1457 1449 case TUNSETOWNER: 1458 1450 /* Set owner of the device */ 1459 - tun->owner = (uid_t) arg; 1460 - 1461 - tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); 1451 + owner = make_kuid(current_user_ns(), arg); 1452 + if (!uid_valid(owner)) { 1453 + ret = -EINVAL; 1454 + break; 1455 + } 1456 + tun->owner = owner; 1457 + tun_debug(KERN_INFO, tun, "owner set to %d\n", 1458 + from_kuid(&init_user_ns, tun->owner)); 1462 1459 break; 1463 1460 1464 1461 case TUNSETGROUP: 1465 1462 /* Set group of the device */ 1466 - tun->group= (gid_t) arg; 1467 - 1468 - tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); 1463 + group = make_kgid(current_user_ns(), arg); 1464 + if (!gid_valid(group)) { 1465 + ret = -EINVAL; 1466 + break; 1467 + } 1468 + tun->group = group; 1469 + tun_debug(KERN_INFO, tun, "group set to %d\n", 1470 + from_kgid(&init_user_ns, tun->group)); 1469 1471 break; 1470 1472 1471 1473 case TUNSETLINK:
+28 -20
drivers/net/wireless/airo.c
··· 232 232 233 233 static int probe = 1; 234 234 235 + static kuid_t proc_kuid; 235 236 static int proc_uid /* = 0 */; 236 237 238 + static kgid_t proc_kgid; 237 239 static int proc_gid /* = 0 */; 238 240 239 241 static int airo_perm = 0555; ··· 4501 4499 static int setup_proc_entry( struct net_device *dev, 4502 4500 struct airo_info *apriv ) { 4503 4501 struct proc_dir_entry *entry; 4502 + 4504 4503 /* First setup the device directory */ 4505 4504 strcpy(apriv->proc_name,dev->name); 4506 4505 apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm, 4507 4506 airo_entry); 4508 4507 if (!apriv->proc_entry) 4509 4508 goto fail; 4510 - apriv->proc_entry->uid = proc_uid; 4511 - apriv->proc_entry->gid = proc_gid; 4509 + apriv->proc_entry->uid = proc_kuid; 4510 + apriv->proc_entry->gid = proc_kgid; 4512 4511 4513 4512 /* Setup the StatsDelta */ 4514 4513 entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, 4515 4514 apriv->proc_entry, &proc_statsdelta_ops, dev); 4516 4515 if (!entry) 4517 4516 goto fail_stats_delta; 4518 - entry->uid = proc_uid; 4519 - entry->gid = proc_gid; 4517 + entry->uid = proc_kuid; 4518 + entry->gid = proc_kgid; 4520 4519 4521 4520 /* Setup the Stats */ 4522 4521 entry = proc_create_data("Stats", S_IRUGO & proc_perm, 4523 4522 apriv->proc_entry, &proc_stats_ops, dev); 4524 4523 if (!entry) 4525 4524 goto fail_stats; 4526 - entry->uid = proc_uid; 4527 - entry->gid = proc_gid; 4525 + entry->uid = proc_kuid; 4526 + entry->gid = proc_kgid; 4528 4527 4529 4528 /* Setup the Status */ 4530 4529 entry = proc_create_data("Status", S_IRUGO & proc_perm, 4531 4530 apriv->proc_entry, &proc_status_ops, dev); 4532 4531 if (!entry) 4533 4532 goto fail_status; 4534 - entry->uid = proc_uid; 4535 - entry->gid = proc_gid; 4533 + entry->uid = proc_kuid; 4534 + entry->gid = proc_kgid; 4536 4535 4537 4536 /* Setup the Config */ 4538 4537 entry = proc_create_data("Config", proc_perm, 4539 4538 apriv->proc_entry, &proc_config_ops, dev); 4540 4539 if 
(!entry) 4541 4540 goto fail_config; 4542 - entry->uid = proc_uid; 4543 - entry->gid = proc_gid; 4541 + entry->uid = proc_kuid; 4542 + entry->gid = proc_kgid; 4544 4543 4545 4544 /* Setup the SSID */ 4546 4545 entry = proc_create_data("SSID", proc_perm, 4547 4546 apriv->proc_entry, &proc_SSID_ops, dev); 4548 4547 if (!entry) 4549 4548 goto fail_ssid; 4550 - entry->uid = proc_uid; 4551 - entry->gid = proc_gid; 4549 + entry->uid = proc_kuid; 4550 + entry->gid = proc_kgid; 4552 4551 4553 4552 /* Setup the APList */ 4554 4553 entry = proc_create_data("APList", proc_perm, 4555 4554 apriv->proc_entry, &proc_APList_ops, dev); 4556 4555 if (!entry) 4557 4556 goto fail_aplist; 4558 - entry->uid = proc_uid; 4559 - entry->gid = proc_gid; 4557 + entry->uid = proc_kuid; 4558 + entry->gid = proc_kgid; 4560 4559 4561 4560 /* Setup the BSSList */ 4562 4561 entry = proc_create_data("BSSList", proc_perm, 4563 4562 apriv->proc_entry, &proc_BSSList_ops, dev); 4564 4563 if (!entry) 4565 4564 goto fail_bsslist; 4566 - entry->uid = proc_uid; 4567 - entry->gid = proc_gid; 4565 + entry->uid = proc_kuid; 4566 + entry->gid = proc_kgid; 4568 4567 4569 4568 /* Setup the WepKey */ 4570 4569 entry = proc_create_data("WepKey", proc_perm, 4571 4570 apriv->proc_entry, &proc_wepkey_ops, dev); 4572 4571 if (!entry) 4573 4572 goto fail_wepkey; 4574 - entry->uid = proc_uid; 4575 - entry->gid = proc_gid; 4573 + entry->uid = proc_kuid; 4574 + entry->gid = proc_kgid; 4576 4575 4577 4576 return 0; 4578 4577 ··· 5700 5697 { 5701 5698 int i; 5702 5699 5700 + proc_kuid = make_kuid(&init_user_ns, proc_uid); 5701 + proc_kgid = make_kgid(&init_user_ns, proc_gid); 5702 + if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid)) 5703 + return -EINVAL; 5704 + 5703 5705 airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); 5704 5706 5705 5707 if (airo_entry) { 5706 - airo_entry->uid = proc_uid; 5707 - airo_entry->gid = proc_gid; 5708 + airo_entry->uid = proc_kuid; 5709 + airo_entry->gid = proc_kgid; 5708 5710 } 
5709 5711 5710 5712 for (i = 0; i < 4 && io[i] && irq[i]; i++) {
+7 -7
drivers/staging/android/binder.c
··· 47 47 static struct dentry *binder_debugfs_dir_entry_root; 48 48 static struct dentry *binder_debugfs_dir_entry_proc; 49 49 static struct binder_node *binder_context_mgr_node; 50 - static uid_t binder_context_mgr_uid = -1; 50 + static kuid_t binder_context_mgr_uid = INVALID_UID; 51 51 static int binder_last_id; 52 52 static struct workqueue_struct *binder_deferred_workqueue; 53 53 ··· 356 356 unsigned int flags; 357 357 long priority; 358 358 long saved_priority; 359 - uid_t sender_euid; 359 + kuid_t sender_euid; 360 360 }; 361 361 362 362 static void ··· 2427 2427 } 2428 2428 tr.code = t->code; 2429 2429 tr.flags = t->flags; 2430 - tr.sender_euid = t->sender_euid; 2430 + tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2431 2431 2432 2432 if (t->from) { 2433 2433 struct task_struct *sender = t->from->proc->tsk; ··· 2705 2705 ret = -EBUSY; 2706 2706 goto err; 2707 2707 } 2708 - if (binder_context_mgr_uid != -1) { 2709 - if (binder_context_mgr_uid != current->cred->euid) { 2708 + if (uid_valid(binder_context_mgr_uid)) { 2709 + if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) { 2710 2710 pr_err("binder: BINDER_SET_" 2711 2711 "CONTEXT_MGR bad uid %d != %d\n", 2712 - current->cred->euid, 2713 - binder_context_mgr_uid); 2712 + from_kuid(&init_user_ns, current->cred->euid), 2713 + from_kuid(&init_user_ns, binder_context_mgr_uid)); 2714 2714 ret = -EPERM; 2715 2715 goto err; 2716 2716 }
+10 -7
drivers/tty/tty_audit.c
··· 61 61 } 62 62 63 63 static void tty_audit_log(const char *description, struct task_struct *tsk, 64 - uid_t loginuid, unsigned sessionid, int major, 64 + kuid_t loginuid, unsigned sessionid, int major, 65 65 int minor, unsigned char *data, size_t size) 66 66 { 67 67 struct audit_buffer *ab; ··· 69 69 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); 70 70 if (ab) { 71 71 char name[sizeof(tsk->comm)]; 72 - uid_t uid = task_uid(tsk); 72 + kuid_t uid = task_uid(tsk); 73 73 74 74 audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " 75 75 "major=%d minor=%d comm=", description, 76 - tsk->pid, uid, loginuid, sessionid, 76 + tsk->pid, 77 + from_kuid(&init_user_ns, uid), 78 + from_kuid(&init_user_ns, loginuid), 79 + sessionid, 77 80 major, minor); 78 81 get_task_comm(name, tsk); 79 82 audit_log_untrustedstring(ab, name); ··· 92 89 * Generate an audit message from the contents of @buf, which is owned by 93 90 * @tsk with @loginuid. @buf->mutex must be locked. 94 91 */ 95 - static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid, 92 + static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, 96 93 unsigned int sessionid, 97 94 struct tty_audit_buf *buf) 98 95 { ··· 115 112 */ 116 113 static void tty_audit_buf_push_current(struct tty_audit_buf *buf) 117 114 { 118 - uid_t auid = audit_get_loginuid(current); 115 + kuid_t auid = audit_get_loginuid(current); 119 116 unsigned int sessionid = audit_get_sessionid(current); 120 117 tty_audit_buf_push(current, auid, sessionid, buf); 121 118 } ··· 182 179 } 183 180 184 181 if (should_audit && audit_enabled) { 185 - uid_t auid; 182 + kuid_t auid; 186 183 unsigned int sessionid; 187 184 188 185 auid = audit_get_loginuid(current); ··· 202 199 * reference to the tty audit buffer if available. 203 200 * Flush the buffer or return an appropriate error code. 
204 201 */ 205 - int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) 202 + int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) 206 203 { 207 204 struct tty_audit_buf *buf = ERR_PTR(-EPERM); 208 205 unsigned long flags;
+16 -7
drivers/usb/gadget/f_fs.c
··· 224 224 /* File permissions, written once when fs is mounted */ 225 225 struct ffs_file_perms { 226 226 umode_t mode; 227 - uid_t uid; 228 - gid_t gid; 227 + kuid_t uid; 228 + kgid_t gid; 229 229 } file_perms; 230 230 231 231 /* ··· 1147 1147 break; 1148 1148 1149 1149 case 3: 1150 - if (!memcmp(opts, "uid", 3)) 1151 - data->perms.uid = value; 1150 + if (!memcmp(opts, "uid", 3)) { 1151 + data->perms.uid = make_kuid(current_user_ns(), value); 1152 + if (!uid_valid(data->perms.uid)) { 1153 + pr_err("%s: unmapped value: %lu\n", opts, value); 1154 + return -EINVAL; 1155 + } 1156 + } 1152 1157 else if (!memcmp(opts, "gid", 3)) 1153 - data->perms.gid = value; 1158 + data->perms.gid = make_kgid(current_user_ns(), value); 1159 + if (!gid_valid(data->perms.gid)) { 1160 + pr_err("%s: unmapped value: %lu\n", opts, value); 1161 + return -EINVAL; 1162 + } 1154 1163 else 1155 1164 goto invalid; 1156 1165 break; ··· 1188 1179 struct ffs_sb_fill_data data = { 1189 1180 .perms = { 1190 1181 .mode = S_IFREG | 0600, 1191 - .uid = 0, 1192 - .gid = 0 1182 + .uid = GLOBAL_ROOT_UID, 1183 + .gid = GLOBAL_ROOT_GID, 1193 1184 }, 1194 1185 .root_mode = S_IFDIR | 0500, 1195 1186 };
+2 -2
drivers/usb/gadget/inode.c
··· 1985 1985 if (inode) { 1986 1986 inode->i_ino = get_next_ino(); 1987 1987 inode->i_mode = mode; 1988 - inode->i_uid = default_uid; 1989 - inode->i_gid = default_gid; 1988 + inode->i_uid = make_kuid(&init_user_ns, default_uid); 1989 + inode->i_gid = make_kgid(&init_user_ns, default_gid); 1990 1990 inode->i_atime = inode->i_mtime = inode->i_ctime 1991 1991 = CURRENT_TIME; 1992 1992 inode->i_private = data;
+2 -1
drivers/xen/xenfs/super.c
··· 30 30 31 31 if (ret) { 32 32 ret->i_mode = mode; 33 - ret->i_uid = ret->i_gid = 0; 33 + ret->i_uid = GLOBAL_ROOT_UID; 34 + ret->i_gid = GLOBAL_ROOT_GID; 34 35 ret->i_blocks = 0; 35 36 ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; 36 37 }
+4 -4
fs/9p/acl.c
··· 37 37 return ERR_PTR(-ENOMEM); 38 38 size = v9fs_fid_xattr_get(fid, name, value, size); 39 39 if (size > 0) { 40 - acl = posix_acl_from_xattr(value, size); 40 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 41 41 if (IS_ERR(acl)) 42 42 goto err_out; 43 43 } ··· 131 131 buffer = kmalloc(size, GFP_KERNEL); 132 132 if (!buffer) 133 133 return -ENOMEM; 134 - retval = posix_acl_to_xattr(acl, buffer, size); 134 + retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 135 135 if (retval < 0) 136 136 goto err_free_out; 137 137 switch (type) { ··· 251 251 return PTR_ERR(acl); 252 252 if (acl == NULL) 253 253 return -ENODATA; 254 - error = posix_acl_to_xattr(acl, buffer, size); 254 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 255 255 posix_acl_release(acl); 256 256 257 257 return error; ··· 304 304 return -EPERM; 305 305 if (value) { 306 306 /* update the cached acl value */ 307 - acl = posix_acl_from_xattr(value, size); 307 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 308 308 if (IS_ERR(acl)) 309 309 return PTR_ERR(acl); 310 310 else if (acl) {
+2 -2
fs/adfs/adfs.h
··· 46 46 struct adfs_discmap *s_map; /* bh list containing map */ 47 47 struct adfs_dir_ops *s_dir; /* directory operations */ 48 48 49 - uid_t s_uid; /* owner uid */ 50 - gid_t s_gid; /* owner gid */ 49 + kuid_t s_uid; /* owner uid */ 50 + kgid_t s_gid; /* owner gid */ 51 51 umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ 52 52 umode_t s_other_mask; /* ADFS other perm -> unix perm */ 53 53 int s_ftsuffix; /* ,xyz hex filetype suffix option */
+2 -2
fs/adfs/inode.c
··· 304 304 * we can't change the UID or GID of any file - 305 305 * we have a global UID/GID in the superblock 306 306 */ 307 - if ((ia_valid & ATTR_UID && attr->ia_uid != ADFS_SB(sb)->s_uid) || 308 - (ia_valid & ATTR_GID && attr->ia_gid != ADFS_SB(sb)->s_gid)) 307 + if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, ADFS_SB(sb)->s_uid)) || 308 + (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, ADFS_SB(sb)->s_gid))) 309 309 error = -EPERM; 310 310 311 311 if (error)
+13 -8
fs/adfs/super.c
··· 15 15 #include <linux/seq_file.h> 16 16 #include <linux/slab.h> 17 17 #include <linux/statfs.h> 18 + #include <linux/user_namespace.h> 18 19 #include "adfs.h" 19 20 #include "dir_f.h" 20 21 #include "dir_fplus.h" ··· 131 130 { 132 131 struct adfs_sb_info *asb = ADFS_SB(root->d_sb); 133 132 134 - if (asb->s_uid != 0) 135 - seq_printf(seq, ",uid=%u", asb->s_uid); 136 - if (asb->s_gid != 0) 137 - seq_printf(seq, ",gid=%u", asb->s_gid); 133 + if (!uid_eq(asb->s_uid, GLOBAL_ROOT_UID)) 134 + seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, asb->s_uid)); 135 + if (!gid_eq(asb->s_gid, GLOBAL_ROOT_GID)) 136 + seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, asb->s_gid)); 138 137 if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK) 139 138 seq_printf(seq, ",ownmask=%o", asb->s_owner_mask); 140 139 if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK) ··· 176 175 case Opt_uid: 177 176 if (match_int(args, &option)) 178 177 return -EINVAL; 179 - asb->s_uid = option; 178 + asb->s_uid = make_kuid(current_user_ns(), option); 179 + if (!uid_valid(asb->s_uid)) 180 + return -EINVAL; 180 181 break; 181 182 case Opt_gid: 182 183 if (match_int(args, &option)) 183 184 return -EINVAL; 184 - asb->s_gid = option; 185 + asb->s_gid = make_kgid(current_user_ns(), option); 186 + if (!gid_valid(asb->s_gid)) 187 + return -EINVAL; 185 188 break; 186 189 case Opt_ownmask: 187 190 if (match_octal(args, &option)) ··· 374 369 sb->s_fs_info = asb; 375 370 376 371 /* set default options */ 377 - asb->s_uid = 0; 378 - asb->s_gid = 0; 372 + asb->s_uid = GLOBAL_ROOT_UID; 373 + asb->s_gid = GLOBAL_ROOT_GID; 379 374 asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK; 380 375 asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK; 381 376 asb->s_ftsuffix = 0;
+2 -2
fs/affs/affs.h
··· 88 88 u32 s_root_block; /* FFS root block number. */ 89 89 int s_hashsize; /* Size of hash table. */ 90 90 unsigned long s_flags; /* See below. */ 91 - uid_t s_uid; /* uid to override */ 92 - gid_t s_gid; /* gid to override */ 91 + kuid_t s_uid; /* uid to override */ 92 + kgid_t s_gid; /* gid to override */ 93 93 umode_t s_mode; /* mode to override */ 94 94 struct buffer_head *s_root_bh; /* Cached root block. */ 95 95 struct mutex s_bmlock; /* Protects bitmap access. */
+10 -10
fs/affs/inode.c
··· 80 80 if (id == 0 || sbi->s_flags & SF_SETUID) 81 81 inode->i_uid = sbi->s_uid; 82 82 else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) 83 - inode->i_uid = 0; 83 + i_uid_write(inode, 0); 84 84 else 85 - inode->i_uid = id; 85 + i_uid_write(inode, id); 86 86 87 87 id = be16_to_cpu(tail->gid); 88 88 if (id == 0 || sbi->s_flags & SF_SETGID) 89 89 inode->i_gid = sbi->s_gid; 90 90 else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) 91 - inode->i_gid = 0; 91 + i_gid_write(inode, 0); 92 92 else 93 - inode->i_gid = id; 93 + i_gid_write(inode, id); 94 94 95 95 switch (be32_to_cpu(tail->stype)) { 96 96 case ST_ROOT: ··· 193 193 tail->size = cpu_to_be32(inode->i_size); 194 194 secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change); 195 195 if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) { 196 - uid = inode->i_uid; 197 - gid = inode->i_gid; 196 + uid = i_uid_read(inode); 197 + gid = i_gid_read(inode); 198 198 if (AFFS_SB(sb)->s_flags & SF_MUFS) { 199 - if (inode->i_uid == 0 || inode->i_uid == 0xFFFF) 200 - uid = inode->i_uid ^ ~0; 201 - if (inode->i_gid == 0 || inode->i_gid == 0xFFFF) 202 - gid = inode->i_gid ^ ~0; 199 + if (uid == 0 || uid == 0xFFFF) 200 + uid = uid ^ ~0; 201 + if (gid == 0 || gid == 0xFFFF) 202 + gid = gid ^ ~0; 203 203 } 204 204 if (!(AFFS_SB(sb)->s_flags & SF_SETUID)) 205 205 tail->uid = cpu_to_be16(uid);
+11 -7
fs/affs/super.c
··· 188 188 }; 189 189 190 190 static int 191 - parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s32 *root, 191 + parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root, 192 192 int *blocksize, char **prefix, char *volume, unsigned long *mount_opts) 193 193 { 194 194 char *p; ··· 253 253 case Opt_setgid: 254 254 if (match_int(&args[0], &option)) 255 255 return 0; 256 - *gid = option; 256 + *gid = make_kgid(current_user_ns(), option); 257 + if (!gid_valid(*gid)) 258 + return 0; 257 259 *mount_opts |= SF_SETGID; 258 260 break; 259 261 case Opt_setuid: 260 262 if (match_int(&args[0], &option)) 261 263 return 0; 262 - *uid = option; 264 + *uid = make_kuid(current_user_ns(), option); 265 + if (!uid_valid(*uid)) 266 + return 0; 263 267 *mount_opts |= SF_SETUID; 264 268 break; 265 269 case Opt_verbose: ··· 305 301 int num_bm; 306 302 int i, j; 307 303 s32 key; 308 - uid_t uid; 309 - gid_t gid; 304 + kuid_t uid; 305 + kgid_t gid; 310 306 int reserved; 311 307 unsigned long mount_flags; 312 308 int tmp_flags; /* fix remount prototype... */ ··· 531 527 { 532 528 struct affs_sb_info *sbi = AFFS_SB(sb); 533 529 int blocksize; 534 - uid_t uid; 535 - gid_t gid; 530 + kuid_t uid; 531 + kgid_t gid; 536 532 int mode; 537 533 int reserved; 538 534 int root_block;
+2 -2
fs/befs/befs.h
··· 20 20 */ 21 21 22 22 typedef struct befs_mount_options { 23 - gid_t gid; 24 - uid_t uid; 23 + kgid_t gid; 24 + kuid_t uid; 25 25 int use_gid; 26 26 int use_uid; 27 27 int debug;
+19 -8
fs/befs/linuxvfs.c
··· 15 15 #include <linux/vfs.h> 16 16 #include <linux/parser.h> 17 17 #include <linux/namei.h> 18 + #include <linux/sched.h> 18 19 19 20 #include "befs.h" 20 21 #include "btree.h" ··· 353 352 */ 354 353 355 354 inode->i_uid = befs_sb->mount_opts.use_uid ? 356 - befs_sb->mount_opts.uid : (uid_t) fs32_to_cpu(sb, raw_inode->uid); 355 + befs_sb->mount_opts.uid : 356 + make_kuid(&init_user_ns, fs32_to_cpu(sb, raw_inode->uid)); 357 357 inode->i_gid = befs_sb->mount_opts.use_gid ? 358 - befs_sb->mount_opts.gid : (gid_t) fs32_to_cpu(sb, raw_inode->gid); 358 + befs_sb->mount_opts.gid : 359 + make_kgid(&init_user_ns, fs32_to_cpu(sb, raw_inode->gid)); 359 360 360 361 set_nlink(inode, 1); 361 362 ··· 677 674 char *p; 678 675 substring_t args[MAX_OPT_ARGS]; 679 676 int option; 677 + kuid_t uid; 678 + kgid_t gid; 680 679 681 680 /* Initialize options */ 682 - opts->uid = 0; 683 - opts->gid = 0; 681 + opts->uid = GLOBAL_ROOT_UID; 682 + opts->gid = GLOBAL_ROOT_GID; 684 683 opts->use_uid = 0; 685 684 opts->use_gid = 0; 686 685 opts->iocharset = NULL; ··· 701 696 case Opt_uid: 702 697 if (match_int(&args[0], &option)) 703 698 return 0; 704 - if (option < 0) { 699 + uid = INVALID_UID; 700 + if (option >= 0) 701 + uid = make_kuid(current_user_ns(), option); 702 + if (!uid_valid(uid)) { 705 703 printk(KERN_ERR "BeFS: Invalid uid %d, " 706 704 "using default\n", option); 707 705 break; 708 706 } 709 - opts->uid = option; 707 + opts->uid = uid; 710 708 opts->use_uid = 1; 711 709 break; 712 710 case Opt_gid: 713 711 if (match_int(&args[0], &option)) 714 712 return 0; 715 - if (option < 0) { 713 + gid = INVALID_GID; 714 + if (option >= 0) 715 + gid = make_kgid(current_user_ns(), option); 716 + if (!gid_valid(gid)) { 716 717 printk(KERN_ERR "BeFS: Invalid gid %d, " 717 718 "using default\n", option); 718 719 break; 719 720 } 720 - opts->gid = option; 721 + opts->gid = gid; 721 722 opts->use_gid = 1; 722 723 break; 723 724 case Opt_charset:
+4 -4
fs/bfs/inode.c
··· 76 76 BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock); 77 77 BFS_I(inode)->i_eblock = le32_to_cpu(di->i_eblock); 78 78 BFS_I(inode)->i_dsk_ino = le16_to_cpu(di->i_ino); 79 - inode->i_uid = le32_to_cpu(di->i_uid); 80 - inode->i_gid = le32_to_cpu(di->i_gid); 79 + i_uid_write(inode, le32_to_cpu(di->i_uid)); 80 + i_gid_write(inode, le32_to_cpu(di->i_gid)); 81 81 set_nlink(inode, le32_to_cpu(di->i_nlink)); 82 82 inode->i_size = BFS_FILESIZE(di); 83 83 inode->i_blocks = BFS_FILEBLOCKS(di); ··· 139 139 140 140 di->i_ino = cpu_to_le16(ino); 141 141 di->i_mode = cpu_to_le32(inode->i_mode); 142 - di->i_uid = cpu_to_le32(inode->i_uid); 143 - di->i_gid = cpu_to_le32(inode->i_gid); 142 + di->i_uid = cpu_to_le32(i_uid_read(inode)); 143 + di->i_gid = cpu_to_le32(i_gid_read(inode)); 144 144 di->i_nlink = cpu_to_le32(inode->i_nlink); 145 145 di->i_atime = cpu_to_le32(inode->i_atime.tv_sec); 146 146 di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+4 -4
fs/btrfs/acl.c
··· 61 61 size = __btrfs_getxattr(inode, name, value, size); 62 62 } 63 63 if (size > 0) { 64 - acl = posix_acl_from_xattr(value, size); 64 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 65 65 } else if (size == -ENOENT || size == -ENODATA || size == 0) { 66 66 /* FIXME, who returns -ENOENT? I think nobody */ 67 67 acl = NULL; ··· 91 91 return PTR_ERR(acl); 92 92 if (acl == NULL) 93 93 return -ENODATA; 94 - ret = posix_acl_to_xattr(acl, value, size); 94 + ret = posix_acl_to_xattr(&init_user_ns, acl, value, size); 95 95 posix_acl_release(acl); 96 96 97 97 return ret; ··· 141 141 goto out; 142 142 } 143 143 144 - ret = posix_acl_to_xattr(acl, value, size); 144 + ret = posix_acl_to_xattr(&init_user_ns, acl, value, size); 145 145 if (ret < 0) 146 146 goto out; 147 147 } ··· 169 169 return -EOPNOTSUPP; 170 170 171 171 if (value) { 172 - acl = posix_acl_from_xattr(value, size); 172 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 173 173 if (IS_ERR(acl)) 174 174 return PTR_ERR(acl); 175 175
+4 -4
fs/btrfs/delayed-inode.c
··· 1715 1715 struct btrfs_inode_item *inode_item, 1716 1716 struct inode *inode) 1717 1717 { 1718 - btrfs_set_stack_inode_uid(inode_item, inode->i_uid); 1719 - btrfs_set_stack_inode_gid(inode_item, inode->i_gid); 1718 + btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode)); 1719 + btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode)); 1720 1720 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); 1721 1721 btrfs_set_stack_inode_mode(inode_item, inode->i_mode); 1722 1722 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); ··· 1764 1764 1765 1765 inode_item = &delayed_node->inode_item; 1766 1766 1767 - inode->i_uid = btrfs_stack_inode_uid(inode_item); 1768 - inode->i_gid = btrfs_stack_inode_gid(inode_item); 1767 + i_uid_write(inode, btrfs_stack_inode_uid(inode_item)); 1768 + i_gid_write(inode, btrfs_stack_inode_gid(inode_item)); 1769 1769 btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); 1770 1770 inode->i_mode = btrfs_stack_inode_mode(inode_item); 1771 1771 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
+4 -4
fs/btrfs/inode.c
··· 2572 2572 struct btrfs_inode_item); 2573 2573 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2574 2574 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 2575 - inode->i_uid = btrfs_inode_uid(leaf, inode_item); 2576 - inode->i_gid = btrfs_inode_gid(leaf, inode_item); 2575 + i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 2576 + i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 2577 2577 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 2578 2578 2579 2579 tspec = btrfs_inode_atime(inode_item); ··· 2651 2651 struct btrfs_inode_item *item, 2652 2652 struct inode *inode) 2653 2653 { 2654 - btrfs_set_inode_uid(leaf, item, inode->i_uid); 2655 - btrfs_set_inode_gid(leaf, item, inode->i_gid); 2654 + btrfs_set_inode_uid(leaf, item, i_uid_read(inode)); 2655 + btrfs_set_inode_gid(leaf, item, i_gid_read(inode)); 2656 2656 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2657 2657 btrfs_set_inode_mode(leaf, item, inode->i_mode); 2658 2658 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+3 -3
fs/btrfs/ioctl.c
··· 575 575 */ 576 576 static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) 577 577 { 578 - uid_t fsuid = current_fsuid(); 578 + kuid_t fsuid = current_fsuid(); 579 579 580 580 if (!(dir->i_mode & S_ISVTX)) 581 581 return 0; 582 - if (inode->i_uid == fsuid) 582 + if (uid_eq(inode->i_uid, fsuid)) 583 583 return 0; 584 - if (dir->i_uid == fsuid) 584 + if (uid_eq(dir->i_uid, fsuid)) 585 585 return 0; 586 586 return !capable(CAP_FOWNER); 587 587 }
+2 -2
fs/configfs/inode.c
··· 79 79 return -ENOMEM; 80 80 /* assign default attributes */ 81 81 sd_iattr->ia_mode = sd->s_mode; 82 - sd_iattr->ia_uid = 0; 83 - sd_iattr->ia_gid = 0; 82 + sd_iattr->ia_uid = GLOBAL_ROOT_UID; 83 + sd_iattr->ia_gid = GLOBAL_ROOT_GID; 84 84 sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME; 85 85 sd->s_iattr = sd_iattr; 86 86 }
+2 -2
fs/cramfs/inode.c
··· 90 90 } 91 91 92 92 inode->i_mode = cramfs_inode->mode; 93 - inode->i_uid = cramfs_inode->uid; 94 - inode->i_gid = cramfs_inode->gid; 93 + i_uid_write(inode, cramfs_inode->uid); 94 + i_gid_write(inode, cramfs_inode->gid); 95 95 96 96 /* if the lower 2 bits are zero, the inode contains data */ 97 97 if (!(inode->i_ino & 3)) {
+18 -8
fs/debugfs/inode.c
··· 128 128 } 129 129 130 130 struct debugfs_mount_opts { 131 - uid_t uid; 132 - gid_t gid; 131 + kuid_t uid; 132 + kgid_t gid; 133 133 umode_t mode; 134 134 }; 135 135 ··· 156 156 substring_t args[MAX_OPT_ARGS]; 157 157 int option; 158 158 int token; 159 + kuid_t uid; 160 + kgid_t gid; 159 161 char *p; 160 162 161 163 opts->mode = DEBUGFS_DEFAULT_MODE; ··· 171 169 case Opt_uid: 172 170 if (match_int(&args[0], &option)) 173 171 return -EINVAL; 174 - opts->uid = option; 172 + uid = make_kuid(current_user_ns(), option); 173 + if (!uid_valid(uid)) 174 + return -EINVAL; 175 + opts->uid = uid; 175 176 break; 176 177 case Opt_gid: 177 178 if (match_octal(&args[0], &option)) 178 179 return -EINVAL; 179 - opts->gid = option; 180 + gid = make_kgid(current_user_ns(), option); 181 + if (!gid_valid(gid)) 182 + return -EINVAL; 183 + opts->gid = gid; 180 184 break; 181 185 case Opt_mode: 182 186 if (match_octal(&args[0], &option)) ··· 234 226 struct debugfs_fs_info *fsi = root->d_sb->s_fs_info; 235 227 struct debugfs_mount_opts *opts = &fsi->mount_opts; 236 228 237 - if (opts->uid != 0) 238 - seq_printf(m, ",uid=%u", opts->uid); 239 - if (opts->gid != 0) 240 - seq_printf(m, ",gid=%u", opts->gid); 229 + if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) 230 + seq_printf(m, ",uid=%u", 231 + from_kuid_munged(&init_user_ns, opts->uid)); 232 + if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) 233 + seq_printf(m, ",gid=%u", 234 + from_kgid_munged(&init_user_ns, opts->gid)); 241 235 if (opts->mode != DEBUGFS_DEFAULT_MODE) 242 236 seq_printf(m, ",mode=%o", opts->mode); 243 237
+3 -2
fs/ecryptfs/main.c
··· 545 545 goto out_free; 546 546 } 547 547 548 - if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { 548 + if (check_ruid && !uid_eq(path.dentry->d_inode->i_uid, current_uid())) { 549 549 rc = -EPERM; 550 550 printk(KERN_ERR "Mount of device (uid: %d) not owned by " 551 551 "requested user (uid: %d)\n", 552 - path.dentry->d_inode->i_uid, current_uid()); 552 + i_uid_read(path.dentry->d_inode), 553 + from_kuid(&init_user_ns, current_uid())); 553 554 goto out_free; 554 555 } 555 556
+2 -3
fs/ecryptfs/messaging.c
··· 33 33 struct mutex ecryptfs_daemon_hash_mux; 34 34 static int ecryptfs_hash_bits; 35 35 #define ecryptfs_current_euid_hash(uid) \ 36 - hash_long((unsigned long)current_euid(), ecryptfs_hash_bits) 36 + hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits) 37 37 38 38 static u32 ecryptfs_msg_counter; 39 39 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; ··· 121 121 hlist_for_each_entry(*daemon, elem, 122 122 &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], 123 123 euid_chain) { 124 - if ((*daemon)->file->f_cred->euid == current_euid() && 125 - (*daemon)->file->f_cred->user_ns == current_user_ns()) { 124 + if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) { 126 125 rc = 0; 127 126 goto out; 128 127 }
+2 -2
fs/efs/inode.c
··· 97 97 98 98 inode->i_mode = be16_to_cpu(efs_inode->di_mode); 99 99 set_nlink(inode, be16_to_cpu(efs_inode->di_nlink)); 100 - inode->i_uid = (uid_t)be16_to_cpu(efs_inode->di_uid); 101 - inode->i_gid = (gid_t)be16_to_cpu(efs_inode->di_gid); 100 + i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid)); 101 + i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid)); 102 102 inode->i_size = be32_to_cpu(efs_inode->di_size); 103 103 inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime); 104 104 inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
+4 -4
fs/exofs/inode.c
··· 1172 1172 1173 1173 /* copy stuff from on-disk struct to in-memory struct */ 1174 1174 inode->i_mode = le16_to_cpu(fcb.i_mode); 1175 - inode->i_uid = le32_to_cpu(fcb.i_uid); 1176 - inode->i_gid = le32_to_cpu(fcb.i_gid); 1175 + i_uid_write(inode, le32_to_cpu(fcb.i_uid)); 1176 + i_gid_write(inode, le32_to_cpu(fcb.i_gid)); 1177 1177 set_nlink(inode, le16_to_cpu(fcb.i_links_count)); 1178 1178 inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime); 1179 1179 inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime); ··· 1385 1385 fcb = &args->fcb; 1386 1386 1387 1387 fcb->i_mode = cpu_to_le16(inode->i_mode); 1388 - fcb->i_uid = cpu_to_le32(inode->i_uid); 1389 - fcb->i_gid = cpu_to_le32(inode->i_gid); 1388 + fcb->i_uid = cpu_to_le32(i_uid_read(inode)); 1389 + fcb->i_gid = cpu_to_le32(i_gid_read(inode)); 1390 1390 fcb->i_links_count = cpu_to_le16(inode->i_nlink); 1391 1391 fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); 1392 1392 fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+22 -10
fs/ext2/acl.c
··· 53 53 case ACL_OTHER: 54 54 value = (char *)value + 55 55 sizeof(ext2_acl_entry_short); 56 - acl->a_entries[n].e_id = ACL_UNDEFINED_ID; 57 56 break; 58 57 59 58 case ACL_USER: 59 + value = (char *)value + sizeof(ext2_acl_entry); 60 + if ((char *)value > end) 61 + goto fail; 62 + acl->a_entries[n].e_uid = 63 + make_kuid(&init_user_ns, 64 + le32_to_cpu(entry->e_id)); 65 + break; 60 66 case ACL_GROUP: 61 67 value = (char *)value + sizeof(ext2_acl_entry); 62 68 if ((char *)value > end) 63 69 goto fail; 64 - acl->a_entries[n].e_id = 65 - le32_to_cpu(entry->e_id); 70 + acl->a_entries[n].e_gid = 71 + make_kgid(&init_user_ns, 72 + le32_to_cpu(entry->e_id)); 66 73 break; 67 74 68 75 default: ··· 103 96 ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION); 104 97 e = (char *)ext_acl + sizeof(ext2_acl_header); 105 98 for (n=0; n < acl->a_count; n++) { 99 + const struct posix_acl_entry *acl_e = &acl->a_entries[n]; 106 100 ext2_acl_entry *entry = (ext2_acl_entry *)e; 107 - entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); 108 - entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); 109 - switch(acl->a_entries[n].e_tag) { 101 + entry->e_tag = cpu_to_le16(acl_e->e_tag); 102 + entry->e_perm = cpu_to_le16(acl_e->e_perm); 103 + switch(acl_e->e_tag) { 110 104 case ACL_USER: 105 + entry->e_id = cpu_to_le32( 106 + from_kuid(&init_user_ns, acl_e->e_uid)); 107 + e += sizeof(ext2_acl_entry); 108 + break; 111 109 case ACL_GROUP: 112 - entry->e_id = 113 - cpu_to_le32(acl->a_entries[n].e_id); 110 + entry->e_id = cpu_to_le32( 111 + from_kgid(&init_user_ns, acl_e->e_gid)); 114 112 e += sizeof(ext2_acl_entry); 115 113 break; 116 114 ··· 362 350 return PTR_ERR(acl); 363 351 if (acl == NULL) 364 352 return -ENODATA; 365 - error = posix_acl_to_xattr(acl, buffer, size); 353 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 366 354 posix_acl_release(acl); 367 355 368 356 return error; ··· 383 371 return -EPERM; 384 372 385 373 if (value) { 386 - acl = posix_acl_from_xattr(value, 
size); 374 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 387 375 if (IS_ERR(acl)) 388 376 return PTR_ERR(acl); 389 377 else if (acl) {
+22 -10
fs/ext3/acl.c
··· 48 48 case ACL_OTHER: 49 49 value = (char *)value + 50 50 sizeof(ext3_acl_entry_short); 51 - acl->a_entries[n].e_id = ACL_UNDEFINED_ID; 52 51 break; 53 52 54 53 case ACL_USER: 54 + value = (char *)value + sizeof(ext3_acl_entry); 55 + if ((char *)value > end) 56 + goto fail; 57 + acl->a_entries[n].e_uid = 58 + make_kuid(&init_user_ns, 59 + le32_to_cpu(entry->e_id)); 60 + break; 55 61 case ACL_GROUP: 56 62 value = (char *)value + sizeof(ext3_acl_entry); 57 63 if ((char *)value > end) 58 64 goto fail; 59 - acl->a_entries[n].e_id = 60 - le32_to_cpu(entry->e_id); 65 + acl->a_entries[n].e_gid = 66 + make_kgid(&init_user_ns, 67 + le32_to_cpu(entry->e_id)); 61 68 break; 62 69 63 70 default: ··· 98 91 ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION); 99 92 e = (char *)ext_acl + sizeof(ext3_acl_header); 100 93 for (n=0; n < acl->a_count; n++) { 94 + const struct posix_acl_entry *acl_e = &acl->a_entries[n]; 101 95 ext3_acl_entry *entry = (ext3_acl_entry *)e; 102 - entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); 103 - entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); 104 - switch(acl->a_entries[n].e_tag) { 96 + entry->e_tag = cpu_to_le16(acl_e->e_tag); 97 + entry->e_perm = cpu_to_le16(acl_e->e_perm); 98 + switch(acl_e->e_tag) { 105 99 case ACL_USER: 100 + entry->e_id = cpu_to_le32( 101 + from_kuid(&init_user_ns, acl_e->e_uid)); 102 + e += sizeof(ext3_acl_entry); 103 + break; 106 104 case ACL_GROUP: 107 - entry->e_id = 108 - cpu_to_le32(acl->a_entries[n].e_id); 105 + entry->e_id = cpu_to_le32( 106 + from_kgid(&init_user_ns, acl_e->e_gid)); 109 107 e += sizeof(ext3_acl_entry); 110 108 break; 111 109 ··· 381 369 return PTR_ERR(acl); 382 370 if (acl == NULL) 383 371 return -ENODATA; 384 - error = posix_acl_to_xattr(acl, buffer, size); 372 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 385 373 posix_acl_release(acl); 386 374 387 375 return error; ··· 404 392 return -EPERM; 405 393 406 394 if (value) { 407 - acl = posix_acl_from_xattr(value, size); 
395 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 408 396 if (IS_ERR(acl)) 409 397 return PTR_ERR(acl); 410 398 else if (acl) {
+1 -1
fs/ext3/super.c
··· 2803 2803 2804 2804 static inline struct inode *dquot_to_inode(struct dquot *dquot) 2805 2805 { 2806 - return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 2806 + return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 2807 2807 } 2808 2808 2809 2809 static int ext3_write_dquot(struct dquot *dquot)
+22 -9
fs/ext4/acl.c
··· 55 55 case ACL_OTHER: 56 56 value = (char *)value + 57 57 sizeof(ext4_acl_entry_short); 58 - acl->a_entries[n].e_id = ACL_UNDEFINED_ID; 59 58 break; 60 59 61 60 case ACL_USER: 61 + value = (char *)value + sizeof(ext4_acl_entry); 62 + if ((char *)value > end) 63 + goto fail; 64 + acl->a_entries[n].e_uid = 65 + make_kuid(&init_user_ns, 66 + le32_to_cpu(entry->e_id)); 67 + break; 62 68 case ACL_GROUP: 63 69 value = (char *)value + sizeof(ext4_acl_entry); 64 70 if ((char *)value > end) 65 71 goto fail; 66 - acl->a_entries[n].e_id = 67 - le32_to_cpu(entry->e_id); 72 + acl->a_entries[n].e_gid = 73 + make_kgid(&init_user_ns, 74 + le32_to_cpu(entry->e_id)); 68 75 break; 69 76 70 77 default: ··· 105 98 ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION); 106 99 e = (char *)ext_acl + sizeof(ext4_acl_header); 107 100 for (n = 0; n < acl->a_count; n++) { 101 + const struct posix_acl_entry *acl_e = &acl->a_entries[n]; 108 102 ext4_acl_entry *entry = (ext4_acl_entry *)e; 109 - entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); 110 - entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); 111 - switch (acl->a_entries[n].e_tag) { 103 + entry->e_tag = cpu_to_le16(acl_e->e_tag); 104 + entry->e_perm = cpu_to_le16(acl_e->e_perm); 105 + switch (acl_e->e_tag) { 112 106 case ACL_USER: 107 + entry->e_id = cpu_to_le32( 108 + from_kuid(&init_user_ns, acl_e->e_uid)); 109 + e += sizeof(ext4_acl_entry); 110 + break; 113 111 case ACL_GROUP: 114 - entry->e_id = cpu_to_le32(acl->a_entries[n].e_id); 112 + entry->e_id = cpu_to_le32( 113 + from_kgid(&init_user_ns, acl_e->e_gid)); 115 114 e += sizeof(ext4_acl_entry); 116 115 break; 117 116 ··· 387 374 return PTR_ERR(acl); 388 375 if (acl == NULL) 389 376 return -ENODATA; 390 - error = posix_acl_to_xattr(acl, buffer, size); 377 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 391 378 posix_acl_release(acl); 392 379 393 380 return error; ··· 410 397 return -EPERM; 411 398 412 399 if (value) { 413 - acl = posix_acl_from_xattr(value, 
size); 400 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 414 401 if (IS_ERR(acl)) 415 402 return PTR_ERR(acl); 416 403 else if (acl) {
+1 -1
fs/ext4/super.c
··· 4791 4791 4792 4792 static inline struct inode *dquot_to_inode(struct dquot *dquot) 4793 4793 { 4794 - return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 4794 + return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 4795 4795 } 4796 4796 4797 4797 static int ext4_write_dquot(struct dquot *dquot)
+2 -2
fs/fat/fat.h
··· 23 23 #define FAT_ERRORS_RO 3 /* remount r/o on error */ 24 24 25 25 struct fat_mount_options { 26 - uid_t fs_uid; 27 - gid_t fs_gid; 26 + kuid_t fs_uid; 27 + kgid_t fs_gid; 28 28 unsigned short fs_fmask; 29 29 unsigned short fs_dmask; 30 30 unsigned short codepage; /* Codepage for shortname conversions */
+3 -3
fs/fat/file.c
··· 352 352 { 353 353 umode_t allow_utime = sbi->options.allow_utime; 354 354 355 - if (current_fsuid() != inode->i_uid) { 355 + if (!uid_eq(current_fsuid(), inode->i_uid)) { 356 356 if (in_group_p(inode->i_gid)) 357 357 allow_utime >>= 3; 358 358 if (allow_utime & MAY_WRITE) ··· 407 407 } 408 408 409 409 if (((attr->ia_valid & ATTR_UID) && 410 - (attr->ia_uid != sbi->options.fs_uid)) || 410 + (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) || 411 411 ((attr->ia_valid & ATTR_GID) && 412 - (attr->ia_gid != sbi->options.fs_gid)) || 412 + (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) || 413 413 ((attr->ia_valid & ATTR_MODE) && 414 414 (attr->ia_mode & ~FAT_VALID_MODE))) 415 415 error = -EPERM;
+12 -6
fs/fat/inode.c
··· 791 791 struct fat_mount_options *opts = &sbi->options; 792 792 int isvfat = opts->isvfat; 793 793 794 - if (opts->fs_uid != 0) 795 - seq_printf(m, ",uid=%u", opts->fs_uid); 796 - if (opts->fs_gid != 0) 797 - seq_printf(m, ",gid=%u", opts->fs_gid); 794 + if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID)) 795 + seq_printf(m, ",uid=%u", 796 + from_kuid_munged(&init_user_ns, opts->fs_uid)); 797 + if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID)) 798 + seq_printf(m, ",gid=%u", 799 + from_kgid_munged(&init_user_ns, opts->fs_gid)); 798 800 seq_printf(m, ",fmask=%04o", opts->fs_fmask); 799 801 seq_printf(m, ",dmask=%04o", opts->fs_dmask); 800 802 if (opts->allow_utime) ··· 1039 1037 case Opt_uid: 1040 1038 if (match_int(&args[0], &option)) 1041 1039 return 0; 1042 - opts->fs_uid = option; 1040 + opts->fs_uid = make_kuid(current_user_ns(), option); 1041 + if (!uid_valid(opts->fs_uid)) 1042 + return 0; 1043 1043 break; 1044 1044 case Opt_gid: 1045 1045 if (match_int(&args[0], &option)) 1046 1046 return 0; 1047 - opts->fs_gid = option; 1047 + opts->fs_gid = make_kgid(current_user_ns(), option); 1048 + if (!gid_valid(opts->fs_gid)) 1049 + return 0; 1048 1050 break; 1049 1051 case Opt_umask: 1050 1052 if (match_octal(&args[0], &option))
+2 -2
fs/freevxfs/vxfs_inode.c
··· 224 224 { 225 225 226 226 ip->i_mode = vxfs_transmod(vip); 227 - ip->i_uid = (uid_t)vip->vii_uid; 228 - ip->i_gid = (gid_t)vip->vii_gid; 227 + i_uid_write(ip, (uid_t)vip->vii_uid); 228 + i_gid_write(ip, (gid_t)vip->vii_gid); 229 229 230 230 set_nlink(ip, vip->vii_nlink); 231 231 ip->i_size = vip->vii_size;
+2 -2
fs/generic_acl.c
··· 56 56 acl = get_cached_acl(dentry->d_inode, type); 57 57 if (!acl) 58 58 return -ENODATA; 59 - error = posix_acl_to_xattr(acl, buffer, size); 59 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 60 60 posix_acl_release(acl); 61 61 62 62 return error; ··· 77 77 if (!inode_owner_or_capable(inode)) 78 78 return -EPERM; 79 79 if (value) { 80 - acl = posix_acl_from_xattr(value, size); 80 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 81 81 if (IS_ERR(acl)) 82 82 return PTR_ERR(acl); 83 83 }
+7 -7
fs/gfs2/acl.c
··· 63 63 if (len == 0) 64 64 return NULL; 65 65 66 - acl = posix_acl_from_xattr(data, len); 66 + acl = posix_acl_from_xattr(&init_user_ns, data, len); 67 67 kfree(data); 68 68 return acl; 69 69 } ··· 88 88 const char *name = gfs2_acl_name(type); 89 89 90 90 BUG_ON(name == NULL); 91 - len = posix_acl_to_xattr(acl, NULL, 0); 91 + len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0); 92 92 if (len == 0) 93 93 return 0; 94 94 data = kmalloc(len, GFP_NOFS); 95 95 if (data == NULL) 96 96 return -ENOMEM; 97 - error = posix_acl_to_xattr(acl, data, len); 97 + error = posix_acl_to_xattr(&init_user_ns, acl, data, len); 98 98 if (error < 0) 99 99 goto out; 100 100 error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS); ··· 166 166 if (error) 167 167 return error; 168 168 169 - len = posix_acl_to_xattr(acl, NULL, 0); 169 + len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0); 170 170 data = kmalloc(len, GFP_NOFS); 171 171 error = -ENOMEM; 172 172 if (data == NULL) 173 173 goto out; 174 - posix_acl_to_xattr(acl, data, len); 174 + posix_acl_to_xattr(&init_user_ns, acl, data, len); 175 175 error = gfs2_xattr_acl_chmod(ip, attr, data); 176 176 kfree(data); 177 177 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl); ··· 212 212 if (acl == NULL) 213 213 return -ENODATA; 214 214 215 - error = posix_acl_to_xattr(acl, buffer, size); 215 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 216 216 posix_acl_release(acl); 217 217 218 218 return error; ··· 245 245 if (!value) 246 246 goto set_acl; 247 247 248 - acl = posix_acl_from_xattr(value, size); 248 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 249 249 if (!acl) { 250 250 /* 251 251 * acl_set_file(3) may request that we set default ACLs with
+19 -13
fs/gfs2/quota.c
··· 1071 1071 1072 1072 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1073 1073 print_message(qd, "exceeded"); 1074 - quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ? 1075 - USRQUOTA : GRPQUOTA, qd->qd_id, 1074 + quota_send_warning(make_kqid(&init_user_ns, 1075 + test_bit(QDF_USER, &qd->qd_flags) ? 1076 + USRQUOTA : GRPQUOTA, 1077 + qd->qd_id), 1076 1078 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN); 1077 1079 1078 1080 error = -EDQUOT; ··· 1084 1082 time_after_eq(jiffies, qd->qd_last_warn + 1085 1083 gfs2_tune_get(sdp, 1086 1084 gt_quota_warn_period) * HZ)) { 1087 - quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ? 1088 - USRQUOTA : GRPQUOTA, qd->qd_id, 1085 + quota_send_warning(make_kqid(&init_user_ns, 1086 + test_bit(QDF_USER, &qd->qd_flags) ? 1087 + USRQUOTA : GRPQUOTA, 1088 + qd->qd_id), 1089 1089 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); 1090 1090 error = print_message(qd, "warning"); 1091 1091 qd->qd_last_warn = jiffies; ··· 1474 1470 return 0; 1475 1471 } 1476 1472 1477 - static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id, 1473 + static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, 1478 1474 struct fs_disk_quota *fdq) 1479 1475 { 1480 1476 struct gfs2_sbd *sdp = sb->s_fs_info; ··· 1482 1478 struct gfs2_quota_data *qd; 1483 1479 struct gfs2_holder q_gh; 1484 1480 int error; 1481 + int type; 1485 1482 1486 1483 memset(fdq, 0, sizeof(struct fs_disk_quota)); 1487 1484 1488 1485 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1489 1486 return -ESRCH; /* Crazy XFS error code */ 1490 1487 1491 - if (type == USRQUOTA) 1488 + if (qid.type == USRQUOTA) 1492 1489 type = QUOTA_USER; 1493 - else if (type == GRPQUOTA) 1490 + else if (qid.type == GRPQUOTA) 1494 1491 type = QUOTA_GROUP; 1495 1492 else 1496 1493 return -EINVAL; 1497 1494 1498 - error = qd_get(sdp, type, id, &qd); 1495 + error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd); 1499 1496 if (error) 1500 1497 return error; 1501 
1498 error = do_glock(qd, FORCE, &q_gh); ··· 1506 1501 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 1507 1502 fdq->d_version = FS_DQUOT_VERSION; 1508 1503 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1509 - fdq->d_id = id; 1504 + fdq->d_id = from_kqid(&init_user_ns, qid); 1510 1505 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; 1511 1506 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; 1512 1507 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; ··· 1520 1515 /* GFS2 only supports a subset of the XFS fields */ 1521 1516 #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) 1522 1517 1523 - static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, 1518 + static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, 1524 1519 struct fs_disk_quota *fdq) 1525 1520 { 1526 1521 struct gfs2_sbd *sdp = sb->s_fs_info; ··· 1532 1527 int alloc_required; 1533 1528 loff_t offset; 1534 1529 int error; 1530 + int type; 1535 1531 1536 1532 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1537 1533 return -ESRCH; /* Crazy XFS error code */ 1538 1534 1539 - switch(type) { 1535 + switch(qid.type) { 1540 1536 case USRQUOTA: 1541 1537 type = QUOTA_USER; 1542 1538 if (fdq->d_flags != FS_USER_QUOTA) ··· 1554 1548 1555 1549 if (fdq->d_fieldmask & ~GFS2_FIELDMASK) 1556 1550 return -EINVAL; 1557 - if (fdq->d_id != id) 1551 + if (fdq->d_id != from_kqid(&init_user_ns, qid)) 1558 1552 return -EINVAL; 1559 1553 1560 - error = qd_get(sdp, type, id, &qd); 1554 + error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd); 1561 1555 if (error) 1562 1556 return error; 1563 1557
+2 -2
fs/hfs/hfs_fs.h
··· 134 134 permissions on all files */ 135 135 umode_t s_dir_umask; /* The umask applied to the 136 136 permissions on all dirs */ 137 - uid_t s_uid; /* The uid of all files */ 138 - gid_t s_gid; /* The gid of all files */ 137 + kuid_t s_uid; /* The uid of all files */ 138 + kgid_t s_gid; /* The gid of all files */ 139 139 140 140 int session, part; 141 141 struct nls_table *nls_io, *nls_disk;
+2 -2
fs/hfs/inode.c
··· 594 594 595 595 /* no uig/gid changes and limit which mode bits can be set */ 596 596 if (((attr->ia_valid & ATTR_UID) && 597 - (attr->ia_uid != hsb->s_uid)) || 597 + (!uid_eq(attr->ia_uid, hsb->s_uid))) || 598 598 ((attr->ia_valid & ATTR_GID) && 599 - (attr->ia_gid != hsb->s_gid)) || 599 + (!gid_eq(attr->ia_gid, hsb->s_gid))) || 600 600 ((attr->ia_valid & ATTR_MODE) && 601 601 ((S_ISDIR(inode->i_mode) && 602 602 (attr->ia_mode != inode->i_mode)) ||
+13 -3
fs/hfs/super.c
··· 138 138 seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator); 139 139 if (sbi->s_type != cpu_to_be32(0x3f3f3f3f)) 140 140 seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type); 141 - seq_printf(seq, ",uid=%u,gid=%u", sbi->s_uid, sbi->s_gid); 141 + seq_printf(seq, ",uid=%u,gid=%u", 142 + from_kuid_munged(&init_user_ns, sbi->s_uid), 143 + from_kgid_munged(&init_user_ns, sbi->s_gid)); 142 144 if (sbi->s_file_umask != 0133) 143 145 seq_printf(seq, ",file_umask=%o", sbi->s_file_umask); 144 146 if (sbi->s_dir_umask != 0022) ··· 256 254 printk(KERN_ERR "hfs: uid requires an argument\n"); 257 255 return 0; 258 256 } 259 - hsb->s_uid = (uid_t)tmp; 257 + hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp); 258 + if (!uid_valid(hsb->s_uid)) { 259 + printk(KERN_ERR "hfs: invalid uid %d\n", tmp); 260 + return 0; 261 + } 260 262 break; 261 263 case opt_gid: 262 264 if (match_int(&args[0], &tmp)) { 263 265 printk(KERN_ERR "hfs: gid requires an argument\n"); 264 266 return 0; 265 267 } 266 - hsb->s_gid = (gid_t)tmp; 268 + hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp); 269 + if (!gid_valid(hsb->s_gid)) { 270 + printk(KERN_ERR "hfs: invalid gid %d\n", tmp); 271 + return 0; 272 + } 267 273 break; 268 274 case opt_umask: 269 275 if (match_octal(&args[0], &tmp)) {
+2 -2
fs/hfsplus/catalog.c
··· 80 80 81 81 perms->userflags = HFSPLUS_I(inode)->userflags; 82 82 perms->mode = cpu_to_be16(inode->i_mode); 83 - perms->owner = cpu_to_be32(inode->i_uid); 84 - perms->group = cpu_to_be32(inode->i_gid); 83 + perms->owner = cpu_to_be32(i_uid_read(inode)); 84 + perms->group = cpu_to_be32(i_gid_read(inode)); 85 85 86 86 if (S_ISREG(inode->i_mode)) 87 87 perms->dev = cpu_to_be32(inode->i_nlink);
+2 -2
fs/hfsplus/hfsplus_fs.h
··· 149 149 u32 type; 150 150 151 151 umode_t umask; 152 - uid_t uid; 153 - gid_t gid; 152 + kuid_t uid; 153 + kgid_t gid; 154 154 155 155 int part, session; 156 156 unsigned long flags;
+4 -4
fs/hfsplus/inode.c
··· 233 233 234 234 mode = be16_to_cpu(perms->mode); 235 235 236 - inode->i_uid = be32_to_cpu(perms->owner); 237 - if (!inode->i_uid && !mode) 236 + i_uid_write(inode, be32_to_cpu(perms->owner)); 237 + if (!i_uid_read(inode) && !mode) 238 238 inode->i_uid = sbi->uid; 239 239 240 - inode->i_gid = be32_to_cpu(perms->group); 241 - if (!inode->i_gid && !mode) 240 + i_gid_write(inode, be32_to_cpu(perms->group)); 241 + if (!i_gid_read(inode) && !mode) 242 242 inode->i_gid = sbi->gid; 243 243 244 244 if (dir) {
+12 -3
fs/hfsplus/options.c
··· 135 135 printk(KERN_ERR "hfs: uid requires an argument\n"); 136 136 return 0; 137 137 } 138 - sbi->uid = (uid_t)tmp; 138 + sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp); 139 + if (!uid_valid(sbi->uid)) { 140 + printk(KERN_ERR "hfs: invalid uid specified\n"); 141 + return 0; 142 + } 139 143 break; 140 144 case opt_gid: 141 145 if (match_int(&args[0], &tmp)) { 142 146 printk(KERN_ERR "hfs: gid requires an argument\n"); 143 147 return 0; 144 148 } 145 - sbi->gid = (gid_t)tmp; 149 + sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp); 150 + if (!gid_valid(sbi->gid)) { 151 + printk(KERN_ERR "hfs: invalid gid specified\n"); 152 + return 0; 153 + } 146 154 break; 147 155 case opt_part: 148 156 if (match_int(&args[0], &sbi->part)) { ··· 223 215 if (sbi->type != HFSPLUS_DEF_CR_TYPE) 224 216 seq_printf(seq, ",type=%.4s", (char *)&sbi->type); 225 217 seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask, 226 - sbi->uid, sbi->gid); 218 + from_kuid_munged(&init_user_ns, sbi->uid), 219 + from_kgid_munged(&init_user_ns, sbi->gid)); 227 220 if (sbi->part >= 0) 228 221 seq_printf(seq, ",part=%u", sbi->part); 229 222 if (sbi->session >= 0)
+4 -4
fs/hostfs/hostfs_kern.c
··· 542 542 ino->i_ino = st.ino; 543 543 ino->i_mode = st.mode; 544 544 set_nlink(ino, st.nlink); 545 - ino->i_uid = st.uid; 546 - ino->i_gid = st.gid; 545 + i_uid_write(ino, st.uid); 546 + i_gid_write(ino, st.gid); 547 547 ino->i_atime = st.atime; 548 548 ino->i_mtime = st.mtime; 549 549 ino->i_ctime = st.ctime; ··· 808 808 } 809 809 if (attr->ia_valid & ATTR_UID) { 810 810 attrs.ia_valid |= HOSTFS_ATTR_UID; 811 - attrs.ia_uid = attr->ia_uid; 811 + attrs.ia_uid = from_kuid(&init_user_ns, attr->ia_uid); 812 812 } 813 813 if (attr->ia_valid & ATTR_GID) { 814 814 attrs.ia_valid |= HOSTFS_ATTR_GID; 815 - attrs.ia_gid = attr->ia_gid; 815 + attrs.ia_gid = from_kgid(&init_user_ns, attr->ia_gid); 816 816 } 817 817 if (attr->ia_valid & ATTR_SIZE) { 818 818 attrs.ia_valid |= HOSTFS_ATTR_SIZE;
+2 -2
fs/hpfs/hpfs_fn.h
··· 63 63 unsigned sb_dmap; /* sector number of dnode bit map */ 64 64 unsigned sb_n_free; /* free blocks for statfs, or -1 */ 65 65 unsigned sb_n_free_dnodes; /* free dnodes for statfs, or -1 */ 66 - uid_t sb_uid; /* uid from mount options */ 67 - gid_t sb_gid; /* gid from mount options */ 66 + kuid_t sb_uid; /* uid from mount options */ 67 + kgid_t sb_gid; /* gid from mount options */ 68 68 umode_t sb_mode; /* mode from mount options */ 69 69 unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */ 70 70 unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */
+11 -8
fs/hpfs/inode.c
··· 7 7 */ 8 8 9 9 #include <linux/slab.h> 10 + #include <linux/user_namespace.h> 10 11 #include "hpfs_fn.h" 11 12 12 13 void hpfs_init_inode(struct inode *i) ··· 61 60 if (hpfs_sb(i->i_sb)->sb_eas) { 62 61 if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) { 63 62 if (ea_size == 2) { 64 - i->i_uid = le16_to_cpu(*(__le16*)ea); 63 + i_uid_write(i, le16_to_cpu(*(__le16*)ea)); 65 64 hpfs_inode->i_ea_uid = 1; 66 65 } 67 66 kfree(ea); 68 67 } 69 68 if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) { 70 69 if (ea_size == 2) { 71 - i->i_gid = le16_to_cpu(*(__le16*)ea); 70 + i_gid_write(i, le16_to_cpu(*(__le16*)ea)); 72 71 hpfs_inode->i_ea_gid = 1; 73 72 } 74 73 kfree(ea); ··· 150 149 hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 stuctures", i->i_ino); 151 150 } else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) { 152 151 __le32 ea; 153 - if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) { 154 - ea = cpu_to_le32(i->i_uid); 152 + if (!uid_eq(i->i_uid, hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) { 153 + ea = cpu_to_le32(i_uid_read(i)); 155 154 hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2); 156 155 hpfs_inode->i_ea_uid = 1; 157 156 } 158 - if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) { 159 - ea = cpu_to_le32(i->i_gid); 157 + if (!gid_eq(i->i_gid, hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) { 158 + ea = cpu_to_le32(i_gid_read(i)); 160 159 hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2); 161 160 hpfs_inode->i_ea_gid = 1; 162 161 } ··· 262 261 hpfs_lock(inode->i_sb); 263 262 if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) 264 263 goto out_unlock; 265 - if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000) 264 + if ((attr->ia_valid & ATTR_UID) && 265 + from_kuid(&init_user_ns, attr->ia_uid) >= 0x10000) 266 266 goto out_unlock; 267 - if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000) 267 + if ((attr->ia_valid & ATTR_GID) && 268 + from_kgid(&init_user_ns, attr->ia_gid) >= 0x10000) 268 269 goto 
out_unlock; 269 270 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) 270 271 goto out_unlock;
+4 -4
fs/hpfs/namei.c
··· 91 91 inc_nlink(dir); 92 92 insert_inode_hash(result); 93 93 94 - if (result->i_uid != current_fsuid() || 95 - result->i_gid != current_fsgid() || 94 + if (!uid_eq(result->i_uid, current_fsuid()) || 95 + !gid_eq(result->i_gid, current_fsgid()) || 96 96 result->i_mode != (mode | S_IFDIR)) { 97 97 result->i_uid = current_fsuid(); 98 98 result->i_gid = current_fsgid(); ··· 179 179 180 180 insert_inode_hash(result); 181 181 182 - if (result->i_uid != current_fsuid() || 183 - result->i_gid != current_fsgid() || 182 + if (!uid_eq(result->i_uid, current_fsuid()) || 183 + !gid_eq(result->i_gid, current_fsgid()) || 184 184 result->i_mode != (mode | S_IFREG)) { 185 185 result->i_uid = current_fsuid(); 186 186 result->i_gid = current_fsgid();
+11 -7
fs/hpfs/super.c
··· 251 251 {Opt_err, NULL}, 252 252 }; 253 253 254 - static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, 254 + static int parse_opts(char *opts, kuid_t *uid, kgid_t *gid, umode_t *umask, 255 255 int *lowercase, int *eas, int *chk, int *errs, 256 256 int *chkdsk, int *timeshift) 257 257 { ··· 276 276 case Opt_uid: 277 277 if (match_int(args, &option)) 278 278 return 0; 279 - *uid = option; 279 + *uid = make_kuid(current_user_ns(), option); 280 + if (!uid_valid(*uid)) 281 + return 0; 280 282 break; 281 283 case Opt_gid: 282 284 if (match_int(args, &option)) 283 285 return 0; 284 - *gid = option; 286 + *gid = make_kgid(current_user_ns(), option); 287 + if (!gid_valid(*gid)) 288 + return 0; 285 289 break; 286 290 case Opt_umask: 287 291 if (match_octal(args, &option)) ··· 382 378 383 379 static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) 384 380 { 385 - uid_t uid; 386 - gid_t gid; 381 + kuid_t uid; 382 + kgid_t gid; 387 383 umode_t umask; 388 384 int lowercase, eas, chk, errs, chkdsk, timeshift; 389 385 int o; ··· 459 455 struct hpfs_sb_info *sbi; 460 456 struct inode *root; 461 457 462 - uid_t uid; 463 - gid_t gid; 458 + kuid_t uid; 459 + kgid_t gid; 464 460 umode_t umask; 465 461 int lowercase, eas, chk, errs, chkdsk, timeshift; 466 462
+11 -5
fs/hugetlbfs/inode.c
··· 42 42 static const struct inode_operations hugetlbfs_inode_operations; 43 43 44 44 struct hugetlbfs_config { 45 - uid_t uid; 46 - gid_t gid; 45 + kuid_t uid; 46 + kgid_t gid; 47 47 umode_t mode; 48 48 long nr_blocks; 49 49 long nr_inodes; ··· 785 785 case Opt_uid: 786 786 if (match_int(&args[0], &option)) 787 787 goto bad_val; 788 - pconfig->uid = option; 788 + pconfig->uid = make_kuid(current_user_ns(), option); 789 + if (!uid_valid(pconfig->uid)) 790 + goto bad_val; 789 791 break; 790 792 791 793 case Opt_gid: 792 794 if (match_int(&args[0], &option)) 793 795 goto bad_val; 794 - pconfig->gid = option; 796 + pconfig->gid = make_kgid(current_user_ns(), option); 797 + if (!gid_valid(pconfig->gid)) 798 + goto bad_val; 795 799 break; 796 800 797 801 case Opt_mode: ··· 928 924 929 925 static int can_do_hugetlb_shm(void) 930 926 { 931 - return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); 927 + kgid_t shm_group; 928 + shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); 929 + return capable(CAP_IPC_LOCK) || in_group_p(shm_group); 932 930 } 933 931 934 932 struct file *hugetlb_file_setup(const char *name, unsigned long addr,
+11 -6
fs/isofs/inode.c
··· 21 21 #include <linux/cdrom.h> 22 22 #include <linux/parser.h> 23 23 #include <linux/mpage.h> 24 + #include <linux/user_namespace.h> 24 25 25 26 #include "isofs.h" 26 27 #include "zisofs.h" ··· 172 171 unsigned int blocksize; 173 172 umode_t fmode; 174 173 umode_t dmode; 175 - gid_t gid; 176 - uid_t uid; 174 + kgid_t gid; 175 + kuid_t uid; 177 176 char *iocharset; 178 177 /* LVE */ 179 178 s32 session; ··· 384 383 popt->fmode = popt->dmode = ISOFS_INVALID_MODE; 385 384 popt->uid_set = 0; 386 385 popt->gid_set = 0; 387 - popt->gid = 0; 388 - popt->uid = 0; 386 + popt->gid = GLOBAL_ROOT_GID; 387 + popt->uid = GLOBAL_ROOT_UID; 389 388 popt->iocharset = NULL; 390 389 popt->utf8 = 0; 391 390 popt->overriderockperm = 0; ··· 461 460 case Opt_uid: 462 461 if (match_int(&args[0], &option)) 463 462 return 0; 464 - popt->uid = option; 463 + popt->uid = make_kuid(current_user_ns(), option); 464 + if (!uid_valid(popt->uid)) 465 + return 0; 465 466 popt->uid_set = 1; 466 467 break; 467 468 case Opt_gid: 468 469 if (match_int(&args[0], &option)) 469 470 return 0; 470 - popt->gid = option; 471 + popt->gid = make_kgid(current_user_ns(), option); 472 + if (!gid_valid(popt->gid)) 473 + return 0; 471 474 popt->gid_set = 1; 472 475 break; 473 476 case Opt_mode:
+2 -2
fs/isofs/isofs.h
··· 52 52 53 53 umode_t s_fmode; 54 54 umode_t s_dmode; 55 - gid_t s_gid; 56 - uid_t s_uid; 55 + kgid_t s_gid; 56 + kuid_t s_uid; 57 57 struct nls_table *s_nls_iocharset; /* Native language support table */ 58 58 }; 59 59
+2 -2
fs/isofs/rock.c
··· 364 364 case SIG('P', 'X'): 365 365 inode->i_mode = isonum_733(rr->u.PX.mode); 366 366 set_nlink(inode, isonum_733(rr->u.PX.n_links)); 367 - inode->i_uid = isonum_733(rr->u.PX.uid); 368 - inode->i_gid = isonum_733(rr->u.PX.gid); 367 + i_uid_write(inode, isonum_733(rr->u.PX.uid)); 368 + i_gid_write(inode, isonum_733(rr->u.PX.gid)); 369 369 break; 370 370 case SIG('P', 'N'): 371 371 {
+22 -8
fs/jffs2/acl.c
··· 94 94 case ACL_MASK: 95 95 case ACL_OTHER: 96 96 value += sizeof(struct jffs2_acl_entry_short); 97 - acl->a_entries[i].e_id = ACL_UNDEFINED_ID; 98 97 break; 99 98 100 99 case ACL_USER: 100 + value += sizeof(struct jffs2_acl_entry); 101 + if (value > end) 102 + goto fail; 103 + acl->a_entries[i].e_uid = 104 + make_kuid(&init_user_ns, 105 + je32_to_cpu(entry->e_id)); 106 + break; 101 107 case ACL_GROUP: 102 108 value += sizeof(struct jffs2_acl_entry); 103 109 if (value > end) 104 110 goto fail; 105 - acl->a_entries[i].e_id = je32_to_cpu(entry->e_id); 111 + acl->a_entries[i].e_gid = 112 + make_kgid(&init_user_ns, 113 + je32_to_cpu(entry->e_id)); 106 114 break; 107 115 108 116 default: ··· 139 131 header->a_version = cpu_to_je32(JFFS2_ACL_VERSION); 140 132 e = header + 1; 141 133 for (i=0; i < acl->a_count; i++) { 134 + const struct posix_acl_entry *acl_e = &acl->a_entries[i]; 142 135 entry = e; 143 - entry->e_tag = cpu_to_je16(acl->a_entries[i].e_tag); 144 - entry->e_perm = cpu_to_je16(acl->a_entries[i].e_perm); 145 - switch(acl->a_entries[i].e_tag) { 136 + entry->e_tag = cpu_to_je16(acl_e->e_tag); 137 + entry->e_perm = cpu_to_je16(acl_e->e_perm); 138 + switch(acl_e->e_tag) { 146 139 case ACL_USER: 140 + entry->e_id = cpu_to_je32( 141 + from_kuid(&init_user_ns, acl_e->e_uid)); 142 + e += sizeof(struct jffs2_acl_entry); 143 + break; 147 144 case ACL_GROUP: 148 - entry->e_id = cpu_to_je32(acl->a_entries[i].e_id); 145 + entry->e_id = cpu_to_je32( 146 + from_kgid(&init_user_ns, acl_e->e_gid)); 149 147 e += sizeof(struct jffs2_acl_entry); 150 148 break; 151 149 ··· 377 363 return PTR_ERR(acl); 378 364 if (!acl) 379 365 return -ENODATA; 380 - rc = posix_acl_to_xattr(acl, buffer, size); 366 + rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 381 367 posix_acl_release(acl); 382 368 383 369 return rc; ··· 395 381 return -EPERM; 396 382 397 383 if (value) { 398 - acl = posix_acl_from_xattr(value, size); 384 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 
399 385 if (IS_ERR(acl)) 400 386 return PTR_ERR(acl); 401 387 if (acl) {
+4 -4
fs/jffs2/file.c
··· 175 175 ri.ino = cpu_to_je32(f->inocache->ino); 176 176 ri.version = cpu_to_je32(++f->highest_version); 177 177 ri.mode = cpu_to_jemode(inode->i_mode); 178 - ri.uid = cpu_to_je16(inode->i_uid); 179 - ri.gid = cpu_to_je16(inode->i_gid); 178 + ri.uid = cpu_to_je16(i_uid_read(inode)); 179 + ri.gid = cpu_to_je16(i_gid_read(inode)); 180 180 ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); 181 181 ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds()); 182 182 ri.offset = cpu_to_je32(inode->i_size); ··· 283 283 /* Set the fields that the generic jffs2_write_inode_range() code can't find */ 284 284 ri->ino = cpu_to_je32(inode->i_ino); 285 285 ri->mode = cpu_to_jemode(inode->i_mode); 286 - ri->uid = cpu_to_je16(inode->i_uid); 287 - ri->gid = cpu_to_je16(inode->i_gid); 286 + ri->uid = cpu_to_je16(i_uid_read(inode)); 287 + ri->gid = cpu_to_je16(i_gid_read(inode)); 288 288 ri->isize = cpu_to_je32((uint32_t)inode->i_size); 289 289 ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds()); 290 290
+13 -11
fs/jffs2/fs.c
··· 99 99 ri->ino = cpu_to_je32(inode->i_ino); 100 100 ri->version = cpu_to_je32(++f->highest_version); 101 101 102 - ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); 103 - ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 102 + ri->uid = cpu_to_je16((ivalid & ATTR_UID)? 103 + from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode)); 104 + ri->gid = cpu_to_je16((ivalid & ATTR_GID)? 105 + from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode)); 104 106 105 107 if (ivalid & ATTR_MODE) 106 108 ri->mode = cpu_to_jemode(iattr->ia_mode); ··· 149 147 inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); 150 148 inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); 151 149 inode->i_mode = jemode_to_cpu(ri->mode); 152 - inode->i_uid = je16_to_cpu(ri->uid); 153 - inode->i_gid = je16_to_cpu(ri->gid); 150 + i_uid_write(inode, je16_to_cpu(ri->uid)); 151 + i_gid_write(inode, je16_to_cpu(ri->gid)); 154 152 155 153 156 154 old_metadata = f->metadata; ··· 278 276 return ERR_PTR(ret); 279 277 } 280 278 inode->i_mode = jemode_to_cpu(latest_node.mode); 281 - inode->i_uid = je16_to_cpu(latest_node.uid); 282 - inode->i_gid = je16_to_cpu(latest_node.gid); 279 + i_uid_write(inode, je16_to_cpu(latest_node.uid)); 280 + i_gid_write(inode, je16_to_cpu(latest_node.gid)); 283 281 inode->i_size = je32_to_cpu(latest_node.isize); 284 282 inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); 285 283 inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); ··· 442 440 443 441 memset(ri, 0, sizeof(*ri)); 444 442 /* Set OS-specific defaults for new inodes */ 445 - ri->uid = cpu_to_je16(current_fsuid()); 443 + ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid())); 446 444 447 445 if (dir_i->i_mode & S_ISGID) { 448 - ri->gid = cpu_to_je16(dir_i->i_gid); 446 + ri->gid = cpu_to_je16(i_gid_read(dir_i)); 449 447 if (S_ISDIR(mode)) 450 448 mode |= S_ISGID; 451 449 } else { 452 - ri->gid = cpu_to_je16(current_fsgid()); 450 + ri->gid = 
cpu_to_je16(from_kgid(&init_user_ns, current_fsgid())); 453 451 } 454 452 455 453 /* POSIX ACLs have to be processed now, at least partly. ··· 469 467 set_nlink(inode, 1); 470 468 inode->i_ino = je32_to_cpu(ri->ino); 471 469 inode->i_mode = jemode_to_cpu(ri->mode); 472 - inode->i_gid = je16_to_cpu(ri->gid); 473 - inode->i_uid = je16_to_cpu(ri->uid); 470 + i_gid_write(inode, je16_to_cpu(ri->gid)); 471 + i_uid_write(inode, je16_to_cpu(ri->uid)); 474 472 inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; 475 473 ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime)); 476 474
+2 -2
fs/jffs2/os-linux.h
··· 27 27 28 28 #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size) 29 29 #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) 30 - #define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) 31 - #define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) 30 + #define JFFS2_F_I_UID(f) (i_uid_read(OFNI_EDONI_2SFFJ(f))) 31 + #define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f))) 32 32 #define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev) 33 33 34 34 #define ITIME(sec) ((struct timespec){sec, 0})
+2 -2
fs/jfs/acl.c
··· 64 64 else 65 65 acl = ERR_PTR(size); 66 66 } else { 67 - acl = posix_acl_from_xattr(value, size); 67 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 68 68 } 69 69 kfree(value); 70 70 if (!IS_ERR(acl)) ··· 100 100 value = kmalloc(size, GFP_KERNEL); 101 101 if (!value) 102 102 return -ENOMEM; 103 - rc = posix_acl_to_xattr(acl, value, size); 103 + rc = posix_acl_to_xattr(&init_user_ns, acl, value, size); 104 104 if (rc < 0) 105 105 goto out; 106 106 }
+2 -2
fs/jfs/file.c
··· 108 108 109 109 if (is_quota_modification(inode, iattr)) 110 110 dquot_initialize(inode); 111 - if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 112 - (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 111 + if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) || 112 + (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) { 113 113 rc = dquot_transfer(inode, iattr); 114 114 if (rc) 115 115 return rc;
+12 -10
fs/jfs/jfs_imap.c
··· 3078 3078 } 3079 3079 set_nlink(ip, le32_to_cpu(dip->di_nlink)); 3080 3080 3081 - jfs_ip->saved_uid = le32_to_cpu(dip->di_uid); 3082 - if (sbi->uid == -1) 3081 + jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid)); 3082 + if (!uid_valid(sbi->uid)) 3083 3083 ip->i_uid = jfs_ip->saved_uid; 3084 3084 else { 3085 3085 ip->i_uid = sbi->uid; 3086 3086 } 3087 3087 3088 - jfs_ip->saved_gid = le32_to_cpu(dip->di_gid); 3089 - if (sbi->gid == -1) 3088 + jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid)); 3089 + if (!gid_valid(sbi->gid)) 3090 3090 ip->i_gid = jfs_ip->saved_gid; 3091 3091 else { 3092 3092 ip->i_gid = sbi->gid; ··· 3150 3150 dip->di_size = cpu_to_le64(ip->i_size); 3151 3151 dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); 3152 3152 dip->di_nlink = cpu_to_le32(ip->i_nlink); 3153 - if (sbi->uid == -1) 3154 - dip->di_uid = cpu_to_le32(ip->i_uid); 3153 + if (!uid_valid(sbi->uid)) 3154 + dip->di_uid = cpu_to_le32(i_uid_read(ip)); 3155 3155 else 3156 - dip->di_uid = cpu_to_le32(jfs_ip->saved_uid); 3157 - if (sbi->gid == -1) 3158 - dip->di_gid = cpu_to_le32(ip->i_gid); 3156 + dip->di_uid =cpu_to_le32(from_kuid(&init_user_ns, 3157 + jfs_ip->saved_uid)); 3158 + if (!gid_valid(sbi->gid)) 3159 + dip->di_gid = cpu_to_le32(i_gid_read(ip)); 3159 3160 else 3160 - dip->di_gid = cpu_to_le32(jfs_ip->saved_gid); 3161 + dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns, 3162 + jfs_ip->saved_gid)); 3161 3163 jfs_get_inode_flags(jfs_ip); 3162 3164 /* 3163 3165 * mode2 is only needed for storing the higher order bits.
+4 -4
fs/jfs/jfs_incore.h
··· 38 38 struct jfs_inode_info { 39 39 int fileset; /* fileset number (always 16)*/ 40 40 uint mode2; /* jfs-specific mode */ 41 - uint saved_uid; /* saved for uid mount option */ 42 - uint saved_gid; /* saved for gid mount option */ 41 + kuid_t saved_uid; /* saved for uid mount option */ 42 + kgid_t saved_gid; /* saved for gid mount option */ 43 43 pxd_t ixpxd; /* inode extent descriptor */ 44 44 dxd_t acl; /* dxd describing acl */ 45 45 dxd_t ea; /* dxd describing ea */ ··· 192 192 uint state; /* mount/recovery state */ 193 193 unsigned long flag; /* mount time flags */ 194 194 uint p_state; /* state prior to going no integrity */ 195 - uint uid; /* uid to override on-disk uid */ 196 - uint gid; /* gid to override on-disk gid */ 195 + kuid_t uid; /* uid to override on-disk uid */ 196 + kgid_t gid; /* gid to override on-disk gid */ 197 197 uint umask; /* umask to override on-disk umask */ 198 198 }; 199 199
+15 -7
fs/jfs/super.c
··· 321 321 case Opt_uid: 322 322 { 323 323 char *uid = args[0].from; 324 - sbi->uid = simple_strtoul(uid, &uid, 0); 324 + uid_t val = simple_strtoul(uid, &uid, 0); 325 + sbi->uid = make_kuid(current_user_ns(), val); 326 + if (!uid_valid(sbi->uid)) 327 + goto cleanup; 325 328 break; 326 329 } 327 330 case Opt_gid: 328 331 { 329 332 char *gid = args[0].from; 330 - sbi->gid = simple_strtoul(gid, &gid, 0); 333 + gid_t val = simple_strtoul(gid, &gid, 0); 334 + sbi->gid = make_kgid(current_user_ns(), val); 335 + if (!gid_valid(sbi->gid)) 336 + goto cleanup; 331 337 break; 332 338 } 333 339 case Opt_umask: ··· 449 443 sb->s_fs_info = sbi; 450 444 sb->s_max_links = JFS_LINK_MAX; 451 445 sbi->sb = sb; 452 - sbi->uid = sbi->gid = sbi->umask = -1; 446 + sbi->uid = INVALID_UID; 447 + sbi->gid = INVALID_GID; 448 + sbi->umask = -1; 453 449 454 450 /* initialize the mount flag and determine the default error handler */ 455 451 flag = JFS_ERR_REMOUNT_RO; ··· 625 617 { 626 618 struct jfs_sb_info *sbi = JFS_SBI(root->d_sb); 627 619 628 - if (sbi->uid != -1) 629 - seq_printf(seq, ",uid=%d", sbi->uid); 630 - if (sbi->gid != -1) 631 - seq_printf(seq, ",gid=%d", sbi->gid); 620 + if (uid_valid(sbi->uid)) 621 + seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid)); 622 + if (gid_valid(sbi->gid)) 623 + seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid)); 632 624 if (sbi->umask != -1) 633 625 seq_printf(seq, ",umask=%03o", sbi->umask); 634 626 if (sbi->flag & JFS_NOINTEGRITY)
+2 -2
fs/jfs/xattr.c
··· 685 685 * POSIX_ACL_XATTR_ACCESS is tied to i_mode 686 686 */ 687 687 if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) { 688 - acl = posix_acl_from_xattr(value, value_len); 688 + acl = posix_acl_from_xattr(&init_user_ns, value, value_len); 689 689 if (IS_ERR(acl)) { 690 690 rc = PTR_ERR(acl); 691 691 printk(KERN_ERR "posix_acl_from_xattr returned %d\n", ··· 710 710 711 711 return 0; 712 712 } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) { 713 - acl = posix_acl_from_xattr(value, value_len); 713 + acl = posix_acl_from_xattr(&init_user_ns, value, value_len); 714 714 if (IS_ERR(acl)) { 715 715 rc = PTR_ERR(acl); 716 716 printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
+2 -2
fs/logfs/inode.c
··· 208 208 li->li_height = 0; 209 209 li->li_used_bytes = 0; 210 210 li->li_block = NULL; 211 - inode->i_uid = 0; 212 - inode->i_gid = 0; 211 + i_uid_write(inode, 0); 212 + i_gid_write(inode, 0); 213 213 inode->i_size = 0; 214 214 inode->i_blocks = 0; 215 215 inode->i_ctime = CURRENT_TIME;
+4 -4
fs/logfs/readwrite.c
··· 119 119 inode->i_mode = be16_to_cpu(di->di_mode); 120 120 li->li_height = di->di_height; 121 121 li->li_flags = be32_to_cpu(di->di_flags); 122 - inode->i_uid = be32_to_cpu(di->di_uid); 123 - inode->i_gid = be32_to_cpu(di->di_gid); 122 + i_uid_write(inode, be32_to_cpu(di->di_uid)); 123 + i_gid_write(inode, be32_to_cpu(di->di_gid)); 124 124 inode->i_size = be64_to_cpu(di->di_size); 125 125 logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes)); 126 126 inode->i_atime = be64_to_timespec(di->di_atime); ··· 156 156 di->di_height = li->li_height; 157 157 di->di_pad = 0; 158 158 di->di_flags = cpu_to_be32(li->li_flags); 159 - di->di_uid = cpu_to_be32(inode->i_uid); 160 - di->di_gid = cpu_to_be32(inode->i_gid); 159 + di->di_uid = cpu_to_be32(i_uid_read(inode)); 160 + di->di_gid = cpu_to_be32(i_gid_read(inode)); 161 161 di->di_size = cpu_to_be64(i_size_read(inode)); 162 162 di->di_used_bytes = cpu_to_be64(li->li_used_bytes); 163 163 di->di_atime = timespec_to_be64(inode->i_atime);
+8 -8
fs/minix/inode.c
··· 460 460 return ERR_PTR(-EIO); 461 461 } 462 462 inode->i_mode = raw_inode->i_mode; 463 - inode->i_uid = (uid_t)raw_inode->i_uid; 464 - inode->i_gid = (gid_t)raw_inode->i_gid; 463 + i_uid_write(inode, raw_inode->i_uid); 464 + i_gid_write(inode, raw_inode->i_gid); 465 465 set_nlink(inode, raw_inode->i_nlinks); 466 466 inode->i_size = raw_inode->i_size; 467 467 inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time; ··· 493 493 return ERR_PTR(-EIO); 494 494 } 495 495 inode->i_mode = raw_inode->i_mode; 496 - inode->i_uid = (uid_t)raw_inode->i_uid; 497 - inode->i_gid = (gid_t)raw_inode->i_gid; 496 + i_uid_write(inode, raw_inode->i_uid); 497 + i_gid_write(inode, raw_inode->i_gid); 498 498 set_nlink(inode, raw_inode->i_nlinks); 499 499 inode->i_size = raw_inode->i_size; 500 500 inode->i_mtime.tv_sec = raw_inode->i_mtime; ··· 545 545 if (!raw_inode) 546 546 return NULL; 547 547 raw_inode->i_mode = inode->i_mode; 548 - raw_inode->i_uid = fs_high2lowuid(inode->i_uid); 549 - raw_inode->i_gid = fs_high2lowgid(inode->i_gid); 548 + raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); 549 + raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); 550 550 raw_inode->i_nlinks = inode->i_nlink; 551 551 raw_inode->i_size = inode->i_size; 552 552 raw_inode->i_time = inode->i_mtime.tv_sec; ··· 572 572 if (!raw_inode) 573 573 return NULL; 574 574 raw_inode->i_mode = inode->i_mode; 575 - raw_inode->i_uid = fs_high2lowuid(inode->i_uid); 576 - raw_inode->i_gid = fs_high2lowgid(inode->i_gid); 575 + raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); 576 + raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); 577 577 raw_inode->i_nlinks = inode->i_nlink; 578 578 raw_inode->i_size = inode->i_size; 579 579 raw_inode->i_mtime = inode->i_mtime.tv_sec;
+3 -3
fs/namei.c
··· 680 680 681 681 /* Allowed if owner and follower match. */ 682 682 inode = link->dentry->d_inode; 683 - if (current_cred()->fsuid == inode->i_uid) 683 + if (uid_eq(current_cred()->fsuid, inode->i_uid)) 684 684 return 0; 685 685 686 686 /* Allowed if parent directory not sticky and world-writable. */ ··· 689 689 return 0; 690 690 691 691 /* Allowed if parent directory and link owner match. */ 692 - if (parent->i_uid == inode->i_uid) 692 + if (uid_eq(parent->i_uid, inode->i_uid)) 693 693 return 0; 694 694 695 695 path_put_conditional(link, nd); ··· 759 759 /* Source inode owner (or CAP_FOWNER) can hardlink all they like, 760 760 * otherwise, it must be a safe source. 761 761 */ 762 - if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) || 762 + if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) || 763 763 capable(CAP_FOWNER)) 764 764 return 0; 765 765
+2 -2
fs/nfs/nfs3acl.c
··· 70 70 if (type == ACL_TYPE_ACCESS && acl->a_count == 0) 71 71 error = -ENODATA; 72 72 else 73 - error = posix_acl_to_xattr(acl, buffer, size); 73 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 74 74 posix_acl_release(acl); 75 75 } else 76 76 error = -ENODATA; ··· 92 92 else 93 93 return -EOPNOTSUPP; 94 94 95 - acl = posix_acl_from_xattr(value, size); 95 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 96 96 if (IS_ERR(acl)) 97 97 return PTR_ERR(acl); 98 98 error = nfs3_proc_setacl(inode, type, acl);
+4 -4
fs/nfsd/vfs.c
··· 480 480 if (buf == NULL) 481 481 goto out; 482 482 483 - len = posix_acl_to_xattr(pacl, buf, buflen); 483 + len = posix_acl_to_xattr(&init_user_ns, pacl, buf, buflen); 484 484 if (len < 0) { 485 485 error = len; 486 486 goto out; ··· 549 549 if (buflen <= 0) 550 550 return ERR_PTR(buflen); 551 551 552 - pacl = posix_acl_from_xattr(buf, buflen); 552 + pacl = posix_acl_from_xattr(&init_user_ns, buf, buflen); 553 553 kfree(buf); 554 554 return pacl; 555 555 } ··· 2264 2264 if (size < 0) 2265 2265 return ERR_PTR(size); 2266 2266 2267 - acl = posix_acl_from_xattr(value, size); 2267 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 2268 2268 kfree(value); 2269 2269 return acl; 2270 2270 } ··· 2297 2297 value = kmalloc(size, GFP_KERNEL); 2298 2298 if (!value) 2299 2299 return -ENOMEM; 2300 - error = posix_acl_to_xattr(acl, value, size); 2300 + error = posix_acl_to_xattr(&init_user_ns, acl, value, size); 2301 2301 if (error < 0) 2302 2302 goto getout; 2303 2303 size = error;
+4 -4
fs/nilfs2/inode.c
··· 401 401 int err; 402 402 403 403 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 404 - inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid); 405 - inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid); 404 + i_uid_write(inode, le32_to_cpu(raw_inode->i_uid)); 405 + i_gid_write(inode, le32_to_cpu(raw_inode->i_gid)); 406 406 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 407 407 inode->i_size = le64_to_cpu(raw_inode->i_size); 408 408 inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); ··· 590 590 struct nilfs_inode_info *ii = NILFS_I(inode); 591 591 592 592 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 593 - raw_inode->i_uid = cpu_to_le32(inode->i_uid); 594 - raw_inode->i_gid = cpu_to_le32(inode->i_gid); 593 + raw_inode->i_uid = cpu_to_le32(i_uid_read(inode)); 594 + raw_inode->i_gid = cpu_to_le32(i_gid_read(inode)); 595 595 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 596 596 raw_inode->i_size = cpu_to_le64(inode->i_size); 597 597 raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+4 -3
fs/ntfs/inode.c
··· 2124 2124 * ntfs_read_inode() will have set up the default ones. 2125 2125 */ 2126 2126 /* Set uid and gid to root. */ 2127 - vi->i_uid = vi->i_gid = 0; 2127 + vi->i_uid = GLOBAL_ROOT_UID; 2128 + vi->i_gid = GLOBAL_ROOT_GID; 2128 2129 /* Regular file. No access for anyone. */ 2129 2130 vi->i_mode = S_IFREG; 2130 2131 /* No VFS initiated operations allowed for $MFT. */ ··· 2313 2312 ntfs_volume *vol = NTFS_SB(root->d_sb); 2314 2313 int i; 2315 2314 2316 - seq_printf(sf, ",uid=%i", vol->uid); 2317 - seq_printf(sf, ",gid=%i", vol->gid); 2315 + seq_printf(sf, ",uid=%i", from_kuid_munged(&init_user_ns, vol->uid)); 2316 + seq_printf(sf, ",gid=%i", from_kgid_munged(&init_user_ns, vol->gid)); 2318 2317 if (vol->fmask == vol->dmask) 2319 2318 seq_printf(sf, ",umask=0%o", vol->fmask); 2320 2319 else {
+32 -7
fs/ntfs/super.c
··· 102 102 char *p, *v, *ov; 103 103 static char *utf8 = "utf8"; 104 104 int errors = 0, sloppy = 0; 105 - uid_t uid = (uid_t)-1; 106 - gid_t gid = (gid_t)-1; 105 + kuid_t uid = INVALID_UID; 106 + kgid_t gid = INVALID_GID; 107 107 umode_t fmask = (umode_t)-1, dmask = (umode_t)-1; 108 108 int mft_zone_multiplier = -1, on_errors = -1; 109 109 int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1; ··· 126 126 goto needs_arg; \ 127 127 variable = simple_strtoul(ov = v, &v, 0); \ 128 128 if (*v) \ 129 + goto needs_val; \ 130 + } 131 + #define NTFS_GETOPT_UID(option, variable) \ 132 + if (!strcmp(p, option)) { \ 133 + uid_t uid_value; \ 134 + if (!v || !*v) \ 135 + goto needs_arg; \ 136 + uid_value = simple_strtoul(ov = v, &v, 0); \ 137 + if (*v) \ 138 + goto needs_val; \ 139 + variable = make_kuid(current_user_ns(), uid_value); \ 140 + if (!uid_valid(variable)) \ 141 + goto needs_val; \ 142 + } 143 + #define NTFS_GETOPT_GID(option, variable) \ 144 + if (!strcmp(p, option)) { \ 145 + gid_t gid_value; \ 146 + if (!v || !*v) \ 147 + goto needs_arg; \ 148 + gid_value = simple_strtoul(ov = v, &v, 0); \ 149 + if (*v) \ 150 + goto needs_val; \ 151 + variable = make_kgid(current_user_ns(), gid_value); \ 152 + if (!gid_valid(variable)) \ 129 153 goto needs_val; \ 130 154 } 131 155 #define NTFS_GETOPT_OCTAL(option, variable) \ ··· 189 165 while ((p = strsep(&opt, ","))) { 190 166 if ((v = strchr(p, '='))) 191 167 *v++ = 0; 192 - NTFS_GETOPT("uid", uid) 193 - else NTFS_GETOPT("gid", gid) 168 + NTFS_GETOPT_UID("uid", uid) 169 + else NTFS_GETOPT_GID("gid", gid) 194 170 else NTFS_GETOPT_OCTAL("umask", fmask = dmask) 195 171 else NTFS_GETOPT_OCTAL("fmask", fmask) 196 172 else NTFS_GETOPT_OCTAL("dmask", dmask) ··· 307 283 vol->on_errors = on_errors; 308 284 if (!vol->on_errors || vol->on_errors == ON_ERRORS_RECOVER) 309 285 vol->on_errors |= ON_ERRORS_CONTINUE; 310 - if (uid != (uid_t)-1) 286 + if (uid_valid(uid)) 311 287 vol->uid = uid; 312 - if (gid != (gid_t)-1) 288 + 
if (gid_valid(gid)) 313 289 vol->gid = gid; 314 290 if (fmask != (umode_t)-1) 315 291 vol->fmask = fmask; ··· 1047 1023 * ntfs_read_inode() will have set up the default ones. 1048 1024 */ 1049 1025 /* Set uid and gid to root. */ 1050 - tmp_ino->i_uid = tmp_ino->i_gid = 0; 1026 + tmp_ino->i_uid = GLOBAL_ROOT_UID; 1027 + tmp_ino->i_gid = GLOBAL_ROOT_GID; 1051 1028 /* Regular file. No access for anyone. */ 1052 1029 tmp_ino->i_mode = S_IFREG; 1053 1030 /* No VFS initiated operations allowed for $MFTMirr. */
+3 -2
fs/ntfs/volume.h
··· 25 25 #define _LINUX_NTFS_VOLUME_H 26 26 27 27 #include <linux/rwsem.h> 28 + #include <linux/uidgid.h> 28 29 29 30 #include "types.h" 30 31 #include "layout.h" ··· 47 46 sized blocks on the device. */ 48 47 /* Configuration provided by user at mount time. */ 49 48 unsigned long flags; /* Miscellaneous flags, see below. */ 50 - uid_t uid; /* uid that files will be mounted as. */ 51 - gid_t gid; /* gid that files will be mounted as. */ 49 + kuid_t uid; /* uid that files will be mounted as. */ 50 + kgid_t gid; /* gid that files will be mounted as. */ 52 51 umode_t fmask; /* The mask for file permissions. */ 53 52 umode_t dmask; /* The mask for directory 54 53 permissions. */
+2 -2
fs/ocfs2/acl.c
··· 452 452 return PTR_ERR(acl); 453 453 if (acl == NULL) 454 454 return -ENODATA; 455 - ret = posix_acl_to_xattr(acl, buffer, size); 455 + ret = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 456 456 posix_acl_release(acl); 457 457 458 458 return ret; ··· 475 475 return -EPERM; 476 476 477 477 if (value) { 478 - acl = posix_acl_from_xattr(value, size); 478 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 479 479 if (IS_ERR(acl)) 480 480 return PTR_ERR(acl); 481 481 else if (acl) {
+2 -4
fs/ocfs2/file.c
··· 1184 1184 if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid 1185 1185 && OCFS2_HAS_RO_COMPAT_FEATURE(sb, 1186 1186 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { 1187 - transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid, 1188 - USRQUOTA); 1187 + transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); 1189 1188 if (!transfer_to[USRQUOTA]) { 1190 1189 status = -ESRCH; 1191 1190 goto bail_unlock; ··· 1193 1194 if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid 1194 1195 && OCFS2_HAS_RO_COMPAT_FEATURE(sb, 1195 1196 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { 1196 - transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid, 1197 - GRPQUOTA); 1197 + transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); 1198 1198 if (!transfer_to[GRPQUOTA]) { 1199 1199 status = -ESRCH; 1200 1200 goto bail_unlock;
+26 -17
fs/ocfs2/quota_global.c
··· 95 95 struct ocfs2_global_disk_dqblk *d = dp; 96 96 struct mem_dqblk *m = &dquot->dq_dqb; 97 97 98 - d->dqb_id = cpu_to_le32(dquot->dq_id); 98 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 99 99 d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count); 100 100 d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); 101 101 d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); ··· 112 112 { 113 113 struct ocfs2_global_disk_dqblk *d = dp; 114 114 struct ocfs2_mem_dqinfo *oinfo = 115 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 115 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 116 116 117 117 if (qtree_entry_unused(&oinfo->dqi_gi, dp)) 118 118 return 0; 119 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 119 + 120 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 121 + le32_to_cpu(d->dqb_id)), 122 + dquot->dq_id); 120 123 } 121 124 122 125 struct qtree_fmt_operations ocfs2_global_ops = { ··· 478 475 { 479 476 int err, err2; 480 477 struct super_block *sb = dquot->dq_sb; 481 - int type = dquot->dq_type; 478 + int type = dquot->dq_id.type; 482 479 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; 483 480 struct ocfs2_global_disk_dqblk dqblk; 484 481 s64 spacechange, inodechange; ··· 507 504 olditime = dquot->dq_dqb.dqb_itime; 508 505 oldbtime = dquot->dq_dqb.dqb_btime; 509 506 ocfs2_global_disk2memdqb(dquot, &dqblk); 510 - trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace, 507 + trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id), 508 + dquot->dq_dqb.dqb_curspace, 511 509 (long long)spacechange, 512 510 dquot->dq_dqb.dqb_curinodes, 513 511 (long long)inodechange); ··· 559 555 err = ocfs2_qinfo_lock(info, freeing); 560 556 if (err < 0) { 561 557 mlog(ML_ERROR, "Failed to lock quota info, losing quota write" 562 - " (type=%d, id=%u)\n", dquot->dq_type, 563 - (unsigned)dquot->dq_id); 558 + " (type=%d, id=%u)\n", dquot->dq_id.type, 559 + (unsigned)from_kqid(&init_user_ns, 
dquot->dq_id)); 564 560 goto out; 565 561 } 566 562 if (freeing) ··· 595 591 struct ocfs2_super *osb = OCFS2_SB(sb); 596 592 int status = 0; 597 593 598 - trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type, 594 + trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id), 595 + dquot->dq_id.type, 599 596 type, sb->s_id); 600 - if (type != dquot->dq_type) 597 + if (type != dquot->dq_id.type) 601 598 goto out; 602 599 status = ocfs2_lock_global_qf(oinfo, 1); 603 600 if (status < 0) ··· 648 643 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 649 644 int status = 0; 650 645 651 - trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type); 646 + trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id), 647 + dquot->dq_id.type); 652 648 653 649 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); 654 650 if (IS_ERR(handle)) { ··· 683 677 { 684 678 handle_t *handle; 685 679 struct ocfs2_mem_dqinfo *oinfo = 686 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 680 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 687 681 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 688 682 int status = 0; 689 683 690 - trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type); 684 + trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id), 685 + dquot->dq_id.type); 691 686 692 687 mutex_lock(&dquot->dq_lock); 693 688 /* Check whether we are not racing with some other dqget() */ ··· 698 691 if (status < 0) 699 692 goto out; 700 693 handle = ocfs2_start_trans(osb, 701 - ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type)); 694 + ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); 702 695 if (IS_ERR(handle)) { 703 696 status = PTR_ERR(handle); 704 697 mlog_errno(status); ··· 740 733 int ex = 0; 741 734 struct super_block *sb = dquot->dq_sb; 742 735 struct ocfs2_super *osb = OCFS2_SB(sb); 743 - int type = dquot->dq_type; 736 + int type = dquot->dq_id.type; 744 737 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; 745 738 struct 
inode *gqinode = info->dqi_gqinode; 746 739 int need_alloc = ocfs2_global_qinit_alloc(sb, type); 747 740 handle_t *handle; 748 741 749 - trace_ocfs2_acquire_dquot(dquot->dq_id, type); 742 + trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id), 743 + type); 750 744 mutex_lock(&dquot->dq_lock); 751 745 /* 752 746 * We need an exclusive lock, because we're going to update use count ··· 829 821 int sync = 0; 830 822 int status; 831 823 struct super_block *sb = dquot->dq_sb; 832 - int type = dquot->dq_type; 824 + int type = dquot->dq_id.type; 833 825 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 834 826 handle_t *handle; 835 827 struct ocfs2_super *osb = OCFS2_SB(sb); 836 828 837 - trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type); 829 + trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id), 830 + type); 838 831 839 832 /* In case user set some limits, sync dquot immediately to global 840 833 * quota file so that information propagates quicker */
+9 -6
fs/ocfs2/quota_local.c
··· 501 501 } 502 502 dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data + 503 503 ol_dqblk_block_off(sb, chunk, bit)); 504 - dquot = dqget(sb, le64_to_cpu(dqblk->dqb_id), type); 504 + dquot = dqget(sb, 505 + make_kqid(&init_user_ns, type, 506 + le64_to_cpu(dqblk->dqb_id))); 505 507 if (!dquot) { 506 508 status = -EIO; 507 509 mlog(ML_ERROR, "Failed to get quota structure " ··· 883 881 dqblk = (struct ocfs2_local_disk_dqblk *)(bh->b_data 884 882 + ol_dqblk_block_offset(sb, od->dq_local_off)); 885 883 886 - dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id); 884 + dqblk->dqb_id = cpu_to_le64(from_kqid(&init_user_ns, 885 + od->dq_dquot.dq_id)); 887 886 spin_lock(&dq_data_lock); 888 887 dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace - 889 888 od->dq_origspace); ··· 894 891 trace_olq_set_dquot( 895 892 (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod), 896 893 (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod), 897 - od->dq_dquot.dq_id); 894 + from_kqid(&init_user_ns, od->dq_dquot.dq_id)); 898 895 } 899 896 900 897 /* Write dquot to local quota file */ ··· 903 900 struct super_block *sb = dquot->dq_sb; 904 901 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 905 902 struct buffer_head *bh; 906 - struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type]; 903 + struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_id.type]; 907 904 int status; 908 905 909 906 status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk, ··· 1224 1221 int ocfs2_create_local_dquot(struct dquot *dquot) 1225 1222 { 1226 1223 struct super_block *sb = dquot->dq_sb; 1227 - int type = dquot->dq_type; 1224 + int type = dquot->dq_id.type; 1228 1225 struct inode *lqinode = sb_dqopt(sb)->files[type]; 1229 1226 struct ocfs2_quota_chunk *chunk; 1230 1227 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); ··· 1278 1275 int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) 1279 1276 { 1280 1277 int status; 1281 - int type = dquot->dq_type; 1278 + int type = 
dquot->dq_id.type; 1282 1279 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 1283 1280 struct super_block *sb = dquot->dq_sb; 1284 1281 struct ocfs2_local_disk_chunk *dchunk;
+6 -2
fs/omfs/inode.c
··· 391 391 case Opt_uid: 392 392 if (match_int(&args[0], &option)) 393 393 return 0; 394 - sbi->s_uid = option; 394 + sbi->s_uid = make_kuid(current_user_ns(), option); 395 + if (!uid_valid(sbi->s_uid)) 396 + return 0; 395 397 break; 396 398 case Opt_gid: 397 399 if (match_int(&args[0], &option)) 398 400 return 0; 399 - sbi->s_gid = option; 401 + sbi->s_gid = make_kgid(current_user_ns(), option); 402 + if (!gid_valid(sbi->s_gid)) 403 + return 0; 400 404 break; 401 405 case Opt_umask: 402 406 if (match_octal(&args[0], &option))
+2 -2
fs/omfs/omfs.h
··· 19 19 unsigned long **s_imap; 20 20 int s_imap_size; 21 21 struct mutex s_bitmap_lock; 22 - int s_uid; 23 - int s_gid; 22 + kuid_t s_uid; 23 + kgid_t s_gid; 24 24 int s_dmask; 25 25 int s_fmask; 26 26 };
+1 -1
fs/open.c
··· 534 534 newattrs.ia_valid |= 535 535 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; 536 536 mutex_lock(&inode->i_mutex); 537 - error = security_path_chown(path, user, group); 537 + error = security_path_chown(path, uid, gid); 538 538 if (!error) 539 539 error = notify_change(path->dentry, &newattrs); 540 540 mutex_unlock(&inode->i_mutex);
+15 -15
fs/posix_acl.c
··· 78 78 { 79 79 const struct posix_acl_entry *pa, *pe; 80 80 int state = ACL_USER_OBJ; 81 - unsigned int id = 0; /* keep gcc happy */ 81 + kuid_t prev_uid = INVALID_UID; 82 + kgid_t prev_gid = INVALID_GID; 82 83 int needs_mask = 0; 83 84 84 85 FOREACH_ACL_ENTRY(pa, acl, pe) { ··· 88 87 switch (pa->e_tag) { 89 88 case ACL_USER_OBJ: 90 89 if (state == ACL_USER_OBJ) { 91 - id = 0; 92 90 state = ACL_USER; 93 91 break; 94 92 } ··· 96 96 case ACL_USER: 97 97 if (state != ACL_USER) 98 98 return -EINVAL; 99 - if (pa->e_id == ACL_UNDEFINED_ID || 100 - pa->e_id < id) 99 + if (!uid_valid(pa->e_uid)) 101 100 return -EINVAL; 102 - id = pa->e_id + 1; 101 + if (uid_valid(prev_uid) && 102 + uid_lte(pa->e_uid, prev_uid)) 103 + return -EINVAL; 104 + prev_uid = pa->e_uid; 103 105 needs_mask = 1; 104 106 break; 105 107 106 108 case ACL_GROUP_OBJ: 107 109 if (state == ACL_USER) { 108 - id = 0; 109 110 state = ACL_GROUP; 110 111 break; 111 112 } ··· 115 114 case ACL_GROUP: 116 115 if (state != ACL_GROUP) 117 116 return -EINVAL; 118 - if (pa->e_id == ACL_UNDEFINED_ID || 119 - pa->e_id < id) 117 + if (!gid_valid(pa->e_gid)) 120 118 return -EINVAL; 121 - id = pa->e_id + 1; 119 + if (gid_valid(prev_gid) && 120 + gid_lte(pa->e_gid, prev_gid)) 121 + return -EINVAL; 122 + prev_gid = pa->e_gid; 122 123 needs_mask = 1; 123 124 break; 124 125 ··· 198 195 return ERR_PTR(-ENOMEM); 199 196 200 197 acl->a_entries[0].e_tag = ACL_USER_OBJ; 201 - acl->a_entries[0].e_id = ACL_UNDEFINED_ID; 202 198 acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6; 203 199 204 200 acl->a_entries[1].e_tag = ACL_GROUP_OBJ; 205 - acl->a_entries[1].e_id = ACL_UNDEFINED_ID; 206 201 acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3; 207 202 208 203 acl->a_entries[2].e_tag = ACL_OTHER; 209 - acl->a_entries[2].e_id = ACL_UNDEFINED_ID; 210 204 acl->a_entries[2].e_perm = (mode & S_IRWXO); 211 205 return acl; 212 206 } ··· 224 224 switch(pa->e_tag) { 225 225 case ACL_USER_OBJ: 226 226 /* (May have been checked already) */ 227 - if 
(inode->i_uid == current_fsuid()) 227 + if (uid_eq(inode->i_uid, current_fsuid())) 228 228 goto check_perm; 229 229 break; 230 230 case ACL_USER: 231 - if (pa->e_id == current_fsuid()) 231 + if (uid_eq(pa->e_uid, current_fsuid())) 232 232 goto mask; 233 233 break; 234 234 case ACL_GROUP_OBJ: ··· 239 239 } 240 240 break; 241 241 case ACL_GROUP: 242 - if (in_group_p(pa->e_id)) { 242 + if (in_group_p(pa->e_gid)) { 243 243 found = 1; 244 244 if ((pa->e_perm & want) == want) 245 245 goto mask;
+25 -2
fs/proc/base.c
··· 1089 1089 if (!task) 1090 1090 return -ESRCH; 1091 1091 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1092 - audit_get_loginuid(task)); 1092 + from_kuid(file->f_cred->user_ns, 1093 + audit_get_loginuid(task))); 1093 1094 put_task_struct(task); 1094 1095 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1095 1096 } ··· 1102 1101 char *page, *tmp; 1103 1102 ssize_t length; 1104 1103 uid_t loginuid; 1104 + kuid_t kloginuid; 1105 1105 1106 1106 rcu_read_lock(); 1107 1107 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { ··· 1132 1130 goto out_free_page; 1133 1131 1134 1132 } 1135 - length = audit_set_loginuid(loginuid); 1133 + kloginuid = make_kuid(file->f_cred->user_ns, loginuid); 1134 + if (!uid_valid(kloginuid)) { 1135 + length = -EINVAL; 1136 + goto out_free_page; 1137 + } 1138 + 1139 + length = audit_set_loginuid(kloginuid); 1136 1140 if (likely(length == 0)) 1137 1141 length = count; 1138 1142 ··· 2991 2983 return proc_id_map_open(inode, file, &proc_gid_seq_operations); 2992 2984 } 2993 2985 2986 + static int proc_projid_map_open(struct inode *inode, struct file *file) 2987 + { 2988 + return proc_id_map_open(inode, file, &proc_projid_seq_operations); 2989 + } 2990 + 2994 2991 static const struct file_operations proc_uid_map_operations = { 2995 2992 .open = proc_uid_map_open, 2996 2993 .write = proc_uid_map_write, ··· 3007 2994 static const struct file_operations proc_gid_map_operations = { 3008 2995 .open = proc_gid_map_open, 3009 2996 .write = proc_gid_map_write, 2997 + .read = seq_read, 2998 + .llseek = seq_lseek, 2999 + .release = proc_id_map_release, 3000 + }; 3001 + 3002 + static const struct file_operations proc_projid_map_operations = { 3003 + .open = proc_projid_map_open, 3004 + .write = proc_projid_map_write, 3010 3005 .read = seq_read, 3011 3006 .llseek = seq_lseek, 3012 3007 .release = proc_id_map_release, ··· 3126 3105 #ifdef CONFIG_USER_NS 3127 3106 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), 3128 3107 
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), 3108 + REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 3129 3109 #endif 3130 3110 }; 3131 3111 ··· 3490 3468 #ifdef CONFIG_USER_NS 3491 3469 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), 3492 3470 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), 3471 + REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 3493 3472 #endif 3494 3473 }; 3495 3474
+2 -2
fs/qnx4/inode.c
··· 312 312 (ino % QNX4_INODES_PER_BLOCK); 313 313 314 314 inode->i_mode = le16_to_cpu(raw_inode->di_mode); 315 - inode->i_uid = (uid_t)le16_to_cpu(raw_inode->di_uid); 316 - inode->i_gid = (gid_t)le16_to_cpu(raw_inode->di_gid); 315 + i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid)); 316 + i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid)); 317 317 set_nlink(inode, le16_to_cpu(raw_inode->di_nlink)); 318 318 inode->i_size = le32_to_cpu(raw_inode->di_size); 319 319 inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
+2 -2
fs/qnx6/inode.c
··· 574 574 raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs; 575 575 576 576 inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode); 577 - inode->i_uid = (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid); 578 - inode->i_gid = (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid); 577 + i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid)); 578 + i_gid_write(inode, (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid)); 579 579 inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size); 580 580 inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime); 581 581 inode->i_mtime.tv_nsec = 0;
+1 -1
fs/quota/Makefile
··· 2 2 obj-$(CONFIG_QFMT_V1) += quota_v1.o 3 3 obj-$(CONFIG_QFMT_V2) += quota_v2.o 4 4 obj-$(CONFIG_QUOTA_TREE) += quota_tree.o 5 - obj-$(CONFIG_QUOTACTL) += quota.o 5 + obj-$(CONFIG_QUOTACTL) += quota.o kqid.o 6 6 obj-$(CONFIG_QUOTACTL_COMPAT) += compat.o 7 7 obj-$(CONFIG_QUOTA_NETLINK_INTERFACE) += netlink.o
+57 -57
fs/quota/dquot.c
··· 253 253 static void __dquot_initialize(struct inode *inode, int type); 254 254 255 255 static inline unsigned int 256 - hashfn(const struct super_block *sb, unsigned int id, int type) 256 + hashfn(const struct super_block *sb, struct kqid qid) 257 257 { 258 + unsigned int id = from_kqid(&init_user_ns, qid); 259 + int type = qid.type; 258 260 unsigned long tmp; 259 261 260 262 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); ··· 269 267 static inline void insert_dquot_hash(struct dquot *dquot) 270 268 { 271 269 struct hlist_head *head; 272 - head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); 270 + head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id); 273 271 hlist_add_head(&dquot->dq_hash, head); 274 272 } 275 273 ··· 279 277 } 280 278 281 279 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, 282 - unsigned int id, int type) 280 + struct kqid qid) 283 281 { 284 282 struct hlist_node *node; 285 283 struct dquot *dquot; 286 284 287 285 hlist_for_each (node, dquot_hash+hashent) { 288 286 dquot = hlist_entry(node, struct dquot, dq_hash); 289 - if (dquot->dq_sb == sb && dquot->dq_id == id && 290 - dquot->dq_type == type) 287 + if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid)) 291 288 return dquot; 292 289 } 293 290 return NULL; ··· 352 351 spin_lock(&dq_list_lock); 353 352 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) { 354 353 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> 355 - info[dquot->dq_type].dqi_dirty_list); 354 + info[dquot->dq_id.type].dqi_dirty_list); 356 355 ret = 0; 357 356 } 358 357 spin_unlock(&dq_list_lock); ··· 411 410 mutex_lock(&dquot->dq_lock); 412 411 mutex_lock(&dqopt->dqio_mutex); 413 412 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) 414 - ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); 413 + ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); 415 414 if (ret < 0) 416 415 goto out_iolock; 417 416 set_bit(DQ_READ_B, &dquot->dq_flags); 418 417 /* 
Instantiate dquot if needed */ 419 418 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { 420 - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 419 + ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); 421 420 /* Write the info if needed */ 422 - if (info_dirty(&dqopt->info[dquot->dq_type])) { 423 - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( 424 - dquot->dq_sb, dquot->dq_type); 421 + if (info_dirty(&dqopt->info[dquot->dq_id.type])) { 422 + ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( 423 + dquot->dq_sb, dquot->dq_id.type); 425 424 } 426 425 if (ret < 0) 427 426 goto out_iolock; ··· 456 455 /* Inactive dquot can be only if there was error during read/init 457 456 * => we have better not writing it */ 458 457 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) 459 - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 458 + ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); 460 459 else 461 460 ret = -EIO; 462 461 out_sem: ··· 478 477 if (atomic_read(&dquot->dq_count) > 1) 479 478 goto out_dqlock; 480 479 mutex_lock(&dqopt->dqio_mutex); 481 - if (dqopt->ops[dquot->dq_type]->release_dqblk) { 482 - ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); 480 + if (dqopt->ops[dquot->dq_id.type]->release_dqblk) { 481 + ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot); 483 482 /* Write the info */ 484 - if (info_dirty(&dqopt->info[dquot->dq_type])) { 485 - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( 486 - dquot->dq_sb, dquot->dq_type); 483 + if (info_dirty(&dqopt->info[dquot->dq_id.type])) { 484 + ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( 485 + dquot->dq_sb, dquot->dq_id.type); 487 486 } 488 487 if (ret >= 0) 489 488 ret = ret2; ··· 522 521 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { 523 522 if (dquot->dq_sb != sb) 524 523 continue; 525 - if (dquot->dq_type != type) 524 + if (dquot->dq_id.type != type) 526 525 continue; 527 526 /* Wait for dquot users */ 528 527 if 
(atomic_read(&dquot->dq_count)) { ··· 742 741 #ifdef CONFIG_QUOTA_DEBUG 743 742 if (!atomic_read(&dquot->dq_count)) { 744 743 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d", 745 - quotatypes[dquot->dq_type], dquot->dq_id); 744 + quotatypes[dquot->dq_id.type], 745 + from_kqid(&init_user_ns, dquot->dq_id)); 746 746 BUG(); 747 747 } 748 748 #endif ··· 754 752 /* We have more than one user... nothing to do */ 755 753 atomic_dec(&dquot->dq_count); 756 754 /* Releasing dquot during quotaoff phase? */ 757 - if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && 755 + if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) && 758 756 atomic_read(&dquot->dq_count) == 1) 759 757 wake_up(&dquot->dq_wait_unused); 760 758 spin_unlock(&dq_list_lock); ··· 817 815 INIT_LIST_HEAD(&dquot->dq_dirty); 818 816 init_waitqueue_head(&dquot->dq_wait_unused); 819 817 dquot->dq_sb = sb; 820 - dquot->dq_type = type; 818 + dquot->dq_id = make_kqid_invalid(type); 821 819 atomic_set(&dquot->dq_count, 1); 822 820 823 821 return dquot; ··· 831 829 * a) checking for quota flags under dq_list_lock and 832 830 * b) getting a reference to dquot before we release dq_list_lock 833 831 */ 834 - struct dquot *dqget(struct super_block *sb, unsigned int id, int type) 832 + struct dquot *dqget(struct super_block *sb, struct kqid qid) 835 833 { 836 - unsigned int hashent = hashfn(sb, id, type); 834 + unsigned int hashent = hashfn(sb, qid); 837 835 struct dquot *dquot = NULL, *empty = NULL; 838 836 839 - if (!sb_has_quota_active(sb, type)) 837 + if (!sb_has_quota_active(sb, qid.type)) 840 838 return NULL; 841 839 we_slept: 842 840 spin_lock(&dq_list_lock); 843 841 spin_lock(&dq_state_lock); 844 - if (!sb_has_quota_active(sb, type)) { 842 + if (!sb_has_quota_active(sb, qid.type)) { 845 843 spin_unlock(&dq_state_lock); 846 844 spin_unlock(&dq_list_lock); 847 845 goto out; 848 846 } 849 847 spin_unlock(&dq_state_lock); 850 848 851 - dquot = find_dquot(hashent, sb, id, type); 849 + dquot = 
find_dquot(hashent, sb, qid); 852 850 if (!dquot) { 853 851 if (!empty) { 854 852 spin_unlock(&dq_list_lock); 855 - empty = get_empty_dquot(sb, type); 853 + empty = get_empty_dquot(sb, qid.type); 856 854 if (!empty) 857 855 schedule(); /* Try to wait for a moment... */ 858 856 goto we_slept; 859 857 } 860 858 dquot = empty; 861 859 empty = NULL; 862 - dquot->dq_id = id; 860 + dquot->dq_id = qid; 863 861 /* all dquots go on the inuse_list */ 864 862 put_inuse(dquot); 865 863 /* hash it first so it can be found */ ··· 1131 1129 1132 1130 struct dquot_warn { 1133 1131 struct super_block *w_sb; 1134 - qid_t w_dq_id; 1135 - short w_dq_type; 1132 + struct kqid w_dq_id; 1136 1133 short w_type; 1137 1134 }; 1138 1135 ··· 1155 1154 if (!flag_print_warnings) 1156 1155 return 0; 1157 1156 1158 - switch (warn->w_dq_type) { 1157 + switch (warn->w_dq_id.type) { 1159 1158 case USRQUOTA: 1160 - return current_fsuid() == warn->w_dq_id; 1159 + return uid_eq(current_fsuid(), warn->w_dq_id.uid); 1161 1160 case GRPQUOTA: 1162 - return in_group_p(warn->w_dq_id); 1161 + return in_group_p(warn->w_dq_id.gid); 1163 1162 } 1164 1163 return 0; 1165 1164 } ··· 1185 1184 tty_write_message(tty, ": warning, "); 1186 1185 else 1187 1186 tty_write_message(tty, ": write failed, "); 1188 - tty_write_message(tty, quotatypes[warn->w_dq_type]); 1187 + tty_write_message(tty, quotatypes[warn->w_dq_id.type]); 1189 1188 switch (warntype) { 1190 1189 case QUOTA_NL_IHARDWARN: 1191 1190 msg = " file limit reached.\r\n"; ··· 1219 1218 warn->w_type = warntype; 1220 1219 warn->w_sb = dquot->dq_sb; 1221 1220 warn->w_dq_id = dquot->dq_id; 1222 - warn->w_dq_type = dquot->dq_type; 1223 1221 } 1224 1222 1225 1223 /* ··· 1236 1236 #ifdef CONFIG_PRINT_QUOTA_WARNING 1237 1237 print_warning(&warn[i]); 1238 1238 #endif 1239 - quota_send_warning(warn[i].w_dq_type, warn[i].w_dq_id, 1239 + quota_send_warning(warn[i].w_dq_id, 1240 1240 warn[i].w_sb->s_dev, warn[i].w_type); 1241 1241 } 1242 1242 } 1243 1243 1244 1244 static int 
ignore_hardlimit(struct dquot *dquot) 1245 1245 { 1246 - struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 1246 + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 1247 1247 1248 1248 return capable(CAP_SYS_RESOURCE) && 1249 1249 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || ··· 1256 1256 { 1257 1257 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; 1258 1258 1259 - if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1259 + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) || 1260 1260 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1261 1261 return 0; 1262 1262 ··· 1281 1281 dquot->dq_dqb.dqb_itime == 0) { 1282 1282 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN); 1283 1283 dquot->dq_dqb.dqb_itime = get_seconds() + 1284 - sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1284 + sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace; 1285 1285 } 1286 1286 1287 1287 return 0; ··· 1294 1294 qsize_t tspace; 1295 1295 struct super_block *sb = dquot->dq_sb; 1296 1296 1297 - if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || 1297 + if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || 1298 1298 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1299 1299 return 0; 1300 1300 ··· 1325 1325 if (!prealloc) { 1326 1326 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN); 1327 1327 dquot->dq_dqb.dqb_btime = get_seconds() + 1328 - sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace; 1328 + sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace; 1329 1329 } 1330 1330 else 1331 1331 /* ··· 1344 1344 1345 1345 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || 1346 1346 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || 1347 - !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) 1347 + !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type)) 1348 1348 return QUOTA_NL_NOWARN; 1349 1349 1350 1350 newinodes = dquot->dq_dqb.dqb_curinodes - inodes; ··· 1390 1390 */ 1391 1391 static void 
__dquot_initialize(struct inode *inode, int type) 1392 1392 { 1393 - unsigned int id = 0; 1394 1393 int cnt; 1395 1394 struct dquot *got[MAXQUOTAS]; 1396 1395 struct super_block *sb = inode->i_sb; ··· 1402 1403 1403 1404 /* First get references to structures we might need. */ 1404 1405 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1406 + struct kqid qid; 1405 1407 got[cnt] = NULL; 1406 1408 if (type != -1 && cnt != type) 1407 1409 continue; 1408 1410 switch (cnt) { 1409 1411 case USRQUOTA: 1410 - id = inode->i_uid; 1412 + qid = make_kqid_uid(inode->i_uid); 1411 1413 break; 1412 1414 case GRPQUOTA: 1413 - id = inode->i_gid; 1415 + qid = make_kqid_gid(inode->i_gid); 1414 1416 break; 1415 1417 } 1416 - got[cnt] = dqget(sb, id, cnt); 1418 + got[cnt] = dqget(sb, qid); 1417 1419 } 1418 1420 1419 1421 down_write(&sb_dqopt(sb)->dqptr_sem); ··· 1897 1897 if (!dquot_active(inode)) 1898 1898 return 0; 1899 1899 1900 - if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) 1901 - transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA); 1902 - if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) 1903 - transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA); 1900 + if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) 1901 + transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid)); 1902 + if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) 1903 + transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid)); 1904 1904 1905 1905 ret = __dquot_transfer(inode, transfer_to); 1906 1906 dqput_all(transfer_to); ··· 2360 2360 2361 2361 memset(di, 0, sizeof(*di)); 2362 2362 di->d_version = FS_DQUOT_VERSION; 2363 - di->d_flags = dquot->dq_type == USRQUOTA ? 2363 + di->d_flags = dquot->dq_id.type == USRQUOTA ? 
2364 2364 FS_USER_QUOTA : FS_GROUP_QUOTA; 2365 - di->d_id = dquot->dq_id; 2365 + di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); 2366 2366 2367 2367 spin_lock(&dq_data_lock); 2368 2368 di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); ··· 2376 2376 spin_unlock(&dq_data_lock); 2377 2377 } 2378 2378 2379 - int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, 2379 + int dquot_get_dqblk(struct super_block *sb, struct kqid qid, 2380 2380 struct fs_disk_quota *di) 2381 2381 { 2382 2382 struct dquot *dquot; 2383 2383 2384 - dquot = dqget(sb, id, type); 2384 + dquot = dqget(sb, qid); 2385 2385 if (!dquot) 2386 2386 return -ESRCH; 2387 2387 do_get_dqblk(dquot, di); ··· 2401 2401 { 2402 2402 struct mem_dqblk *dm = &dquot->dq_dqb; 2403 2403 int check_blim = 0, check_ilim = 0; 2404 - struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 2404 + struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 2405 2405 2406 2406 if (di->d_fieldmask & ~VFS_FS_DQ_MASK) 2407 2407 return -EINVAL; ··· 2488 2488 return 0; 2489 2489 } 2490 2490 2491 - int dquot_set_dqblk(struct super_block *sb, int type, qid_t id, 2491 + int dquot_set_dqblk(struct super_block *sb, struct kqid qid, 2492 2492 struct fs_disk_quota *di) 2493 2493 { 2494 2494 struct dquot *dquot; 2495 2495 int rc; 2496 2496 2497 - dquot = dqget(sb, id, type); 2497 + dquot = dqget(sb, qid); 2498 2498 if (!dquot) { 2499 2499 rc = -ESRCH; 2500 2500 goto out;
+132
fs/quota/kqid.c
··· 1 + #include <linux/fs.h> 2 + #include <linux/quota.h> 3 + #include <linux/export.h> 4 + 5 + /** 6 + * qid_eq - Test to see if to kquid values are the same 7 + * @left: A qid value 8 + * @right: Another quid value 9 + * 10 + * Return true if the two qid values are equal and false otherwise. 11 + */ 12 + bool qid_eq(struct kqid left, struct kqid right) 13 + { 14 + if (left.type != right.type) 15 + return false; 16 + switch(left.type) { 17 + case USRQUOTA: 18 + return uid_eq(left.uid, right.uid); 19 + case GRPQUOTA: 20 + return gid_eq(left.gid, right.gid); 21 + case PRJQUOTA: 22 + return projid_eq(left.projid, right.projid); 23 + default: 24 + BUG(); 25 + } 26 + } 27 + EXPORT_SYMBOL(qid_eq); 28 + 29 + /** 30 + * qid_lt - Test to see if one qid value is less than another 31 + * @left: The possibly lesser qid value 32 + * @right: The possibly greater qid value 33 + * 34 + * Return true if left is less than right and false otherwise. 35 + */ 36 + bool qid_lt(struct kqid left, struct kqid right) 37 + { 38 + if (left.type < right.type) 39 + return true; 40 + if (left.type > right.type) 41 + return false; 42 + switch (left.type) { 43 + case USRQUOTA: 44 + return uid_lt(left.uid, right.uid); 45 + case GRPQUOTA: 46 + return gid_lt(left.gid, right.gid); 47 + case PRJQUOTA: 48 + return projid_lt(left.projid, right.projid); 49 + default: 50 + BUG(); 51 + } 52 + } 53 + EXPORT_SYMBOL(qid_lt); 54 + 55 + /** 56 + * from_kqid - Create a qid from a kqid user-namespace pair. 57 + * @targ: The user namespace we want a qid in. 58 + * @kuid: The kernel internal quota identifier to start with. 59 + * 60 + * Map @kqid into the user-namespace specified by @targ and 61 + * return the resulting qid. 62 + * 63 + * There is always a mapping into the initial user_namespace. 64 + * 65 + * If @kqid has no mapping in @targ (qid_t)-1 is returned. 
66 + */ 67 + qid_t from_kqid(struct user_namespace *targ, struct kqid kqid) 68 + { 69 + switch (kqid.type) { 70 + case USRQUOTA: 71 + return from_kuid(targ, kqid.uid); 72 + case GRPQUOTA: 73 + return from_kgid(targ, kqid.gid); 74 + case PRJQUOTA: 75 + return from_kprojid(targ, kqid.projid); 76 + default: 77 + BUG(); 78 + } 79 + } 80 + EXPORT_SYMBOL(from_kqid); 81 + 82 + /** 83 + * from_kqid_munged - Create a qid from a kqid user-namespace pair. 84 + * @targ: The user namespace we want a qid in. 85 + * @kqid: The kernel internal quota identifier to start with. 86 + * 87 + * Map @kqid into the user-namespace specified by @targ and 88 + * return the resulting qid. 89 + * 90 + * There is always a mapping into the initial user_namespace. 91 + * 92 + * Unlike from_kqid from_kqid_munged never fails and always 93 + * returns a valid projid. This makes from_kqid_munged 94 + * appropriate for use in places where failing to provide 95 + * a qid_t is not a good option. 96 + * 97 + * If @kqid has no mapping in @targ the kqid.type specific 98 + * overflow identifier is returned. 99 + */ 100 + qid_t from_kqid_munged(struct user_namespace *targ, struct kqid kqid) 101 + { 102 + switch (kqid.type) { 103 + case USRQUOTA: 104 + return from_kuid_munged(targ, kqid.uid); 105 + case GRPQUOTA: 106 + return from_kgid_munged(targ, kqid.gid); 107 + case PRJQUOTA: 108 + return from_kprojid_munged(targ, kqid.projid); 109 + default: 110 + BUG(); 111 + } 112 + } 113 + EXPORT_SYMBOL(from_kqid_munged); 114 + 115 + /** 116 + * qid_valid - Report if a valid value is stored in a kqid. 117 + * @qid: The kernel internal quota identifier to test. 118 + */ 119 + bool qid_valid(struct kqid qid) 120 + { 121 + switch (qid.type) { 122 + case USRQUOTA: 123 + return uid_valid(qid.uid); 124 + case GRPQUOTA: 125 + return gid_valid(qid.gid); 126 + case PRJQUOTA: 127 + return projid_valid(qid.projid); 128 + default: 129 + BUG(); 130 + } 131 + } 132 + EXPORT_SYMBOL(qid_valid);
+6 -4
fs/quota/netlink.c
··· 30 30 * 31 31 */ 32 32 33 - void quota_send_warning(short type, unsigned int id, dev_t dev, 33 + void quota_send_warning(struct kqid qid, dev_t dev, 34 34 const char warntype) 35 35 { 36 36 static atomic_t seq; ··· 56 56 "VFS: Cannot store netlink header in quota warning.\n"); 57 57 goto err_out; 58 58 } 59 - ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); 59 + ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type); 60 60 if (ret) 61 61 goto attr_err_out; 62 - ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); 62 + ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, 63 + from_kqid_munged(&init_user_ns, qid)); 63 64 if (ret) 64 65 goto attr_err_out; 65 66 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); ··· 72 71 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); 73 72 if (ret) 74 73 goto attr_err_out; 75 - ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); 74 + ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, 75 + from_kuid_munged(&init_user_ns, current_uid())); 76 76 if (ret) 77 77 goto attr_err_out; 78 78 genlmsg_end(skb, msg_head);
+22 -6
fs/quota/quota.c
··· 32 32 /* allow to query information for dquots we "own" */ 33 33 case Q_GETQUOTA: 34 34 case Q_XGETQUOTA: 35 - if ((type == USRQUOTA && current_euid() == id) || 36 - (type == GRPQUOTA && in_egroup_p(id))) 35 + if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) || 36 + (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id)))) 37 37 break; 38 38 /*FALLTHROUGH*/ 39 39 default: ··· 130 130 static int quota_getquota(struct super_block *sb, int type, qid_t id, 131 131 void __user *addr) 132 132 { 133 + struct kqid qid; 133 134 struct fs_disk_quota fdq; 134 135 struct if_dqblk idq; 135 136 int ret; 136 137 137 138 if (!sb->s_qcop->get_dqblk) 138 139 return -ENOSYS; 139 - ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq); 140 + qid = make_kqid(current_user_ns(), type, id); 141 + if (!qid_valid(qid)) 142 + return -EINVAL; 143 + ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); 140 144 if (ret) 141 145 return ret; 142 146 copy_to_if_dqblk(&idq, &fdq); ··· 180 176 { 181 177 struct fs_disk_quota fdq; 182 178 struct if_dqblk idq; 179 + struct kqid qid; 183 180 184 181 if (copy_from_user(&idq, addr, sizeof(idq))) 185 182 return -EFAULT; 186 183 if (!sb->s_qcop->set_dqblk) 187 184 return -ENOSYS; 185 + qid = make_kqid(current_user_ns(), type, id); 186 + if (!qid_valid(qid)) 187 + return -EINVAL; 188 188 copy_from_if_dqblk(&fdq, &idq); 189 - return sb->s_qcop->set_dqblk(sb, type, id, &fdq); 189 + return sb->s_qcop->set_dqblk(sb, qid, &fdq); 190 190 } 191 191 192 192 static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr) ··· 221 213 void __user *addr) 222 214 { 223 215 struct fs_disk_quota fdq; 216 + struct kqid qid; 224 217 225 218 if (copy_from_user(&fdq, addr, sizeof(fdq))) 226 219 return -EFAULT; 227 220 if (!sb->s_qcop->set_dqblk) 228 221 return -ENOSYS; 229 - return sb->s_qcop->set_dqblk(sb, type, id, &fdq); 222 + qid = make_kqid(current_user_ns(), type, id); 223 + if (!qid_valid(qid)) 224 + return -EINVAL; 225 + 
return sb->s_qcop->set_dqblk(sb, qid, &fdq); 230 226 } 231 227 232 228 static int quota_getxquota(struct super_block *sb, int type, qid_t id, 233 229 void __user *addr) 234 230 { 235 231 struct fs_disk_quota fdq; 232 + struct kqid qid; 236 233 int ret; 237 234 238 235 if (!sb->s_qcop->get_dqblk) 239 236 return -ENOSYS; 240 - ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq); 237 + qid = make_kqid(current_user_ns(), type, id); 238 + if (!qid_valid(qid)) 239 + return -EINVAL; 240 + ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); 241 241 if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) 242 242 return -EFAULT; 243 243 return ret;
+13 -9
fs/quota/quota_tree.c
··· 22 22 23 23 #define __QUOTA_QT_PARANOIA 24 24 25 - static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) 25 + static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth) 26 26 { 27 27 unsigned int epb = info->dqi_usable_bs >> 2; 28 + qid_t id = from_kqid(&init_user_ns, qid); 28 29 29 30 depth = info->dqi_qtree_depth - depth - 1; 30 31 while (depth--) ··· 245 244 /* This is enough as the block is already zeroed and the entry 246 245 * list is empty... */ 247 246 info->dqi_free_entry = blk; 248 - mark_info_dirty(dquot->dq_sb, dquot->dq_type); 247 + mark_info_dirty(dquot->dq_sb, dquot->dq_id.type); 249 248 } 250 249 /* Block will be full? */ 251 250 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { ··· 358 357 */ 359 358 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 360 359 { 361 - int type = dquot->dq_type; 360 + int type = dquot->dq_id.type; 362 361 struct super_block *sb = dquot->dq_sb; 363 362 ssize_t ret; 364 363 char *ddquot = getdqbuf(info->dqi_entry_size); ··· 539 538 ddquot += info->dqi_entry_size; 540 539 } 541 540 if (i == qtree_dqstr_in_blk(info)) { 542 - quota_error(dquot->dq_sb, "Quota for id %u referenced " 543 - "but not present", dquot->dq_id); 541 + quota_error(dquot->dq_sb, 542 + "Quota for id %u referenced but not present", 543 + from_kqid(&init_user_ns, dquot->dq_id)); 544 544 ret = -EIO; 545 545 goto out_buf; 546 546 } else { ··· 591 589 592 590 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 593 591 { 594 - int type = dquot->dq_type; 592 + int type = dquot->dq_id.type; 595 593 struct super_block *sb = dquot->dq_sb; 596 594 loff_t offset; 597 595 char *ddquot; ··· 609 607 offset = find_dqentry(info, dquot); 610 608 if (offset <= 0) { /* Entry not present? 
*/ 611 609 if (offset < 0) 612 - quota_error(sb, "Can't read quota structure " 613 - "for id %u", dquot->dq_id); 610 + quota_error(sb,"Can't read quota structure " 611 + "for id %u", 612 + from_kqid(&init_user_ns, 613 + dquot->dq_id)); 614 614 dquot->dq_off = 0; 615 615 set_bit(DQ_FAKE_B, &dquot->dq_flags); 616 616 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); ··· 630 626 if (ret >= 0) 631 627 ret = -EIO; 632 628 quota_error(sb, "Error while reading quota structure for id %u", 633 - dquot->dq_id); 629 + from_kqid(&init_user_ns, dquot->dq_id)); 634 630 set_bit(DQ_FAKE_B, &dquot->dq_flags); 635 631 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); 636 632 kfree(ddquot);
+7 -5
fs/quota/quota_v1.c
··· 54 54 55 55 static int v1_read_dqblk(struct dquot *dquot) 56 56 { 57 - int type = dquot->dq_type; 57 + int type = dquot->dq_id.type; 58 58 struct v1_disk_dqblk dqblk; 59 59 60 60 if (!sb_dqopt(dquot->dq_sb)->files[type]) ··· 63 63 /* Set structure to 0s in case read fails/is after end of file */ 64 64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); 65 65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, 66 - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 66 + sizeof(struct v1_disk_dqblk), 67 + v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id))); 67 68 68 69 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); 69 70 if (dquot->dq_dqb.dqb_bhardlimit == 0 && ··· 79 78 80 79 static int v1_commit_dqblk(struct dquot *dquot) 81 80 { 82 - short type = dquot->dq_type; 81 + short type = dquot->dq_id.type; 83 82 ssize_t ret; 84 83 struct v1_disk_dqblk dqblk; 85 84 86 85 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); 87 - if (dquot->dq_id == 0) { 86 + if (((type == USRQUOTA) && uid_eq(dquot->dq_id.uid, GLOBAL_ROOT_UID)) || 87 + ((type == GRPQUOTA) && gid_eq(dquot->dq_id.gid, GLOBAL_ROOT_GID))) { 88 88 dqblk.dqb_btime = 89 89 sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; 90 90 dqblk.dqb_itime = ··· 95 93 if (sb_dqopt(dquot->dq_sb)->files[type]) 96 94 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, 97 95 (char *)&dqblk, sizeof(struct v1_disk_dqblk), 98 - v1_dqoff(dquot->dq_id)); 96 + v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id))); 99 97 if (ret != sizeof(struct v1_disk_dqblk)) { 100 98 quota_error(dquot->dq_sb, "dquota write failed"); 101 99 if (ret >= 0)
+15 -11
fs/quota/quota_v2.c
··· 196 196 struct v2r0_disk_dqblk *d = dp; 197 197 struct mem_dqblk *m = &dquot->dq_dqb; 198 198 struct qtree_mem_dqinfo *info = 199 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 199 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 200 200 201 201 d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); 202 202 d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); ··· 206 206 d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); 207 207 d->dqb_curspace = cpu_to_le64(m->dqb_curspace); 208 208 d->dqb_btime = cpu_to_le64(m->dqb_btime); 209 - d->dqb_id = cpu_to_le32(dquot->dq_id); 209 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 210 210 if (qtree_entry_unused(info, dp)) 211 211 d->dqb_itime = cpu_to_le64(1); 212 212 } ··· 215 215 { 216 216 struct v2r0_disk_dqblk *d = dp; 217 217 struct qtree_mem_dqinfo *info = 218 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 218 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 219 219 220 220 if (qtree_entry_unused(info, dp)) 221 221 return 0; 222 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 222 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 223 + le32_to_cpu(d->dqb_id)), 224 + dquot->dq_id); 223 225 } 224 226 225 227 static void v2r1_disk2memdqb(struct dquot *dquot, void *dp) ··· 249 247 struct v2r1_disk_dqblk *d = dp; 250 248 struct mem_dqblk *m = &dquot->dq_dqb; 251 249 struct qtree_mem_dqinfo *info = 252 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 250 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 253 251 254 252 d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); 255 253 d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); ··· 259 257 d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit)); 260 258 d->dqb_curspace = cpu_to_le64(m->dqb_curspace); 261 259 d->dqb_btime = cpu_to_le64(m->dqb_btime); 262 - d->dqb_id = cpu_to_le32(dquot->dq_id); 260 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); 263 261 if 
(qtree_entry_unused(info, dp)) 264 262 d->dqb_itime = cpu_to_le64(1); 265 263 } ··· 268 266 { 269 267 struct v2r1_disk_dqblk *d = dp; 270 268 struct qtree_mem_dqinfo *info = 271 - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 269 + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv; 272 270 273 271 if (qtree_entry_unused(info, dp)) 274 272 return 0; 275 - return le32_to_cpu(d->dqb_id) == dquot->dq_id; 273 + return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type, 274 + le32_to_cpu(d->dqb_id)), 275 + dquot->dq_id); 276 276 } 277 277 278 278 static int v2_read_dquot(struct dquot *dquot) 279 279 { 280 - return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 280 + return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 281 281 } 282 282 283 283 static int v2_write_dquot(struct dquot *dquot) 284 284 { 285 - return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 285 + return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 286 286 } 287 287 288 288 static int v2_release_dquot(struct dquot *dquot) 289 289 { 290 - return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); 290 + return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 291 291 } 292 292 293 293 static int v2_free_file_info(struct super_block *sb, int type)
+13 -13
fs/reiserfs/inode.c
··· 1155 1155 set_inode_sd_version(inode, STAT_DATA_V1); 1156 1156 inode->i_mode = sd_v1_mode(sd); 1157 1157 set_nlink(inode, sd_v1_nlink(sd)); 1158 - inode->i_uid = sd_v1_uid(sd); 1159 - inode->i_gid = sd_v1_gid(sd); 1158 + i_uid_write(inode, sd_v1_uid(sd)); 1159 + i_gid_write(inode, sd_v1_gid(sd)); 1160 1160 inode->i_size = sd_v1_size(sd); 1161 1161 inode->i_atime.tv_sec = sd_v1_atime(sd); 1162 1162 inode->i_mtime.tv_sec = sd_v1_mtime(sd); ··· 1200 1200 1201 1201 inode->i_mode = sd_v2_mode(sd); 1202 1202 set_nlink(inode, sd_v2_nlink(sd)); 1203 - inode->i_uid = sd_v2_uid(sd); 1203 + i_uid_write(inode, sd_v2_uid(sd)); 1204 1204 inode->i_size = sd_v2_size(sd); 1205 - inode->i_gid = sd_v2_gid(sd); 1205 + i_gid_write(inode, sd_v2_gid(sd)); 1206 1206 inode->i_mtime.tv_sec = sd_v2_mtime(sd); 1207 1207 inode->i_atime.tv_sec = sd_v2_atime(sd); 1208 1208 inode->i_ctime.tv_sec = sd_v2_ctime(sd); ··· 1258 1258 1259 1259 set_sd_v2_mode(sd_v2, inode->i_mode); 1260 1260 set_sd_v2_nlink(sd_v2, inode->i_nlink); 1261 - set_sd_v2_uid(sd_v2, inode->i_uid); 1261 + set_sd_v2_uid(sd_v2, i_uid_read(inode)); 1262 1262 set_sd_v2_size(sd_v2, size); 1263 - set_sd_v2_gid(sd_v2, inode->i_gid); 1263 + set_sd_v2_gid(sd_v2, i_gid_read(inode)); 1264 1264 set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec); 1265 1265 set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec); 1266 1266 set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec); ··· 1280 1280 struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd; 1281 1281 1282 1282 set_sd_v1_mode(sd_v1, inode->i_mode); 1283 - set_sd_v1_uid(sd_v1, inode->i_uid); 1284 - set_sd_v1_gid(sd_v1, inode->i_gid); 1283 + set_sd_v1_uid(sd_v1, i_uid_read(inode)); 1284 + set_sd_v1_gid(sd_v1, i_gid_read(inode)); 1285 1285 set_sd_v1_nlink(sd_v1, inode->i_nlink); 1286 1286 set_sd_v1_size(sd_v1, size); 1287 1287 set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec); ··· 1869 1869 goto out_bad_inode; 1870 1870 } 1871 1871 if (old_format_only(sb)) { 1872 - if (inode->i_uid & ~0xffff || inode->i_gid & 
~0xffff) { 1872 + if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) { 1873 1873 pathrelse(&path_to_key); 1874 1874 /* i_uid or i_gid is too big to be stored in stat data v3.5 */ 1875 1875 err = -EINVAL; ··· 3140 3140 } 3141 3141 } 3142 3142 3143 - if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) || 3144 - ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) && 3143 + if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) || 3144 + ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) && 3145 3145 (get_inode_sd_version(inode) == STAT_DATA_V1)) { 3146 3146 /* stat data of format v3.5 has 16 bit uid and gid */ 3147 3147 error = -EINVAL; 3148 3148 goto out; 3149 3149 } 3150 3150 3151 - if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 3152 - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 3151 + if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 3152 + (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 3153 3153 struct reiserfs_transaction_handle th; 3154 3154 int jbegin_count = 3155 3155 2 *
+19 -5
fs/reiserfs/xattr_acl.c
··· 30 30 return -EPERM; 31 31 32 32 if (value) { 33 - acl = posix_acl_from_xattr(value, size); 33 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 34 34 if (IS_ERR(acl)) { 35 35 return PTR_ERR(acl); 36 36 } else if (acl) { ··· 77 77 return PTR_ERR(acl); 78 78 if (acl == NULL) 79 79 return -ENODATA; 80 - error = posix_acl_to_xattr(acl, buffer, size); 80 + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); 81 81 posix_acl_release(acl); 82 82 83 83 return error; ··· 121 121 case ACL_OTHER: 122 122 value = (char *)value + 123 123 sizeof(reiserfs_acl_entry_short); 124 - acl->a_entries[n].e_id = ACL_UNDEFINED_ID; 125 124 break; 126 125 127 126 case ACL_USER: 127 + value = (char *)value + sizeof(reiserfs_acl_entry); 128 + if ((char *)value > end) 129 + goto fail; 130 + acl->a_entries[n].e_uid = 131 + make_kuid(&init_user_ns, 132 + le32_to_cpu(entry->e_id)); 133 + break; 128 134 case ACL_GROUP: 129 135 value = (char *)value + sizeof(reiserfs_acl_entry); 130 136 if ((char *)value > end) 131 137 goto fail; 132 - acl->a_entries[n].e_id = le32_to_cpu(entry->e_id); 138 + acl->a_entries[n].e_gid = 139 + make_kgid(&init_user_ns, 140 + le32_to_cpu(entry->e_id)); 133 141 break; 134 142 135 143 default: ··· 172 164 ext_acl->a_version = cpu_to_le32(REISERFS_ACL_VERSION); 173 165 e = (char *)ext_acl + sizeof(reiserfs_acl_header); 174 166 for (n = 0; n < acl->a_count; n++) { 167 + const struct posix_acl_entry *acl_e = &acl->a_entries[n]; 175 168 reiserfs_acl_entry *entry = (reiserfs_acl_entry *) e; 176 169 entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); 177 170 entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); 178 171 switch (acl->a_entries[n].e_tag) { 179 172 case ACL_USER: 173 + entry->e_id = cpu_to_le32( 174 + from_kuid(&init_user_ns, acl_e->e_uid)); 175 + e += sizeof(reiserfs_acl_entry); 176 + break; 180 177 case ACL_GROUP: 181 - entry->e_id = cpu_to_le32(acl->a_entries[n].e_id); 178 + entry->e_id = cpu_to_le32( 179 + from_kgid(&init_user_ns, 
acl_e->e_gid)); 182 180 e += sizeof(reiserfs_acl_entry); 183 181 break; 184 182
+4
fs/seq_file.c
··· 9 9 #include <linux/export.h> 10 10 #include <linux/seq_file.h> 11 11 #include <linux/slab.h> 12 + #include <linux/cred.h> 12 13 13 14 #include <asm/uaccess.h> 14 15 #include <asm/page.h> ··· 57 56 memset(p, 0, sizeof(*p)); 58 57 mutex_init(&p->lock); 59 58 p->op = op; 59 + #ifdef CONFIG_USER_NS 60 + p->user_ns = file->f_cred->user_ns; 61 + #endif 60 62 61 63 /* 62 64 * Wrappers around seq_open(e.g. swaps_open) need to be
+6 -2
fs/squashfs/inode.c
··· 56 56 static int squashfs_new_inode(struct super_block *sb, struct inode *inode, 57 57 struct squashfs_base_inode *sqsh_ino) 58 58 { 59 + uid_t i_uid; 60 + gid_t i_gid; 59 61 int err; 60 62 61 - err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid); 63 + err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid); 62 64 if (err) 63 65 return err; 64 66 65 - err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid); 67 + err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid); 66 68 if (err) 67 69 return err; 68 70 71 + i_uid_write(inode, i_uid); 72 + i_gid_write(inode, i_gid); 69 73 inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); 70 74 inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime); 71 75 inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
+4 -4
fs/sysv/inode.c
··· 202 202 } 203 203 /* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */ 204 204 inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode); 205 - inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid); 206 - inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid); 205 + i_uid_write(inode, (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid)); 206 + i_gid_write(inode, (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid)); 207 207 set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink)); 208 208 inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size); 209 209 inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime); ··· 256 256 } 257 257 258 258 raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode); 259 - raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid)); 260 - raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid)); 259 + raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(i_uid_read(inode))); 260 + raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(i_gid_read(inode))); 261 261 raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink); 262 262 raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size); 263 263 raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
+2 -2
fs/ubifs/budget.c
··· 272 272 */ 273 273 static int can_use_rp(struct ubifs_info *c) 274 274 { 275 - if (current_fsuid() == c->rp_uid || capable(CAP_SYS_RESOURCE) || 276 - (c->rp_gid != 0 && in_group_p(c->rp_gid))) 275 + if (uid_eq(current_fsuid(), c->rp_uid) || capable(CAP_SYS_RESOURCE) || 276 + (!gid_eq(c->rp_gid, GLOBAL_ROOT_GID) && in_group_p(c->rp_gid))) 277 277 return 1; 278 278 return 0; 279 279 }
+2 -2
fs/ubifs/debug.c
··· 243 243 printk(KERN_ERR "\tsize %llu\n", 244 244 (unsigned long long)i_size_read(inode)); 245 245 printk(KERN_ERR "\tnlink %u\n", inode->i_nlink); 246 - printk(KERN_ERR "\tuid %u\n", (unsigned int)inode->i_uid); 247 - printk(KERN_ERR "\tgid %u\n", (unsigned int)inode->i_gid); 246 + printk(KERN_ERR "\tuid %u\n", (unsigned int)i_uid_read(inode)); 247 + printk(KERN_ERR "\tgid %u\n", (unsigned int)i_gid_read(inode)); 248 248 printk(KERN_ERR "\tatime %u.%u\n", 249 249 (unsigned int)inode->i_atime.tv_sec, 250 250 (unsigned int)inode->i_atime.tv_nsec);
+2 -2
fs/ubifs/journal.c
··· 469 469 ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 470 470 ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec); 471 471 ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); 472 - ino->uid = cpu_to_le32(inode->i_uid); 473 - ino->gid = cpu_to_le32(inode->i_gid); 472 + ino->uid = cpu_to_le32(i_uid_read(inode)); 473 + ino->gid = cpu_to_le32(i_gid_read(inode)); 474 474 ino->mode = cpu_to_le32(inode->i_mode); 475 475 ino->flags = cpu_to_le32(ui->flags); 476 476 ino->size = cpu_to_le64(ui->ui_size);
+2 -2
fs/ubifs/sb.c
··· 611 611 c->fanout = le32_to_cpu(sup->fanout); 612 612 c->lsave_cnt = le32_to_cpu(sup->lsave_cnt); 613 613 c->rp_size = le64_to_cpu(sup->rp_size); 614 - c->rp_uid = le32_to_cpu(sup->rp_uid); 615 - c->rp_gid = le32_to_cpu(sup->rp_gid); 614 + c->rp_uid = make_kuid(&init_user_ns, le32_to_cpu(sup->rp_uid)); 615 + c->rp_gid = make_kgid(&init_user_ns, le32_to_cpu(sup->rp_gid)); 616 616 sup_flags = le32_to_cpu(sup->flags); 617 617 if (!c->mount_opts.override_compr) 618 618 c->default_compr = le16_to_cpu(sup->default_compr);
+2 -2
fs/ubifs/super.c
··· 130 130 131 131 inode->i_flags |= (S_NOCMTIME | S_NOATIME); 132 132 set_nlink(inode, le32_to_cpu(ino->nlink)); 133 - inode->i_uid = le32_to_cpu(ino->uid); 134 - inode->i_gid = le32_to_cpu(ino->gid); 133 + i_uid_write(inode, le32_to_cpu(ino->uid)); 134 + i_gid_write(inode, le32_to_cpu(ino->gid)); 135 135 inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec); 136 136 inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec); 137 137 inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
+2 -2
fs/ubifs/ubifs.h
··· 1426 1426 1427 1427 long long rp_size; 1428 1428 long long report_rp_size; 1429 - uid_t rp_uid; 1430 - gid_t rp_gid; 1429 + kuid_t rp_uid; 1430 + kgid_t rp_gid; 1431 1431 1432 1432 /* The below fields are used only during mounting and re-mounting */ 1433 1433 unsigned int empty:1;
+6 -6
fs/udf/inode.c
··· 1312 1312 } 1313 1313 1314 1314 read_lock(&sbi->s_cred_lock); 1315 - inode->i_uid = le32_to_cpu(fe->uid); 1316 - if (inode->i_uid == -1 || 1315 + i_uid_write(inode, le32_to_cpu(fe->uid)); 1316 + if (!uid_valid(inode->i_uid) || 1317 1317 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || 1318 1318 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) 1319 1319 inode->i_uid = UDF_SB(inode->i_sb)->s_uid; 1320 1320 1321 - inode->i_gid = le32_to_cpu(fe->gid); 1322 - if (inode->i_gid == -1 || 1321 + i_gid_write(inode, le32_to_cpu(fe->gid)); 1322 + if (!gid_valid(inode->i_gid) || 1323 1323 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || 1324 1324 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) 1325 1325 inode->i_gid = UDF_SB(inode->i_sb)->s_gid; ··· 1542 1542 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1543 1543 fe->uid = cpu_to_le32(-1); 1544 1544 else 1545 - fe->uid = cpu_to_le32(inode->i_uid); 1545 + fe->uid = cpu_to_le32(i_uid_read(inode)); 1546 1546 1547 1547 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET)) 1548 1548 fe->gid = cpu_to_le32(-1); 1549 1549 else 1550 - fe->gid = cpu_to_le32(inode->i_gid); 1550 + fe->gid = cpu_to_le32(i_gid_read(inode)); 1551 1551 1552 1552 udfperms = ((inode->i_mode & S_IRWXO)) | 1553 1553 ((inode->i_mode & S_IRWXG) << 2) |
+12 -8
fs/udf/super.c
··· 199 199 unsigned int rootdir; 200 200 unsigned int flags; 201 201 umode_t umask; 202 - gid_t gid; 203 - uid_t uid; 202 + kgid_t gid; 203 + kuid_t uid; 204 204 umode_t fmode; 205 205 umode_t dmode; 206 206 struct nls_table *nls_map; ··· 335 335 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE)) 336 336 seq_puts(seq, ",gid=ignore"); 337 337 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) 338 - seq_printf(seq, ",uid=%u", sbi->s_uid); 338 + seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid)); 339 339 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) 340 - seq_printf(seq, ",gid=%u", sbi->s_gid); 340 + seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid)); 341 341 if (sbi->s_umask != 0) 342 342 seq_printf(seq, ",umask=%ho", sbi->s_umask); 343 343 if (sbi->s_fmode != UDF_INVALID_MODE) ··· 516 516 case Opt_gid: 517 517 if (match_int(args, &option)) 518 518 return 0; 519 - uopt->gid = option; 519 + uopt->gid = make_kgid(current_user_ns(), option); 520 + if (!gid_valid(uopt->gid)) 521 + return 0; 520 522 uopt->flags |= (1 << UDF_FLAG_GID_SET); 521 523 break; 522 524 case Opt_uid: 523 525 if (match_int(args, &option)) 524 526 return 0; 525 - uopt->uid = option; 527 + uopt->uid = make_kuid(current_user_ns(), option); 528 + if (!uid_valid(uopt->uid)) 529 + return 0; 526 530 uopt->flags |= (1 << UDF_FLAG_UID_SET); 527 531 break; 528 532 case Opt_umask: ··· 1938 1934 struct udf_sb_info *sbi; 1939 1935 1940 1936 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); 1941 - uopt.uid = -1; 1942 - uopt.gid = -1; 1937 + uopt.uid = INVALID_UID; 1938 + uopt.gid = INVALID_GID; 1943 1939 uopt.umask = 0; 1944 1940 uopt.fmode = UDF_INVALID_MODE; 1945 1941 uopt.dmode = UDF_INVALID_MODE;
+2 -2
fs/udf/udf_sb.h
··· 128 128 129 129 /* Default permissions */ 130 130 umode_t s_umask; 131 - gid_t s_gid; 132 - uid_t s_uid; 131 + kgid_t s_gid; 132 + kuid_t s_uid; 133 133 umode_t s_fmode; 134 134 umode_t s_dmode; 135 135 /* Lock protecting consistency of above permission settings */
+8 -8
fs/ufs/inode.c
··· 597 597 /* 598 598 * Linux now has 32-bit uid and gid, so we can support EFT. 599 599 */ 600 - inode->i_uid = ufs_get_inode_uid(sb, ufs_inode); 601 - inode->i_gid = ufs_get_inode_gid(sb, ufs_inode); 600 + i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode)); 601 + i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 602 602 603 603 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 604 604 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); ··· 645 645 /* 646 646 * Linux now has 32-bit uid and gid, so we can support EFT. 647 647 */ 648 - inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid); 649 - inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid); 648 + i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid)); 649 + i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid)); 650 650 651 651 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); 652 652 inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); ··· 745 745 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 746 746 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 747 747 748 - ufs_set_inode_uid(sb, ufs_inode, inode->i_uid); 749 - ufs_set_inode_gid(sb, ufs_inode, inode->i_gid); 748 + ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); 749 + ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); 750 750 751 751 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 752 752 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); ··· 789 789 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 790 790 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 791 791 792 - ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid); 793 - ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid); 792 + ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode)); 793 + ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode)); 794 794 795 795 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 796 796 ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
+7
fs/xattr.c
··· 20 20 #include <linux/fsnotify.h> 21 21 #include <linux/audit.h> 22 22 #include <linux/vmalloc.h> 23 + #include <linux/posix_acl_xattr.h> 23 24 24 25 #include <asm/uaccess.h> 25 26 ··· 348 347 error = -EFAULT; 349 348 goto out; 350 349 } 350 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || 351 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) 352 + posix_acl_fix_xattr_from_user(kvalue, size); 351 353 } 352 354 353 355 error = vfs_setxattr(d, kname, kvalue, size, flags); ··· 454 450 455 451 error = vfs_getxattr(d, kname, kvalue, size); 456 452 if (error > 0) { 453 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || 454 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) 455 + posix_acl_fix_xattr_to_user(kvalue, size); 457 456 if (size && copy_to_user(value, kvalue, error)) 458 457 error = -EFAULT; 459 458 } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
+89 -7
fs/xattr_acl.c
··· 9 9 #include <linux/fs.h> 10 10 #include <linux/posix_acl_xattr.h> 11 11 #include <linux/gfp.h> 12 + #include <linux/user_namespace.h> 12 13 14 + /* 15 + * Fix up the uids and gids in posix acl extended attributes in place. 16 + */ 17 + static void posix_acl_fix_xattr_userns( 18 + struct user_namespace *to, struct user_namespace *from, 19 + void *value, size_t size) 20 + { 21 + posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; 22 + posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; 23 + int count; 24 + kuid_t uid; 25 + kgid_t gid; 26 + 27 + if (!value) 28 + return; 29 + if (size < sizeof(posix_acl_xattr_header)) 30 + return; 31 + if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION)) 32 + return; 33 + 34 + count = posix_acl_xattr_count(size); 35 + if (count < 0) 36 + return; 37 + if (count == 0) 38 + return; 39 + 40 + for (end = entry + count; entry != end; entry++) { 41 + switch(le16_to_cpu(entry->e_tag)) { 42 + case ACL_USER: 43 + uid = make_kuid(from, le32_to_cpu(entry->e_id)); 44 + entry->e_id = cpu_to_le32(from_kuid(to, uid)); 45 + break; 46 + case ACL_GROUP: 47 + gid = make_kgid(from, le32_to_cpu(entry->e_id)); 48 + entry->e_id = cpu_to_le32(from_kgid(to, gid)); 49 + break; 50 + default: 51 + break; 52 + } 53 + } 54 + } 55 + 56 + void posix_acl_fix_xattr_from_user(void *value, size_t size) 57 + { 58 + struct user_namespace *user_ns = current_user_ns(); 59 + if (user_ns == &init_user_ns) 60 + return; 61 + posix_acl_fix_xattr_userns(&init_user_ns, user_ns, value, size); 62 + } 63 + 64 + void posix_acl_fix_xattr_to_user(void *value, size_t size) 65 + { 66 + struct user_namespace *user_ns = current_user_ns(); 67 + if (user_ns == &init_user_ns) 68 + return; 69 + posix_acl_fix_xattr_userns(user_ns, &init_user_ns, value, size); 70 + } 13 71 14 72 /* 15 73 * Convert from extended attribute to in-memory representation. 
16 74 */ 17 75 struct posix_acl * 18 - posix_acl_from_xattr(const void *value, size_t size) 76 + posix_acl_from_xattr(struct user_namespace *user_ns, 77 + const void *value, size_t size) 19 78 { 20 79 posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; 21 80 posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; ··· 109 50 case ACL_GROUP_OBJ: 110 51 case ACL_MASK: 111 52 case ACL_OTHER: 112 - acl_e->e_id = ACL_UNDEFINED_ID; 113 53 break; 114 54 115 55 case ACL_USER: 56 + acl_e->e_uid = 57 + make_kuid(user_ns, 58 + le32_to_cpu(entry->e_id)); 59 + if (!uid_valid(acl_e->e_uid)) 60 + goto fail; 61 + break; 116 62 case ACL_GROUP: 117 - acl_e->e_id = le32_to_cpu(entry->e_id); 63 + acl_e->e_gid = 64 + make_kgid(user_ns, 65 + le32_to_cpu(entry->e_id)); 66 + if (!gid_valid(acl_e->e_gid)) 67 + goto fail; 118 68 break; 119 69 120 70 default: ··· 142 74 * Convert from in-memory to extended attribute representation. 143 75 */ 144 76 int 145 - posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size) 77 + posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl, 78 + void *buffer, size_t size) 146 79 { 147 80 posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer; 148 81 posix_acl_xattr_entry *ext_entry = ext_acl->a_entries; ··· 158 89 ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); 159 90 160 91 for (n=0; n < acl->a_count; n++, ext_entry++) { 161 - ext_entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); 162 - ext_entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); 163 - ext_entry->e_id = cpu_to_le32(acl->a_entries[n].e_id); 92 + const struct posix_acl_entry *acl_e = &acl->a_entries[n]; 93 + ext_entry->e_tag = cpu_to_le16(acl_e->e_tag); 94 + ext_entry->e_perm = cpu_to_le16(acl_e->e_perm); 95 + switch(acl_e->e_tag) { 96 + case ACL_USER: 97 + ext_entry->e_id = 98 + cpu_to_le32(from_kuid(user_ns, acl_e->e_uid)); 99 + break; 100 + case ACL_GROUP: 101 + ext_entry->e_id = 102 + 
cpu_to_le32(from_kgid(user_ns, acl_e->e_gid)); 103 + break; 104 + default: 105 + ext_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID); 106 + break; 107 + } 164 108 } 165 109 return real_size; 166 110 }
+2 -2
fs/xfs/xfs_acl.c
··· 337 337 if (acl == NULL) 338 338 return -ENODATA; 339 339 340 - error = posix_acl_to_xattr(acl, value, size); 340 + error = posix_acl_to_xattr(&init_user_ns, acl, value, size); 341 341 posix_acl_release(acl); 342 342 343 343 return error; ··· 361 361 if (!value) 362 362 goto set_acl; 363 363 364 - acl = posix_acl_from_xattr(value, size); 364 + acl = posix_acl_from_xattr(&init_user_ns, value, size); 365 365 if (!acl) { 366 366 /* 367 367 * acl_set_file(3) may request that we set default ACLs with
+6 -6
fs/xfs/xfs_quotaops.c
··· 97 97 STATIC int 98 98 xfs_fs_get_dqblk( 99 99 struct super_block *sb, 100 - int type, 101 - qid_t id, 100 + struct kqid qid, 102 101 struct fs_disk_quota *fdq) 103 102 { 104 103 struct xfs_mount *mp = XFS_M(sb); ··· 107 108 if (!XFS_IS_QUOTA_ON(mp)) 108 109 return -ESRCH; 109 110 110 - return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq); 111 + return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), 112 + xfs_quota_type(qid.type), fdq); 111 113 } 112 114 113 115 STATIC int 114 116 xfs_fs_set_dqblk( 115 117 struct super_block *sb, 116 - int type, 117 - qid_t id, 118 + struct kqid qid, 118 119 struct fs_disk_quota *fdq) 119 120 { 120 121 struct xfs_mount *mp = XFS_M(sb); ··· 126 127 if (!XFS_IS_QUOTA_ON(mp)) 127 128 return -ESRCH; 128 129 129 - return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); 130 + return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), 131 + xfs_quota_type(qid.type), fdq); 130 132 } 131 133 132 134 const struct quotactl_ops xfs_quotactl_operations = {
+5 -3
fs/xfs/xfs_trans_dquot.c
··· 578 578 /* no warnings for project quotas - we just return ENOSPC later */ 579 579 if (dqp->dq_flags & XFS_DQ_PROJ) 580 580 return; 581 - quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA, 582 - be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev, 583 - type); 581 + quota_send_warning(make_kqid(&init_user_ns, 582 + (dqp->dq_flags & XFS_DQ_USER) ? 583 + USRQUOTA : GRPQUOTA, 584 + be32_to_cpu(dqp->q_core.d_id)), 585 + mp->m_super->s_dev, type); 584 586 } 585 587 586 588 /*
+2 -2
include/drm/drmP.h
··· 426 426 /** File private data */ 427 427 struct drm_file { 428 428 int authenticated; 429 - pid_t pid; 430 - uid_t uid; 429 + struct pid *pid; 430 + kuid_t uid; 431 431 drm_magic_t magic; 432 432 unsigned long ioctl_count; 433 433 struct list_head lhead;
+7 -5
include/linux/audit.h
··· 442 442 struct audit_field { 443 443 u32 type; 444 444 u32 val; 445 + kuid_t uid; 446 + kgid_t gid; 445 447 u32 op; 446 448 char *lsm_str; 447 449 void *lsm_rule; ··· 527 525 extern unsigned int audit_serial(void); 528 526 extern int auditsc_get_stamp(struct audit_context *ctx, 529 527 struct timespec *t, unsigned int *serial); 530 - extern int audit_set_loginuid(uid_t loginuid); 528 + extern int audit_set_loginuid(kuid_t loginuid); 531 529 #define audit_get_loginuid(t) ((t)->loginuid) 532 530 #define audit_get_sessionid(t) ((t)->sessionid) 533 531 extern void audit_log_task_context(struct audit_buffer *ab); ··· 639 637 #define audit_core_dumps(i) do { ; } while (0) 640 638 #define audit_seccomp(i,s,c) do { ; } while (0) 641 639 #define auditsc_get_stamp(c,t,s) (0) 642 - #define audit_get_loginuid(t) (-1) 640 + #define audit_get_loginuid(t) (INVALID_UID) 643 641 #define audit_get_sessionid(t) (-1) 644 642 #define audit_log_task_context(b) do { ; } while (0) 645 643 #define audit_ipc_obj(i) ((void)0) ··· 702 700 extern int audit_update_lsm_rules(void); 703 701 704 702 /* Private API (for audit.c only) */ 705 - extern int audit_filter_user(struct netlink_skb_parms *cb); 703 + extern int audit_filter_user(void); 706 704 extern int audit_filter_type(int type); 707 - extern int audit_receive_filter(int type, int pid, int uid, int seq, 708 - void *data, size_t datasz, uid_t loginuid, 705 + extern int audit_receive_filter(int type, int pid, int seq, 706 + void *data, size_t datasz, kuid_t loginuid, 709 707 u32 sessionid, u32 sid); 710 708 extern int audit_enabled; 711 709 #else
+1
include/linux/inet_diag.h
··· 159 159 struct inet_connection_sock; 160 160 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 161 161 struct sk_buff *skb, struct inet_diag_req_v2 *req, 162 + struct user_namespace *user_ns, 162 163 u32 pid, u32 seq, u16 nlmsg_flags, 163 164 const struct nlmsghdr *unlh); 164 165 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+1 -1
include/linux/init_task.h
··· 92 92 93 93 #ifdef CONFIG_AUDITSYSCALL 94 94 #define INIT_IDS \ 95 - .loginuid = -1, \ 95 + .loginuid = INVALID_UID, \ 96 96 .sessionid = -1, 97 97 #else 98 98 #define INIT_IDS
+5 -4
include/linux/ipc.h
··· 79 79 80 80 #ifdef __KERNEL__ 81 81 #include <linux/spinlock.h> 82 + #include <linux/uidgid.h> 82 83 83 84 #define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ 84 85 ··· 90 89 int deleted; 91 90 int id; 92 91 key_t key; 93 - uid_t uid; 94 - gid_t gid; 95 - uid_t cuid; 96 - gid_t cgid; 92 + kuid_t uid; 93 + kgid_t gid; 94 + kuid_t cuid; 95 + kgid_t cgid; 97 96 umode_t mode; 98 97 unsigned long seq; 99 98 void *security;
+5 -4
include/linux/key.h
··· 24 24 #include <linux/atomic.h> 25 25 26 26 #ifdef __KERNEL__ 27 + #include <linux/uidgid.h> 27 28 28 29 /* key handle serial number */ 29 30 typedef int32_t key_serial_t; ··· 138 137 time_t revoked_at; /* time at which key was revoked */ 139 138 }; 140 139 time_t last_used_at; /* last time used for LRU keyring discard */ 141 - uid_t uid; 142 - gid_t gid; 140 + kuid_t uid; 141 + kgid_t gid; 143 142 key_perm_t perm; /* access permissions */ 144 143 unsigned short quotalen; /* length added to quota */ 145 144 unsigned short datalen; /* payload data length ··· 194 193 195 194 extern struct key *key_alloc(struct key_type *type, 196 195 const char *desc, 197 - uid_t uid, gid_t gid, 196 + kuid_t uid, kgid_t gid, 198 197 const struct cred *cred, 199 198 key_perm_t perm, 200 199 unsigned long flags); ··· 263 262 extern int key_unlink(struct key *keyring, 264 263 struct key *key); 265 264 266 - extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, 265 + extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, 267 266 const struct cred *cred, 268 267 unsigned long flags, 269 268 struct key *dest);
+1 -1
include/linux/loop.h
··· 44 44 int lo_encrypt_key_size; 45 45 struct loop_func_table *lo_encryption; 46 46 __u32 lo_init[2]; 47 - uid_t lo_key_owner; /* Who set the key */ 47 + kuid_t lo_key_owner; /* Who set the key */ 48 48 int (*ioctl)(struct loop_device *, int cmd, 49 49 unsigned long arg); 50 50
+1
include/linux/netlink.h
··· 165 165 struct ucred creds; /* Skb credentials */ 166 166 __u32 pid; 167 167 __u32 dst_group; 168 + struct sock *ssk; 168 169 }; 169 170 170 171 #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
+7 -1
include/linux/posix_acl.h
··· 36 36 struct posix_acl_entry { 37 37 short e_tag; 38 38 unsigned short e_perm; 39 - unsigned int e_id; 39 + union { 40 + kuid_t e_uid; 41 + kgid_t e_gid; 42 + #ifndef CONFIG_UIDGID_STRICT_TYPE_CHECKS 43 + unsigned int e_id; 44 + #endif 45 + }; 40 46 }; 41 47 42 48 struct posix_acl {
+16 -2
include/linux/posix_acl_xattr.h
··· 52 52 return size / sizeof(posix_acl_xattr_entry); 53 53 } 54 54 55 - struct posix_acl *posix_acl_from_xattr(const void *value, size_t size); 56 - int posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size); 55 + #ifdef CONFIG_FS_POSIX_ACL 56 + void posix_acl_fix_xattr_from_user(void *value, size_t size); 57 + void posix_acl_fix_xattr_to_user(void *value, size_t size); 58 + #else 59 + static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) 60 + { 61 + } 62 + static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) 63 + { 64 + } 65 + #endif 66 + 67 + struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns, 68 + const void *value, size_t size); 69 + int posix_acl_to_xattr(struct user_namespace *user_ns, 70 + const struct posix_acl *acl, void *buffer, size_t size); 57 71 58 72 #endif /* _POSIX_ACL_XATTR_H */
+104
include/linux/projid.h
··· 1 + #ifndef _LINUX_PROJID_H 2 + #define _LINUX_PROJID_H 3 + 4 + /* 5 + * A set of types for the internal kernel types representing project ids. 6 + * 7 + * The types defined in this header allow distinguishing which project ids in 8 + * the kernel are values used by userspace and which project id values are 9 + * the internal kernel values. With the addition of user namespaces the values 10 + * can be different. Using the type system makes it possible for the compiler 11 + * to detect when we overlook these differences. 12 + * 13 + */ 14 + #include <linux/types.h> 15 + 16 + struct user_namespace; 17 + extern struct user_namespace init_user_ns; 18 + 19 + typedef __kernel_uid32_t projid_t; 20 + 21 + #ifdef CONFIG_UIDGID_STRICT_TYPE_CHECKS 22 + 23 + typedef struct { 24 + projid_t val; 25 + } kprojid_t; 26 + 27 + static inline projid_t __kprojid_val(kprojid_t projid) 28 + { 29 + return projid.val; 30 + } 31 + 32 + #define KPROJIDT_INIT(value) (kprojid_t){ value } 33 + 34 + #else 35 + 36 + typedef projid_t kprojid_t; 37 + 38 + static inline projid_t __kprojid_val(kprojid_t projid) 39 + { 40 + return projid; 41 + } 42 + 43 + #define KPROJIDT_INIT(value) ((kprojid_t) value ) 44 + 45 + #endif 46 + 47 + #define INVALID_PROJID KPROJIDT_INIT(-1) 48 + #define OVERFLOW_PROJID 65534 49 + 50 + static inline bool projid_eq(kprojid_t left, kprojid_t right) 51 + { 52 + return __kprojid_val(left) == __kprojid_val(right); 53 + } 54 + 55 + static inline bool projid_lt(kprojid_t left, kprojid_t right) 56 + { 57 + return __kprojid_val(left) < __kprojid_val(right); 58 + } 59 + 60 + static inline bool projid_valid(kprojid_t projid) 61 + { 62 + return !projid_eq(projid, INVALID_PROJID); 63 + } 64 + 65 + #ifdef CONFIG_USER_NS 66 + 67 + extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid); 68 + 69 + extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid); 70 + extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid); 71 + 72 
+ static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid) 73 + { 74 + return from_kprojid(ns, projid) != (projid_t)-1; 75 + } 76 + 77 + #else 78 + 79 + static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid) 80 + { 81 + return KPROJIDT_INIT(projid); 82 + } 83 + 84 + static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid) 85 + { 86 + return __kprojid_val(kprojid); 87 + } 88 + 89 + static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid) 90 + { 91 + projid_t projid = from_kprojid(to, kprojid); 92 + if (projid == (projid_t)-1) 93 + projid = OVERFLOW_PROJID; 94 + return projid; 95 + } 96 + 97 + static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid) 98 + { 99 + return true; 100 + } 101 + 102 + #endif /* CONFIG_USER_NS */ 103 + 104 + #endif /* _LINUX_PROJID_H */
+130 -6
include/linux/quota.h
··· 181 181 #include <linux/dqblk_v2.h> 182 182 183 183 #include <linux/atomic.h> 184 + #include <linux/uidgid.h> 185 + #include <linux/projid.h> 186 + 187 + #undef USRQUOTA 188 + #undef GRPQUOTA 189 + enum quota_type { 190 + USRQUOTA = 0, /* element used for user quotas */ 191 + GRPQUOTA = 1, /* element used for group quotas */ 192 + PRJQUOTA = 2, /* element used for project quotas */ 193 + }; 184 194 185 195 typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ 186 196 typedef long long qsize_t; /* Type in which we store sizes */ 197 + 198 + struct kqid { /* Type in which we store the quota identifier */ 199 + union { 200 + kuid_t uid; 201 + kgid_t gid; 202 + kprojid_t projid; 203 + }; 204 + enum quota_type type; /* USRQUOTA (uid) or GRPQUOTA (gid) or PRJQUOTA (projid) */ 205 + }; 206 + 207 + extern bool qid_eq(struct kqid left, struct kqid right); 208 + extern bool qid_lt(struct kqid left, struct kqid right); 209 + extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); 210 + extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); 211 + extern bool qid_valid(struct kqid qid); 212 + 213 + /** 214 + * make_kqid - Map a user-namespace, type, qid tuple into a kqid. 215 + * @from: User namespace that the qid is in 216 + * @type: The type of quota 217 + * @qid: Quota identifier 218 + * 219 + * Maps a user-namespace, type qid tuple into a kernel internal 220 + * kqid, and returns that kqid. 221 + * 222 + * When there is no mapping defined for the user-namespace, type, 223 + * qid tuple an invalid kqid is returned. Callers are expected to 224 + * test for and handle handle invalid kqids being returned. 225 + * Invalid kqids may be tested for using qid_valid(). 
226 + */ 227 + static inline struct kqid make_kqid(struct user_namespace *from, 228 + enum quota_type type, qid_t qid) 229 + { 230 + struct kqid kqid; 231 + 232 + kqid.type = type; 233 + switch (type) { 234 + case USRQUOTA: 235 + kqid.uid = make_kuid(from, qid); 236 + break; 237 + case GRPQUOTA: 238 + kqid.gid = make_kgid(from, qid); 239 + break; 240 + case PRJQUOTA: 241 + kqid.projid = make_kprojid(from, qid); 242 + break; 243 + default: 244 + BUG(); 245 + } 246 + return kqid; 247 + } 248 + 249 + /** 250 + * make_kqid_invalid - Explicitly make an invalid kqid 251 + * @type: The type of quota identifier 252 + * 253 + * Returns an invalid kqid with the specified type. 254 + */ 255 + static inline struct kqid make_kqid_invalid(enum quota_type type) 256 + { 257 + struct kqid kqid; 258 + 259 + kqid.type = type; 260 + switch (type) { 261 + case USRQUOTA: 262 + kqid.uid = INVALID_UID; 263 + break; 264 + case GRPQUOTA: 265 + kqid.gid = INVALID_GID; 266 + break; 267 + case PRJQUOTA: 268 + kqid.projid = INVALID_PROJID; 269 + break; 270 + default: 271 + BUG(); 272 + } 273 + return kqid; 274 + } 275 + 276 + /** 277 + * make_kqid_uid - Make a kqid from a kuid 278 + * @uid: The kuid to make the quota identifier from 279 + */ 280 + static inline struct kqid make_kqid_uid(kuid_t uid) 281 + { 282 + struct kqid kqid; 283 + kqid.type = USRQUOTA; 284 + kqid.uid = uid; 285 + return kqid; 286 + } 287 + 288 + /** 289 + * make_kqid_gid - Make a kqid from a kgid 290 + * @gid: The kgid to make the quota identifier from 291 + */ 292 + static inline struct kqid make_kqid_gid(kgid_t gid) 293 + { 294 + struct kqid kqid; 295 + kqid.type = GRPQUOTA; 296 + kqid.gid = gid; 297 + return kqid; 298 + } 299 + 300 + /** 301 + * make_kqid_projid - Make a kqid from a projid 302 + * @projid: The kprojid to make the quota identifier from 303 + */ 304 + static inline struct kqid make_kqid_projid(kprojid_t projid) 305 + { 306 + struct kqid kqid; 307 + kqid.type = PRJQUOTA; 308 + kqid.projid = projid; 309 + 
return kqid; 310 + } 311 + 187 312 188 313 extern spinlock_t dq_data_lock; 189 314 ··· 419 294 atomic_t dq_count; /* Use count */ 420 295 wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ 421 296 struct super_block *dq_sb; /* superblock this applies to */ 422 - unsigned int dq_id; /* ID this applies to (uid, gid) */ 297 + struct kqid dq_id; /* ID this applies to (uid, gid, projid) */ 423 298 loff_t dq_off; /* Offset of dquot on disk */ 424 299 unsigned long dq_flags; /* See DQ_* */ 425 - short dq_type; /* Type of quota */ 426 300 struct mem_dqblk dq_dqb; /* Diskquota usage */ 427 301 }; 428 302 ··· 460 336 int (*quota_sync)(struct super_block *, int); 461 337 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 462 338 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 463 - int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); 464 - int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); 339 + int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 340 + int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 465 341 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 466 342 int (*set_xstate)(struct super_block *, unsigned int, int); 467 343 }; ··· 510 386 } 511 387 512 388 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE 513 - extern void quota_send_warning(short type, unsigned int id, dev_t dev, 389 + extern void quota_send_warning(struct kqid qid, dev_t dev, 514 390 const char warntype); 515 391 #else 516 - static inline void quota_send_warning(short type, unsigned int id, dev_t dev, 392 + static inline void quota_send_warning(struct kqid qid, dev_t dev, 517 393 const char warntype) 518 394 { 519 395 return;
+3 -3
include/linux/quotaops.h
··· 44 44 45 45 void dquot_initialize(struct inode *inode); 46 46 void dquot_drop(struct inode *inode); 47 - struct dquot *dqget(struct super_block *sb, unsigned int id, int type); 47 + struct dquot *dqget(struct super_block *sb, struct kqid qid); 48 48 void dqput(struct dquot *dquot); 49 49 int dquot_scan_active(struct super_block *sb, 50 50 int (*fn)(struct dquot *dquot, unsigned long priv), ··· 87 87 int dquot_quota_sync(struct super_block *sb, int type); 88 88 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 89 89 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 90 - int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, 90 + int dquot_get_dqblk(struct super_block *sb, struct kqid id, 91 91 struct fs_disk_quota *di); 92 - int dquot_set_dqblk(struct super_block *sb, int type, qid_t id, 92 + int dquot_set_dqblk(struct super_block *sb, struct kqid id, 93 93 struct fs_disk_quota *di); 94 94 95 95 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
+1 -1
include/linux/sched.h
··· 1414 1414 1415 1415 struct audit_context *audit_context; 1416 1416 #ifdef CONFIG_AUDITSYSCALL 1417 - uid_t loginuid; 1417 + kuid_t loginuid; 1418 1418 unsigned int sessionid; 1419 1419 #endif 1420 1420 struct seccomp seccomp;
+3 -3
include/linux/security.h
··· 1436 1436 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, 1437 1437 struct path *new_dir, struct dentry *new_dentry); 1438 1438 int (*path_chmod) (struct path *path, umode_t mode); 1439 - int (*path_chown) (struct path *path, uid_t uid, gid_t gid); 1439 + int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid); 1440 1440 int (*path_chroot) (struct path *path); 1441 1441 #endif 1442 1442 ··· 2831 2831 int security_path_rename(struct path *old_dir, struct dentry *old_dentry, 2832 2832 struct path *new_dir, struct dentry *new_dentry); 2833 2833 int security_path_chmod(struct path *path, umode_t mode); 2834 - int security_path_chown(struct path *path, uid_t uid, gid_t gid); 2834 + int security_path_chown(struct path *path, kuid_t uid, kgid_t gid); 2835 2835 int security_path_chroot(struct path *path); 2836 2836 #else /* CONFIG_SECURITY_PATH */ 2837 2837 static inline int security_path_unlink(struct path *dir, struct dentry *dentry) ··· 2887 2887 return 0; 2888 2888 } 2889 2889 2890 - static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid) 2890 + static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) 2891 2891 { 2892 2892 return 0; 2893 2893 }
+14
include/linux/seq_file.h
··· 13 13 struct path; 14 14 struct inode; 15 15 struct dentry; 16 + struct user_namespace; 16 17 17 18 struct seq_file { 18 19 char *buf; ··· 26 25 struct mutex lock; 27 26 const struct seq_operations *op; 28 27 int poll_event; 28 + #ifdef CONFIG_USER_NS 29 + struct user_namespace *user_ns; 30 + #endif 29 31 void *private; 30 32 }; 31 33 ··· 131 127 unsigned long long num); 132 128 int seq_put_decimal_ll(struct seq_file *m, char delimiter, 133 129 long long num); 130 + 131 + static inline struct user_namespace *seq_user_ns(struct seq_file *seq) 132 + { 133 + #ifdef CONFIG_USER_NS 134 + return seq->user_ns; 135 + #else 136 + extern struct user_namespace init_user_ns; 137 + return &init_user_ns; 138 + #endif 139 + } 134 140 135 141 #define SEQ_START_TOKEN ((void *)1) 136 142 /*
+6 -2
include/linux/tsacct_kern.h
··· 10 10 #include <linux/taskstats.h> 11 11 12 12 #ifdef CONFIG_TASKSTATS 13 - extern void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk); 13 + extern void bacct_add_tsk(struct user_namespace *user_ns, 14 + struct pid_namespace *pid_ns, 15 + struct taskstats *stats, struct task_struct *tsk); 14 16 #else 15 - static inline void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) 17 + static inline void bacct_add_tsk(struct user_namespace *user_ns, 18 + struct pid_namespace *pid_ns, 19 + struct taskstats *stats, struct task_struct *tsk) 16 20 {} 17 21 #endif /* CONFIG_TASKSTATS */ 18 22
+2 -2
include/linux/tty.h
··· 575 575 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); 576 576 extern void tty_audit_push(struct tty_struct *tty); 577 577 extern int tty_audit_push_task(struct task_struct *tsk, 578 - uid_t loginuid, u32 sessionid); 578 + kuid_t loginuid, u32 sessionid); 579 579 #else 580 580 static inline void tty_audit_add_data(struct tty_struct *tty, 581 581 unsigned char *data, size_t size) ··· 594 594 { 595 595 } 596 596 static inline int tty_audit_push_task(struct task_struct *tsk, 597 - uid_t loginuid, u32 sessionid) 597 + kuid_t loginuid, u32 sessionid) 598 598 { 599 599 return 0; 600 600 }
+3
include/linux/user_namespace.h
··· 20 20 struct user_namespace { 21 21 struct uid_gid_map uid_map; 22 22 struct uid_gid_map gid_map; 23 + struct uid_gid_map projid_map; 23 24 struct kref kref; 24 25 struct user_namespace *parent; 25 26 kuid_t owner; ··· 50 49 struct seq_operations; 51 50 extern struct seq_operations proc_uid_seq_operations; 52 51 extern struct seq_operations proc_gid_seq_operations; 52 + extern struct seq_operations proc_projid_seq_operations; 53 53 extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); 54 54 extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); 55 + extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); 55 56 #else 56 57 57 58 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+2 -2
include/net/ax25.h
··· 157 157 typedef struct ax25_uid_assoc { 158 158 struct hlist_node uid_node; 159 159 atomic_t refcount; 160 - uid_t uid; 160 + kuid_t uid; 161 161 ax25_address call; 162 162 } ax25_uid_assoc; 163 163 ··· 434 434 435 435 /* ax25_uid.c */ 436 436 extern int ax25_uid_policy; 437 - extern ax25_uid_assoc *ax25_findbyuid(uid_t); 437 + extern ax25_uid_assoc *ax25_findbyuid(kuid_t); 438 438 extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); 439 439 extern const struct file_operations ax25_uid_fops; 440 440 extern void ax25_uid_free(void);
+4 -1
include/net/ipv6.h
··· 222 222 struct ipv6_txoptions *opt; 223 223 unsigned long linger; 224 224 u8 share; 225 - u32 owner; 225 + union { 226 + struct pid *pid; 227 + kuid_t uid; 228 + } owner; 226 229 unsigned long lastuse; 227 230 unsigned long expires; 228 231 struct net *fl_net;
+1 -1
include/net/netlabel.h
··· 110 110 /* NetLabel audit information */ 111 111 struct netlbl_audit { 112 112 u32 secid; 113 - uid_t loginuid; 113 + kuid_t loginuid; 114 114 u32 sessionid; 115 115 }; 116 116
+2 -1
include/net/netns/ipv4.h
··· 5 5 #ifndef __NETNS_IPV4_H__ 6 6 #define __NETNS_IPV4_H__ 7 7 8 + #include <linux/uidgid.h> 8 9 #include <net/inet_frag.h> 9 10 10 11 struct tcpm_hash_bucket; ··· 63 62 int sysctl_icmp_ratemask; 64 63 int sysctl_icmp_errors_use_inbound_ifaddr; 65 64 66 - unsigned int sysctl_ping_group_range[2]; 65 + kgid_t sysctl_ping_group_range[2]; 67 66 long sysctl_tcp_mem[3]; 68 67 69 68 atomic_t dev_addr_genid;
+2 -1
include/net/sch_generic.h
··· 188 188 189 189 unsigned long (*get)(struct tcf_proto*, u32 handle); 190 190 void (*put)(struct tcf_proto*, unsigned long); 191 - int (*change)(struct tcf_proto*, unsigned long, 191 + int (*change)(struct sk_buff *, 192 + struct tcf_proto*, unsigned long, 192 193 u32 handle, struct nlattr **, 193 194 unsigned long *); 194 195 int (*delete)(struct tcf_proto*, unsigned long);
+10 -1
include/net/sock.h
··· 606 606 #define sk_for_each_bound(__sk, node, list) \ 607 607 hlist_for_each_entry(__sk, node, list, sk_bind_node) 608 608 609 + static inline struct user_namespace *sk_user_ns(struct sock *sk) 610 + { 611 + /* Careful only use this in a context where these parameters 612 + * can not change and must all be valid, such as recvmsg from 613 + * userspace. 614 + */ 615 + return sk->sk_socket->file->f_cred->user_ns; 616 + } 617 + 609 618 /* Sock flags */ 610 619 enum sock_flags { 611 620 SOCK_DEAD, ··· 1671 1662 write_unlock_bh(&sk->sk_callback_lock); 1672 1663 } 1673 1664 1674 - extern int sock_i_uid(struct sock *sk); 1665 + extern kuid_t sock_i_uid(struct sock *sk); 1675 1666 extern unsigned long sock_i_ino(struct sock *sk); 1676 1667 1677 1668 static inline struct dst_entry *
+2 -1
include/net/tcp.h
··· 1510 1510 sa_family_t family; 1511 1511 enum tcp_seq_states state; 1512 1512 struct sock *syn_wait_sk; 1513 - int bucket, offset, sbucket, num, uid; 1513 + int bucket, offset, sbucket, num; 1514 + kuid_t uid; 1514 1515 loff_t last_pos; 1515 1516 }; 1516 1517
+12 -11
include/net/xfrm.h
··· 671 671 /* Audit Information */ 672 672 struct xfrm_audit { 673 673 u32 secid; 674 - uid_t loginuid; 674 + kuid_t loginuid; 675 675 u32 sessionid; 676 676 }; 677 677 ··· 690 690 return audit_buf; 691 691 } 692 692 693 - static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid, 693 + static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid, 694 694 struct audit_buffer *audit_buf) 695 695 { 696 696 char *secctx; 697 697 u32 secctx_len; 698 698 699 - audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses); 699 + audit_log_format(audit_buf, " auid=%u ses=%u", 700 + from_kuid(&init_user_ns, auid), ses); 700 701 if (secid != 0 && 701 702 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) { 702 703 audit_log_format(audit_buf, " subj=%s", secctx); ··· 707 706 } 708 707 709 708 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 710 - u32 auid, u32 ses, u32 secid); 709 + kuid_t auid, u32 ses, u32 secid); 711 710 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 712 - u32 auid, u32 ses, u32 secid); 711 + kuid_t auid, u32 ses, u32 secid); 713 712 extern void xfrm_audit_state_add(struct xfrm_state *x, int result, 714 - u32 auid, u32 ses, u32 secid); 713 + kuid_t auid, u32 ses, u32 secid); 715 714 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result, 716 - u32 auid, u32 ses, u32 secid); 715 + kuid_t auid, u32 ses, u32 secid); 717 716 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 718 717 struct sk_buff *skb); 719 718 extern void xfrm_audit_state_replay(struct xfrm_state *x, ··· 726 725 #else 727 726 728 727 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 729 - u32 auid, u32 ses, u32 secid) 728 + kuid_t auid, u32 ses, u32 secid) 730 729 { 731 730 } 732 731 733 732 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 734 - u32 auid, u32 ses, u32 secid) 733 + kuid_t auid, u32 ses, u32 secid) 735 
734 { 736 735 } 737 736 738 737 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result, 739 - u32 auid, u32 ses, u32 secid) 738 + kuid_t auid, u32 ses, u32 secid) 740 739 { 741 740 } 742 741 743 742 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result, 744 - u32 auid, u32 ses, u32 secid) 743 + kuid_t auid, u32 ses, u32 secid) 745 744 { 746 745 } 747 746
-89
init/Kconfig
··· 986 986 bool 987 987 default y 988 988 989 - # List of kernel pieces that need user namespace work 990 - # Features 991 - depends on SYSVIPC = n 992 - depends on IMA = n 993 - depends on EVM = n 994 - depends on KEYS = n 995 - depends on AUDIT = n 996 - depends on AUDITSYSCALL = n 997 - depends on TASKSTATS = n 998 - depends on TRACING = n 999 - depends on FS_POSIX_ACL = n 1000 - depends on QUOTA = n 1001 - depends on QUOTACTL = n 1002 - depends on DEBUG_CREDENTIALS = n 1003 - depends on BSD_PROCESS_ACCT = n 1004 - depends on DRM = n 1005 - depends on PROC_EVENTS = n 1006 - 1007 989 # Networking 1008 - depends on NET = n 1009 990 depends on NET_9P = n 1010 - depends on IPX = n 1011 - depends on PHONET = n 1012 - depends on NET_CLS_FLOW = n 1013 - depends on NETFILTER_XT_MATCH_OWNER = n 1014 - depends on NETFILTER_XT_MATCH_RECENT = n 1015 - depends on NETFILTER_XT_TARGET_LOG = n 1016 - depends on NETFILTER_NETLINK_LOG = n 1017 - depends on INET = n 1018 - depends on IPV6 = n 1019 - depends on IP_SCTP = n 1020 - depends on AF_RXRPC = n 1021 - depends on LLC2 = n 1022 - depends on NET_KEY = n 1023 - depends on INET_DIAG = n 1024 - depends on DNS_RESOLVER = n 1025 - depends on AX25 = n 1026 - depends on ATALK = n 1027 991 1028 992 # Filesystems 1029 - depends on USB_DEVICEFS = n 1030 - depends on USB_GADGETFS = n 1031 - depends on USB_FUNCTIONFS = n 1032 - depends on DEVTMPFS = n 1033 - depends on XENFS = n 1034 - 1035 993 depends on 9P_FS = n 1036 - depends on ADFS_FS = n 1037 - depends on AFFS_FS = n 1038 994 depends on AFS_FS = n 1039 995 depends on AUTOFS4_FS = n 1040 - depends on BEFS_FS = n 1041 - depends on BFS_FS = n 1042 - depends on BTRFS_FS = n 1043 996 depends on CEPH_FS = n 1044 997 depends on CIFS = n 1045 998 depends on CODA_FS = n 1046 - depends on CONFIGFS_FS = n 1047 - depends on CRAMFS = n 1048 - depends on DEBUG_FS = n 1049 - depends on ECRYPT_FS = n 1050 - depends on EFS_FS = n 1051 - depends on EXOFS_FS = n 1052 - depends on FAT_FS = n 1053 
999 depends on FUSE_FS = n 1054 1000 depends on GFS2_FS = n 1055 - depends on HFS_FS = n 1056 - depends on HFSPLUS_FS = n 1057 - depends on HPFS_FS = n 1058 - depends on HUGETLBFS = n 1059 - depends on ISO9660_FS = n 1060 - depends on JFFS2_FS = n 1061 - depends on JFS_FS = n 1062 - depends on LOGFS = n 1063 - depends on MINIX_FS = n 1064 1001 depends on NCP_FS = n 1065 1002 depends on NFSD = n 1066 1003 depends on NFS_FS = n 1067 - depends on NILFS2_FS = n 1068 - depends on NTFS_FS = n 1069 1004 depends on OCFS2_FS = n 1070 - depends on OMFS_FS = n 1071 - depends on QNX4FS_FS = n 1072 - depends on QNX6FS_FS = n 1073 - depends on REISERFS_FS = n 1074 - depends on SQUASHFS = n 1075 - depends on SYSV_FS = n 1076 - depends on UBIFS_FS = n 1077 - depends on UDF_FS = n 1078 - depends on UFS_FS = n 1079 - depends on VXFS_FS = n 1080 1005 depends on XFS_FS = n 1081 - 1082 - depends on !UML || HOSTFS = n 1083 - 1084 - # The rare drivers that won't build 1085 - depends on AIRO = n 1086 - depends on AIRO_CS = n 1087 - depends on TUN = n 1088 - depends on INFINIBAND_QIB = n 1089 - depends on BLK_DEV_LOOP = n 1090 - depends on ANDROID_BINDER_IPC = n 1091 - 1092 - # Security modules 1093 - depends on SECURITY_TOMOYO = n 1094 - depends on SECURITY_APPARMOR = n 1095 1006 1096 1007 config UIDGID_STRICT_TYPE_CHECKS 1097 1008 bool "Require conversions between uid/gids and their internal representation"
+9 -5
ipc/msg.c
··· 443 443 goto out_unlock; 444 444 } 445 445 446 + err = ipc_update_perm(&msqid64.msg_perm, ipcp); 447 + if (err) 448 + goto out_unlock; 449 + 446 450 msq->q_qbytes = msqid64.msg_qbytes; 447 451 448 - ipc_update_perm(&msqid64.msg_perm, ipcp); 449 452 msq->q_ctime = get_seconds(); 450 453 /* sleeping receivers might be excluded by 451 454 * stricter permissions. ··· 925 922 #ifdef CONFIG_PROC_FS 926 923 static int sysvipc_msg_proc_show(struct seq_file *s, void *it) 927 924 { 925 + struct user_namespace *user_ns = seq_user_ns(s); 928 926 struct msg_queue *msq = it; 929 927 930 928 return seq_printf(s, ··· 937 933 msq->q_qnum, 938 934 msq->q_lspid, 939 935 msq->q_lrpid, 940 - msq->q_perm.uid, 941 - msq->q_perm.gid, 942 - msq->q_perm.cuid, 943 - msq->q_perm.cgid, 936 + from_kuid_munged(user_ns, msq->q_perm.uid), 937 + from_kgid_munged(user_ns, msq->q_perm.gid), 938 + from_kuid_munged(user_ns, msq->q_perm.cuid), 939 + from_kgid_munged(user_ns, msq->q_perm.cgid), 944 940 msq->q_stime, 945 941 msq->q_rtime, 946 942 msq->q_ctime);
+8 -5
ipc/sem.c
··· 1104 1104 freeary(ns, ipcp); 1105 1105 goto out_up; 1106 1106 case IPC_SET: 1107 - ipc_update_perm(&semid64.sem_perm, ipcp); 1107 + err = ipc_update_perm(&semid64.sem_perm, ipcp); 1108 + if (err) 1109 + goto out_unlock; 1108 1110 sma->sem_ctime = get_seconds(); 1109 1111 break; 1110 1112 default: ··· 1679 1677 #ifdef CONFIG_PROC_FS 1680 1678 static int sysvipc_sem_proc_show(struct seq_file *s, void *it) 1681 1679 { 1680 + struct user_namespace *user_ns = seq_user_ns(s); 1682 1681 struct sem_array *sma = it; 1683 1682 1684 1683 return seq_printf(s, ··· 1688 1685 sma->sem_perm.id, 1689 1686 sma->sem_perm.mode, 1690 1687 sma->sem_nsems, 1691 - sma->sem_perm.uid, 1692 - sma->sem_perm.gid, 1693 - sma->sem_perm.cuid, 1694 - sma->sem_perm.cgid, 1688 + from_kuid_munged(user_ns, sma->sem_perm.uid), 1689 + from_kgid_munged(user_ns, sma->sem_perm.gid), 1690 + from_kuid_munged(user_ns, sma->sem_perm.cuid), 1691 + from_kgid_munged(user_ns, sma->sem_perm.cgid), 1695 1692 sma->sem_otime, 1696 1693 sma->sem_ctime); 1697 1694 }
+11 -8
ipc/shm.c
··· 758 758 do_shm_rmid(ns, ipcp); 759 759 goto out_up; 760 760 case IPC_SET: 761 - ipc_update_perm(&shmid64.shm_perm, ipcp); 761 + err = ipc_update_perm(&shmid64.shm_perm, ipcp); 762 + if (err) 763 + goto out_unlock; 762 764 shp->shm_ctim = get_seconds(); 763 765 break; 764 766 default: ··· 895 893 audit_ipc_obj(&(shp->shm_perm)); 896 894 897 895 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { 898 - uid_t euid = current_euid(); 896 + kuid_t euid = current_euid(); 899 897 err = -EPERM; 900 - if (euid != shp->shm_perm.uid && 901 - euid != shp->shm_perm.cuid) 898 + if (!uid_eq(euid, shp->shm_perm.uid) && 899 + !uid_eq(euid, shp->shm_perm.cuid)) 902 900 goto out_unlock; 903 901 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) 904 902 goto out_unlock; ··· 1222 1220 #ifdef CONFIG_PROC_FS 1223 1221 static int sysvipc_shm_proc_show(struct seq_file *s, void *it) 1224 1222 { 1223 + struct user_namespace *user_ns = seq_user_ns(s); 1225 1224 struct shmid_kernel *shp = it; 1226 1225 unsigned long rss = 0, swp = 0; 1227 1226 ··· 1245 1242 shp->shm_cprid, 1246 1243 shp->shm_lprid, 1247 1244 shp->shm_nattch, 1248 - shp->shm_perm.uid, 1249 - shp->shm_perm.gid, 1250 - shp->shm_perm.cuid, 1251 - shp->shm_perm.cgid, 1245 + from_kuid_munged(user_ns, shp->shm_perm.uid), 1246 + from_kgid_munged(user_ns, shp->shm_perm.gid), 1247 + from_kuid_munged(user_ns, shp->shm_perm.cuid), 1248 + from_kgid_munged(user_ns, shp->shm_perm.cgid), 1252 1249 shp->shm_atim, 1253 1250 shp->shm_dtim, 1254 1251 shp->shm_ctim,
+21 -14
ipc/util.c
··· 249 249 250 250 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) 251 251 { 252 - uid_t euid; 253 - gid_t egid; 252 + kuid_t euid; 253 + kgid_t egid; 254 254 int id, err; 255 255 256 256 if (size > IPCMNI) ··· 606 606 607 607 int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) 608 608 { 609 - uid_t euid = current_euid(); 609 + kuid_t euid = current_euid(); 610 610 int requested_mode, granted_mode; 611 611 612 612 audit_ipc_obj(ipcp); 613 613 requested_mode = (flag >> 6) | (flag >> 3) | flag; 614 614 granted_mode = ipcp->mode; 615 - if (euid == ipcp->cuid || 616 - euid == ipcp->uid) 615 + if (uid_eq(euid, ipcp->cuid) || 616 + uid_eq(euid, ipcp->uid)) 617 617 granted_mode >>= 6; 618 618 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) 619 619 granted_mode >>= 3; ··· 643 643 void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) 644 644 { 645 645 out->key = in->key; 646 - out->uid = in->uid; 647 - out->gid = in->gid; 648 - out->cuid = in->cuid; 649 - out->cgid = in->cgid; 646 + out->uid = from_kuid_munged(current_user_ns(), in->uid); 647 + out->gid = from_kgid_munged(current_user_ns(), in->gid); 648 + out->cuid = from_kuid_munged(current_user_ns(), in->cuid); 649 + out->cgid = from_kgid_munged(current_user_ns(), in->cgid); 650 650 out->mode = in->mode; 651 651 out->seq = in->seq; 652 652 } ··· 747 747 * @in: the permission given as input. 748 748 * @out: the permission of the ipc to set. 
749 749 */ 750 - void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) 750 + int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) 751 751 { 752 - out->uid = in->uid; 753 - out->gid = in->gid; 752 + kuid_t uid = make_kuid(current_user_ns(), in->uid); 753 + kgid_t gid = make_kgid(current_user_ns(), in->gid); 754 + if (!uid_valid(uid) || !gid_valid(gid)) 755 + return -EINVAL; 756 + 757 + out->uid = uid; 758 + out->gid = gid; 754 759 out->mode = (out->mode & ~S_IRWXUGO) 755 760 | (in->mode & S_IRWXUGO); 761 + 762 + return 0; 756 763 } 757 764 758 765 /** ··· 784 777 struct ipc64_perm *perm, int extra_perm) 785 778 { 786 779 struct kern_ipc_perm *ipcp; 787 - uid_t euid; 780 + kuid_t euid; 788 781 int err; 789 782 790 783 down_write(&ids->rw_mutex); ··· 800 793 perm->gid, perm->mode); 801 794 802 795 euid = current_euid(); 803 - if (euid == ipcp->cuid || euid == ipcp->uid || 796 + if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) || 804 797 ns_capable(ns->user_ns, CAP_SYS_ADMIN)) 805 798 return ipcp; 806 799
+1 -1
ipc/util.h
··· 125 125 126 126 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); 127 127 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); 128 - void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); 128 + int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); 129 129 struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns, 130 130 struct ipc_ids *ids, int id, int cmd, 131 131 struct ipc64_perm *perm, int extra_perm);
+2 -2
kernel/acct.c
··· 507 507 do_div(elapsed, AHZ); 508 508 ac.ac_btime = get_seconds() - elapsed; 509 509 /* we really need to bite the bullet and change layout */ 510 - ac.ac_uid = orig_cred->uid; 511 - ac.ac_gid = orig_cred->gid; 510 + ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid); 511 + ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid); 512 512 #if ACCT_VERSION==2 513 513 ac.ac_ahz = AHZ; 514 514 #endif
+46 -71
kernel/audit.c
··· 61 61 #include <linux/netlink.h> 62 62 #include <linux/freezer.h> 63 63 #include <linux/tty.h> 64 + #include <linux/pid_namespace.h> 64 65 65 66 #include "audit.h" 66 67 ··· 105 104 static int audit_backlog_wait_overflow = 0; 106 105 107 106 /* The identity of the user shutting down the audit system. */ 108 - uid_t audit_sig_uid = -1; 107 + kuid_t audit_sig_uid = INVALID_UID; 109 108 pid_t audit_sig_pid = -1; 110 109 u32 audit_sig_sid = 0; 111 110 ··· 265 264 } 266 265 267 266 static int audit_log_config_change(char *function_name, int new, int old, 268 - uid_t loginuid, u32 sessionid, u32 sid, 267 + kuid_t loginuid, u32 sessionid, u32 sid, 269 268 int allow_changes) 270 269 { 271 270 struct audit_buffer *ab; ··· 273 272 274 273 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); 275 274 audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, 276 - old, loginuid, sessionid); 275 + old, from_kuid(&init_user_ns, loginuid), sessionid); 277 276 if (sid) { 278 277 char *ctx = NULL; 279 278 u32 len; ··· 293 292 } 294 293 295 294 static int audit_do_config_change(char *function_name, int *to_change, 296 - int new, uid_t loginuid, u32 sessionid, 295 + int new, kuid_t loginuid, u32 sessionid, 297 296 u32 sid) 298 297 { 299 298 int allow_changes, rc = 0, old = *to_change; ··· 320 319 return rc; 321 320 } 322 321 323 - static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid, 322 + static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid, 324 323 u32 sid) 325 324 { 326 325 return audit_do_config_change("audit_rate_limit", &audit_rate_limit, 327 326 limit, loginuid, sessionid, sid); 328 327 } 329 328 330 - static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid, 329 + static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid, 331 330 u32 sid) 332 331 { 333 332 return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, 334 333 limit, loginuid, sessionid, sid); 
335 334 } 336 335 337 - static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid) 336 + static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid) 338 337 { 339 338 int rc; 340 339 if (state < AUDIT_OFF || state > AUDIT_LOCKED) ··· 349 348 return rc; 350 349 } 351 350 352 - static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid) 351 + static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid) 353 352 { 354 353 if (state != AUDIT_FAIL_SILENT 355 354 && state != AUDIT_FAIL_PRINTK ··· 468 467 return 0; 469 468 } 470 469 471 - static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid) 472 - { 473 - struct task_struct *tsk; 474 - int err; 475 - 476 - rcu_read_lock(); 477 - tsk = find_task_by_vpid(pid); 478 - if (!tsk) { 479 - rcu_read_unlock(); 480 - return -ESRCH; 481 - } 482 - get_task_struct(tsk); 483 - rcu_read_unlock(); 484 - err = tty_audit_push_task(tsk, loginuid, sessionid); 485 - put_task_struct(tsk); 486 - return err; 487 - } 488 - 489 470 int audit_send_list(void *_dest) 490 471 { 491 472 struct audit_netlink_list *dest = _dest; ··· 571 588 { 572 589 int err = 0; 573 590 591 + /* Only support the initial namespaces for now. 
*/ 592 + if ((current_user_ns() != &init_user_ns) || 593 + (task_active_pid_ns(current) != &init_pid_ns)) 594 + return -EPERM; 595 + 574 596 switch (msg_type) { 575 597 case AUDIT_GET: 576 598 case AUDIT_LIST: ··· 607 619 } 608 620 609 621 static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, 610 - u32 pid, u32 uid, uid_t auid, u32 ses, 611 - u32 sid) 622 + kuid_t auid, u32 ses, u32 sid) 612 623 { 613 624 int rc = 0; 614 625 char *ctx = NULL; ··· 620 633 621 634 *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); 622 635 audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u", 623 - pid, uid, auid, ses); 636 + task_tgid_vnr(current), 637 + from_kuid(&init_user_ns, current_uid()), 638 + from_kuid(&init_user_ns, auid), ses); 624 639 if (sid) { 625 640 rc = security_secid_to_secctx(sid, &ctx, &len); 626 641 if (rc) ··· 638 649 639 650 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 640 651 { 641 - u32 uid, pid, seq, sid; 652 + u32 seq, sid; 642 653 void *data; 643 654 struct audit_status *status_get, status_set; 644 655 int err; 645 656 struct audit_buffer *ab; 646 657 u16 msg_type = nlh->nlmsg_type; 647 - uid_t loginuid; /* loginuid of sender */ 658 + kuid_t loginuid; /* loginuid of sender */ 648 659 u32 sessionid; 649 660 struct audit_sig_info *sig_data; 650 661 char *ctx = NULL; ··· 664 675 return err; 665 676 } 666 677 667 - pid = NETLINK_CREDS(skb)->pid; 668 - uid = NETLINK_CREDS(skb)->uid; 669 678 loginuid = audit_get_loginuid(current); 670 679 sessionid = audit_get_sessionid(current); 671 680 security_task_getsecid(current, &sid); ··· 725 738 if (!audit_enabled && msg_type != AUDIT_USER_AVC) 726 739 return 0; 727 740 728 - err = audit_filter_user(&NETLINK_CB(skb)); 741 + err = audit_filter_user(); 729 742 if (err == 1) { 730 743 err = 0; 731 744 if (msg_type == AUDIT_USER_TTY) { 732 - err = audit_prepare_user_tty(pid, loginuid, 745 + err = tty_audit_push_task(current, loginuid, 733 746 sessionid); 734 747 if (err) 735 
748 break; 736 749 } 737 - audit_log_common_recv_msg(&ab, msg_type, pid, uid, 750 + audit_log_common_recv_msg(&ab, msg_type, 738 751 loginuid, sessionid, sid); 739 752 740 753 if (msg_type != AUDIT_USER_TTY) ··· 750 763 size--; 751 764 audit_log_n_untrustedstring(ab, data, size); 752 765 } 753 - audit_set_pid(ab, pid); 766 + audit_set_pid(ab, NETLINK_CB(skb).pid); 754 767 audit_log_end(ab); 755 768 } 756 769 break; ··· 759 772 if (nlmsg_len(nlh) < sizeof(struct audit_rule)) 760 773 return -EINVAL; 761 774 if (audit_enabled == AUDIT_LOCKED) { 762 - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, 763 - uid, loginuid, sessionid, sid); 775 + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 776 + loginuid, sessionid, sid); 764 777 765 778 audit_log_format(ab, " audit_enabled=%d res=0", 766 779 audit_enabled); ··· 770 783 /* fallthrough */ 771 784 case AUDIT_LIST: 772 785 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, 773 - uid, seq, data, nlmsg_len(nlh), 786 + seq, data, nlmsg_len(nlh), 774 787 loginuid, sessionid, sid); 775 788 break; 776 789 case AUDIT_ADD_RULE: ··· 778 791 if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) 779 792 return -EINVAL; 780 793 if (audit_enabled == AUDIT_LOCKED) { 781 - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, 782 - uid, loginuid, sessionid, sid); 794 + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 795 + loginuid, sessionid, sid); 783 796 784 797 audit_log_format(ab, " audit_enabled=%d res=0", 785 798 audit_enabled); ··· 789 802 /* fallthrough */ 790 803 case AUDIT_LIST_RULES: 791 804 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, 792 - uid, seq, data, nlmsg_len(nlh), 805 + seq, data, nlmsg_len(nlh), 793 806 loginuid, sessionid, sid); 794 807 break; 795 808 case AUDIT_TRIM: 796 809 audit_trim_trees(); 797 810 798 - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, 799 - uid, loginuid, sessionid, sid); 811 + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 812 + loginuid, 
sessionid, sid); 800 813 801 814 audit_log_format(ab, " op=trim res=1"); 802 815 audit_log_end(ab); ··· 827 840 /* OK, here comes... */ 828 841 err = audit_tag_tree(old, new); 829 842 830 - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, 831 - uid, loginuid, sessionid, sid); 843 + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 844 + loginuid, sessionid, sid); 832 845 833 846 audit_log_format(ab, " op=make_equiv old="); 834 847 audit_log_untrustedstring(ab, old); ··· 853 866 security_release_secctx(ctx, len); 854 867 return -ENOMEM; 855 868 } 856 - sig_data->uid = audit_sig_uid; 869 + sig_data->uid = from_kuid(&init_user_ns, audit_sig_uid); 857 870 sig_data->pid = audit_sig_pid; 858 871 if (audit_sig_sid) { 859 872 memcpy(sig_data->ctx, ctx, len); ··· 865 878 break; 866 879 case AUDIT_TTY_GET: { 867 880 struct audit_tty_status s; 868 - struct task_struct *tsk; 869 - unsigned long flags; 881 + struct task_struct *tsk = current; 870 882 871 - rcu_read_lock(); 872 - tsk = find_task_by_vpid(pid); 873 - if (tsk && lock_task_sighand(tsk, &flags)) { 874 - s.enabled = tsk->signal->audit_tty != 0; 875 - unlock_task_sighand(tsk, &flags); 876 - } else 877 - err = -ESRCH; 878 - rcu_read_unlock(); 883 + spin_lock_irq(&tsk->sighand->siglock); 884 + s.enabled = tsk->signal->audit_tty != 0; 885 + spin_unlock_irq(&tsk->sighand->siglock); 879 886 880 - if (!err) 881 - audit_send_reply(NETLINK_CB(skb).pid, seq, 882 - AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); 887 + audit_send_reply(NETLINK_CB(skb).pid, seq, 888 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); 883 889 break; 884 890 } 885 891 case AUDIT_TTY_SET: { 886 892 struct audit_tty_status *s; 887 - struct task_struct *tsk; 888 - unsigned long flags; 893 + struct task_struct *tsk = current; 889 894 890 895 if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) 891 896 return -EINVAL; 892 897 s = data; 893 898 if (s->enabled != 0 && s->enabled != 1) 894 899 return -EINVAL; 895 - rcu_read_lock(); 896 - tsk = find_task_by_vpid(pid); 
897 - if (tsk && lock_task_sighand(tsk, &flags)) { 898 - tsk->signal->audit_tty = s->enabled != 0; 899 - unlock_task_sighand(tsk, &flags); 900 - } else 901 - err = -ESRCH; 902 - rcu_read_unlock(); 900 + 901 + spin_lock_irq(&tsk->sighand->siglock); 902 + tsk->signal->audit_tty = s->enabled != 0; 903 + spin_unlock_irq(&tsk->sighand->siglock); 903 904 break; 904 905 } 905 906 default:
+3 -1
kernel/audit.h
··· 76 76 77 77 extern int audit_match_class(int class, unsigned syscall); 78 78 extern int audit_comparator(const u32 left, const u32 op, const u32 right); 79 + extern int audit_uid_comparator(kuid_t left, u32 op, kuid_t right); 80 + extern int audit_gid_comparator(kgid_t left, u32 op, kgid_t right); 79 81 extern int audit_compare_dname_path(const char *dname, const char *path, 80 82 int *dirlen); 81 83 extern struct sk_buff * audit_make_reply(int pid, int seq, int type, ··· 146 144 extern char *audit_unpack_string(void **, size_t *, size_t); 147 145 148 146 extern pid_t audit_sig_pid; 149 - extern uid_t audit_sig_uid; 147 + extern kuid_t audit_sig_uid; 150 148 extern u32 audit_sig_sid; 151 149 152 150 #ifdef CONFIG_AUDITSYSCALL
+1 -1
kernel/audit_watch.c
··· 241 241 struct audit_buffer *ab; 242 242 ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE); 243 243 audit_log_format(ab, "auid=%u ses=%u op=", 244 - audit_get_loginuid(current), 244 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 245 245 audit_get_sessionid(current)); 246 246 audit_log_string(ab, op); 247 247 audit_log_format(ab, " path=");
+118 -19
kernel/auditfilter.c
··· 342 342 343 343 f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS); 344 344 f->val = rule->values[i]; 345 + f->uid = INVALID_UID; 346 + f->gid = INVALID_GID; 345 347 346 348 err = -EINVAL; 347 349 if (f->op == Audit_bad) ··· 352 350 switch(f->type) { 353 351 default: 354 352 goto exit_free; 355 - case AUDIT_PID: 356 353 case AUDIT_UID: 357 354 case AUDIT_EUID: 358 355 case AUDIT_SUID: 359 356 case AUDIT_FSUID: 357 + case AUDIT_LOGINUID: 358 + /* bit ops not implemented for uid comparisons */ 359 + if (f->op == Audit_bitmask || f->op == Audit_bittest) 360 + goto exit_free; 361 + 362 + f->uid = make_kuid(current_user_ns(), f->val); 363 + if (!uid_valid(f->uid)) 364 + goto exit_free; 365 + break; 360 366 case AUDIT_GID: 361 367 case AUDIT_EGID: 362 368 case AUDIT_SGID: 363 369 case AUDIT_FSGID: 364 - case AUDIT_LOGINUID: 370 + /* bit ops not implemented for gid comparisons */ 371 + if (f->op == Audit_bitmask || f->op == Audit_bittest) 372 + goto exit_free; 373 + 374 + f->gid = make_kgid(current_user_ns(), f->val); 375 + if (!gid_valid(f->gid)) 376 + goto exit_free; 377 + break; 378 + case AUDIT_PID: 365 379 case AUDIT_PERS: 366 380 case AUDIT_MSGTYPE: 367 381 case AUDIT_PPID: ··· 455 437 456 438 f->type = data->fields[i]; 457 439 f->val = data->values[i]; 440 + f->uid = INVALID_UID; 441 + f->gid = INVALID_GID; 458 442 f->lsm_str = NULL; 459 443 f->lsm_rule = NULL; 460 444 switch(f->type) { 461 - case AUDIT_PID: 462 445 case AUDIT_UID: 463 446 case AUDIT_EUID: 464 447 case AUDIT_SUID: 465 448 case AUDIT_FSUID: 449 + case AUDIT_LOGINUID: 450 + case AUDIT_OBJ_UID: 451 + /* bit ops not implemented for uid comparisons */ 452 + if (f->op == Audit_bitmask || f->op == Audit_bittest) 453 + goto exit_free; 454 + 455 + f->uid = make_kuid(current_user_ns(), f->val); 456 + if (!uid_valid(f->uid)) 457 + goto exit_free; 458 + break; 466 459 case AUDIT_GID: 467 460 case AUDIT_EGID: 468 461 case AUDIT_SGID: 469 462 case AUDIT_FSGID: 470 - case AUDIT_LOGINUID: 463 + case 
AUDIT_OBJ_GID: 464 + /* bit ops not implemented for gid comparisons */ 465 + if (f->op == Audit_bitmask || f->op == Audit_bittest) 466 + goto exit_free; 467 + 468 + f->gid = make_kgid(current_user_ns(), f->val); 469 + if (!gid_valid(f->gid)) 470 + goto exit_free; 471 + break; 472 + case AUDIT_PID: 471 473 case AUDIT_PERS: 472 474 case AUDIT_MSGTYPE: 473 475 case AUDIT_PPID: ··· 499 461 case AUDIT_ARG1: 500 462 case AUDIT_ARG2: 501 463 case AUDIT_ARG3: 502 - case AUDIT_OBJ_UID: 503 - case AUDIT_OBJ_GID: 504 464 break; 505 465 case AUDIT_ARCH: 506 466 entry->rule.arch_f = f; ··· 741 705 case AUDIT_FILTERKEY: 742 706 /* both filterkeys exist based on above type compare */ 743 707 if (strcmp(a->filterkey, b->filterkey)) 708 + return 1; 709 + break; 710 + case AUDIT_UID: 711 + case AUDIT_EUID: 712 + case AUDIT_SUID: 713 + case AUDIT_FSUID: 714 + case AUDIT_LOGINUID: 715 + case AUDIT_OBJ_UID: 716 + if (!uid_eq(a->fields[i].uid, b->fields[i].uid)) 717 + return 1; 718 + break; 719 + case AUDIT_GID: 720 + case AUDIT_EGID: 721 + case AUDIT_SGID: 722 + case AUDIT_FSGID: 723 + case AUDIT_OBJ_GID: 724 + if (!gid_eq(a->fields[i].gid, b->fields[i].gid)) 744 725 return 1; 745 726 break; 746 727 default: ··· 1109 1056 } 1110 1057 1111 1058 /* Log rule additions and removals */ 1112 - static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid, 1059 + static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid, 1113 1060 char *action, struct audit_krule *rule, 1114 1061 int res) 1115 1062 { ··· 1121 1068 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); 1122 1069 if (!ab) 1123 1070 return; 1124 - audit_log_format(ab, "auid=%u ses=%u", loginuid, sessionid); 1071 + audit_log_format(ab, "auid=%u ses=%u", 1072 + from_kuid(&init_user_ns, loginuid), sessionid); 1125 1073 if (sid) { 1126 1074 char *ctx = NULL; 1127 1075 u32 len; ··· 1152 1098 * @sessionid: sessionid for netlink audit message 1153 1099 * @sid: SE Linux Security ID of sender 1154 1100 */ 
1155 - int audit_receive_filter(int type, int pid, int uid, int seq, void *data, 1156 - size_t datasz, uid_t loginuid, u32 sessionid, u32 sid) 1101 + int audit_receive_filter(int type, int pid, int seq, void *data, 1102 + size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid) 1157 1103 { 1158 1104 struct task_struct *tsk; 1159 1105 struct audit_netlink_list *dest; ··· 1252 1198 } 1253 1199 } 1254 1200 1201 + int audit_uid_comparator(kuid_t left, u32 op, kuid_t right) 1202 + { 1203 + switch (op) { 1204 + case Audit_equal: 1205 + return uid_eq(left, right); 1206 + case Audit_not_equal: 1207 + return !uid_eq(left, right); 1208 + case Audit_lt: 1209 + return uid_lt(left, right); 1210 + case Audit_le: 1211 + return uid_lte(left, right); 1212 + case Audit_gt: 1213 + return uid_gt(left, right); 1214 + case Audit_ge: 1215 + return uid_gte(left, right); 1216 + case Audit_bitmask: 1217 + case Audit_bittest: 1218 + default: 1219 + BUG(); 1220 + return 0; 1221 + } 1222 + } 1223 + 1224 + int audit_gid_comparator(kgid_t left, u32 op, kgid_t right) 1225 + { 1226 + switch (op) { 1227 + case Audit_equal: 1228 + return gid_eq(left, right); 1229 + case Audit_not_equal: 1230 + return !gid_eq(left, right); 1231 + case Audit_lt: 1232 + return gid_lt(left, right); 1233 + case Audit_le: 1234 + return gid_lte(left, right); 1235 + case Audit_gt: 1236 + return gid_gt(left, right); 1237 + case Audit_ge: 1238 + return gid_gte(left, right); 1239 + case Audit_bitmask: 1240 + case Audit_bittest: 1241 + default: 1242 + BUG(); 1243 + return 0; 1244 + } 1245 + } 1246 + 1255 1247 /* Compare given dentry name with last component in given path, 1256 1248 * return of 0 indicates a match. 
*/ 1257 1249 int audit_compare_dname_path(const char *dname, const char *path, ··· 1336 1236 return strncmp(p, dname, dlen); 1337 1237 } 1338 1238 1339 - static int audit_filter_user_rules(struct netlink_skb_parms *cb, 1340 - struct audit_krule *rule, 1239 + static int audit_filter_user_rules(struct audit_krule *rule, 1341 1240 enum audit_state *state) 1342 1241 { 1343 1242 int i; ··· 1348 1249 1349 1250 switch (f->type) { 1350 1251 case AUDIT_PID: 1351 - result = audit_comparator(cb->creds.pid, f->op, f->val); 1252 + result = audit_comparator(task_pid_vnr(current), f->op, f->val); 1352 1253 break; 1353 1254 case AUDIT_UID: 1354 - result = audit_comparator(cb->creds.uid, f->op, f->val); 1255 + result = audit_uid_comparator(current_uid(), f->op, f->uid); 1355 1256 break; 1356 1257 case AUDIT_GID: 1357 - result = audit_comparator(cb->creds.gid, f->op, f->val); 1258 + result = audit_gid_comparator(current_gid(), f->op, f->gid); 1358 1259 break; 1359 1260 case AUDIT_LOGINUID: 1360 - result = audit_comparator(audit_get_loginuid(current), 1361 - f->op, f->val); 1261 + result = audit_uid_comparator(audit_get_loginuid(current), 1262 + f->op, f->uid); 1362 1263 break; 1363 1264 case AUDIT_SUBJ_USER: 1364 1265 case AUDIT_SUBJ_ROLE: ··· 1386 1287 return 1; 1387 1288 } 1388 1289 1389 - int audit_filter_user(struct netlink_skb_parms *cb) 1290 + int audit_filter_user(void) 1390 1291 { 1391 1292 enum audit_state state = AUDIT_DISABLED; 1392 1293 struct audit_entry *e; ··· 1394 1295 1395 1296 rcu_read_lock(); 1396 1297 list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) { 1397 - if (audit_filter_user_rules(cb, &e->rule, &state)) { 1298 + if (audit_filter_user_rules(&e->rule, &state)) { 1398 1299 if (state == AUDIT_DISABLED) 1399 1300 ret = 0; 1400 1301 break;
+112 -107
kernel/auditsc.c
··· 113 113 unsigned long ino; 114 114 dev_t dev; 115 115 umode_t mode; 116 - uid_t uid; 117 - gid_t gid; 116 + kuid_t uid; 117 + kgid_t gid; 118 118 dev_t rdev; 119 119 u32 osid; 120 120 struct audit_cap_data fcap; ··· 149 149 struct audit_aux_data_pids { 150 150 struct audit_aux_data d; 151 151 pid_t target_pid[AUDIT_AUX_PIDS]; 152 - uid_t target_auid[AUDIT_AUX_PIDS]; 153 - uid_t target_uid[AUDIT_AUX_PIDS]; 152 + kuid_t target_auid[AUDIT_AUX_PIDS]; 153 + kuid_t target_uid[AUDIT_AUX_PIDS]; 154 154 unsigned int target_sessionid[AUDIT_AUX_PIDS]; 155 155 u32 target_sid[AUDIT_AUX_PIDS]; 156 156 char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN]; ··· 208 208 size_t sockaddr_len; 209 209 /* Save things to print about task_struct */ 210 210 pid_t pid, ppid; 211 - uid_t uid, euid, suid, fsuid; 212 - gid_t gid, egid, sgid, fsgid; 211 + kuid_t uid, euid, suid, fsuid; 212 + kgid_t gid, egid, sgid, fsgid; 213 213 unsigned long personality; 214 214 int arch; 215 215 216 216 pid_t target_pid; 217 - uid_t target_auid; 218 - uid_t target_uid; 217 + kuid_t target_auid; 218 + kuid_t target_uid; 219 219 unsigned int target_sessionid; 220 220 u32 target_sid; 221 221 char target_comm[TASK_COMM_LEN]; ··· 231 231 long args[6]; 232 232 } socketcall; 233 233 struct { 234 - uid_t uid; 235 - gid_t gid; 234 + kuid_t uid; 235 + kgid_t gid; 236 236 umode_t mode; 237 237 u32 osid; 238 238 int has_perm; ··· 464 464 return 0; 465 465 } 466 466 467 - static int audit_compare_id(uid_t uid1, 468 - struct audit_names *name, 469 - unsigned long name_offset, 470 - struct audit_field *f, 471 - struct audit_context *ctx) 467 + static int audit_compare_uid(kuid_t uid, 468 + struct audit_names *name, 469 + struct audit_field *f, 470 + struct audit_context *ctx) 472 471 { 473 472 struct audit_names *n; 474 - unsigned long addr; 475 - uid_t uid2; 476 473 int rc; 477 - 478 - BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t)); 479 - 474 + 480 475 if (name) { 481 - addr = (unsigned long)name; 482 - addr += name_offset; 
483 - 484 - uid2 = *(uid_t *)addr; 485 - rc = audit_comparator(uid1, f->op, uid2); 476 + rc = audit_uid_comparator(uid, f->op, name->uid); 486 477 if (rc) 487 478 return rc; 488 479 } 489 - 480 + 490 481 if (ctx) { 491 482 list_for_each_entry(n, &ctx->names_list, list) { 492 - addr = (unsigned long)n; 493 - addr += name_offset; 483 + rc = audit_uid_comparator(uid, f->op, n->uid); 484 + if (rc) 485 + return rc; 486 + } 487 + } 488 + return 0; 489 + } 494 490 495 - uid2 = *(uid_t *)addr; 496 - 497 - rc = audit_comparator(uid1, f->op, uid2); 491 + static int audit_compare_gid(kgid_t gid, 492 + struct audit_names *name, 493 + struct audit_field *f, 494 + struct audit_context *ctx) 495 + { 496 + struct audit_names *n; 497 + int rc; 498 + 499 + if (name) { 500 + rc = audit_gid_comparator(gid, f->op, name->gid); 501 + if (rc) 502 + return rc; 503 + } 504 + 505 + if (ctx) { 506 + list_for_each_entry(n, &ctx->names_list, list) { 507 + rc = audit_gid_comparator(gid, f->op, n->gid); 498 508 if (rc) 499 509 return rc; 500 510 } ··· 521 511 switch (f->val) { 522 512 /* process to file object comparisons */ 523 513 case AUDIT_COMPARE_UID_TO_OBJ_UID: 524 - return audit_compare_id(cred->uid, 525 - name, offsetof(struct audit_names, uid), 526 - f, ctx); 514 + return audit_compare_uid(cred->uid, name, f, ctx); 527 515 case AUDIT_COMPARE_GID_TO_OBJ_GID: 528 - return audit_compare_id(cred->gid, 529 - name, offsetof(struct audit_names, gid), 530 - f, ctx); 516 + return audit_compare_gid(cred->gid, name, f, ctx); 531 517 case AUDIT_COMPARE_EUID_TO_OBJ_UID: 532 - return audit_compare_id(cred->euid, 533 - name, offsetof(struct audit_names, uid), 534 - f, ctx); 518 + return audit_compare_uid(cred->euid, name, f, ctx); 535 519 case AUDIT_COMPARE_EGID_TO_OBJ_GID: 536 - return audit_compare_id(cred->egid, 537 - name, offsetof(struct audit_names, gid), 538 - f, ctx); 520 + return audit_compare_gid(cred->egid, name, f, ctx); 539 521 case AUDIT_COMPARE_AUID_TO_OBJ_UID: 540 - return 
audit_compare_id(tsk->loginuid, 541 - name, offsetof(struct audit_names, uid), 542 - f, ctx); 522 + return audit_compare_uid(tsk->loginuid, name, f, ctx); 543 523 case AUDIT_COMPARE_SUID_TO_OBJ_UID: 544 - return audit_compare_id(cred->suid, 545 - name, offsetof(struct audit_names, uid), 546 - f, ctx); 524 + return audit_compare_uid(cred->suid, name, f, ctx); 547 525 case AUDIT_COMPARE_SGID_TO_OBJ_GID: 548 - return audit_compare_id(cred->sgid, 549 - name, offsetof(struct audit_names, gid), 550 - f, ctx); 526 + return audit_compare_gid(cred->sgid, name, f, ctx); 551 527 case AUDIT_COMPARE_FSUID_TO_OBJ_UID: 552 - return audit_compare_id(cred->fsuid, 553 - name, offsetof(struct audit_names, uid), 554 - f, ctx); 528 + return audit_compare_uid(cred->fsuid, name, f, ctx); 555 529 case AUDIT_COMPARE_FSGID_TO_OBJ_GID: 556 - return audit_compare_id(cred->fsgid, 557 - name, offsetof(struct audit_names, gid), 558 - f, ctx); 530 + return audit_compare_gid(cred->fsgid, name, f, ctx); 559 531 /* uid comparisons */ 560 532 case AUDIT_COMPARE_UID_TO_AUID: 561 - return audit_comparator(cred->uid, f->op, tsk->loginuid); 533 + return audit_uid_comparator(cred->uid, f->op, tsk->loginuid); 562 534 case AUDIT_COMPARE_UID_TO_EUID: 563 - return audit_comparator(cred->uid, f->op, cred->euid); 535 + return audit_uid_comparator(cred->uid, f->op, cred->euid); 564 536 case AUDIT_COMPARE_UID_TO_SUID: 565 - return audit_comparator(cred->uid, f->op, cred->suid); 537 + return audit_uid_comparator(cred->uid, f->op, cred->suid); 566 538 case AUDIT_COMPARE_UID_TO_FSUID: 567 - return audit_comparator(cred->uid, f->op, cred->fsuid); 539 + return audit_uid_comparator(cred->uid, f->op, cred->fsuid); 568 540 /* auid comparisons */ 569 541 case AUDIT_COMPARE_AUID_TO_EUID: 570 - return audit_comparator(tsk->loginuid, f->op, cred->euid); 542 + return audit_uid_comparator(tsk->loginuid, f->op, cred->euid); 571 543 case AUDIT_COMPARE_AUID_TO_SUID: 572 - return audit_comparator(tsk->loginuid, f->op, cred->suid); 
544 + return audit_uid_comparator(tsk->loginuid, f->op, cred->suid); 573 545 case AUDIT_COMPARE_AUID_TO_FSUID: 574 - return audit_comparator(tsk->loginuid, f->op, cred->fsuid); 546 + return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid); 575 547 /* euid comparisons */ 576 548 case AUDIT_COMPARE_EUID_TO_SUID: 577 - return audit_comparator(cred->euid, f->op, cred->suid); 549 + return audit_uid_comparator(cred->euid, f->op, cred->suid); 578 550 case AUDIT_COMPARE_EUID_TO_FSUID: 579 - return audit_comparator(cred->euid, f->op, cred->fsuid); 551 + return audit_uid_comparator(cred->euid, f->op, cred->fsuid); 580 552 /* suid comparisons */ 581 553 case AUDIT_COMPARE_SUID_TO_FSUID: 582 - return audit_comparator(cred->suid, f->op, cred->fsuid); 554 + return audit_uid_comparator(cred->suid, f->op, cred->fsuid); 583 555 /* gid comparisons */ 584 556 case AUDIT_COMPARE_GID_TO_EGID: 585 - return audit_comparator(cred->gid, f->op, cred->egid); 557 + return audit_gid_comparator(cred->gid, f->op, cred->egid); 586 558 case AUDIT_COMPARE_GID_TO_SGID: 587 - return audit_comparator(cred->gid, f->op, cred->sgid); 559 + return audit_gid_comparator(cred->gid, f->op, cred->sgid); 588 560 case AUDIT_COMPARE_GID_TO_FSGID: 589 - return audit_comparator(cred->gid, f->op, cred->fsgid); 561 + return audit_gid_comparator(cred->gid, f->op, cred->fsgid); 590 562 /* egid comparisons */ 591 563 case AUDIT_COMPARE_EGID_TO_SGID: 592 - return audit_comparator(cred->egid, f->op, cred->sgid); 564 + return audit_gid_comparator(cred->egid, f->op, cred->sgid); 593 565 case AUDIT_COMPARE_EGID_TO_FSGID: 594 - return audit_comparator(cred->egid, f->op, cred->fsgid); 566 + return audit_gid_comparator(cred->egid, f->op, cred->fsgid); 595 567 /* sgid comparison */ 596 568 case AUDIT_COMPARE_SGID_TO_FSGID: 597 - return audit_comparator(cred->sgid, f->op, cred->fsgid); 569 + return audit_gid_comparator(cred->sgid, f->op, cred->fsgid); 598 570 default: 599 571 WARN(1, "Missing AUDIT_COMPARE define. 
Report as a bug\n"); 600 572 return 0; ··· 622 630 } 623 631 break; 624 632 case AUDIT_UID: 625 - result = audit_comparator(cred->uid, f->op, f->val); 633 + result = audit_uid_comparator(cred->uid, f->op, f->uid); 626 634 break; 627 635 case AUDIT_EUID: 628 - result = audit_comparator(cred->euid, f->op, f->val); 636 + result = audit_uid_comparator(cred->euid, f->op, f->uid); 629 637 break; 630 638 case AUDIT_SUID: 631 - result = audit_comparator(cred->suid, f->op, f->val); 639 + result = audit_uid_comparator(cred->suid, f->op, f->uid); 632 640 break; 633 641 case AUDIT_FSUID: 634 - result = audit_comparator(cred->fsuid, f->op, f->val); 642 + result = audit_uid_comparator(cred->fsuid, f->op, f->uid); 635 643 break; 636 644 case AUDIT_GID: 637 - result = audit_comparator(cred->gid, f->op, f->val); 645 + result = audit_gid_comparator(cred->gid, f->op, f->gid); 638 646 break; 639 647 case AUDIT_EGID: 640 - result = audit_comparator(cred->egid, f->op, f->val); 648 + result = audit_gid_comparator(cred->egid, f->op, f->gid); 641 649 break; 642 650 case AUDIT_SGID: 643 - result = audit_comparator(cred->sgid, f->op, f->val); 651 + result = audit_gid_comparator(cred->sgid, f->op, f->gid); 644 652 break; 645 653 case AUDIT_FSGID: 646 - result = audit_comparator(cred->fsgid, f->op, f->val); 654 + result = audit_gid_comparator(cred->fsgid, f->op, f->gid); 647 655 break; 648 656 case AUDIT_PERS: 649 657 result = audit_comparator(tsk->personality, f->op, f->val); ··· 709 717 break; 710 718 case AUDIT_OBJ_UID: 711 719 if (name) { 712 - result = audit_comparator(name->uid, f->op, f->val); 720 + result = audit_uid_comparator(name->uid, f->op, f->uid); 713 721 } else if (ctx) { 714 722 list_for_each_entry(n, &ctx->names_list, list) { 715 - if (audit_comparator(n->uid, f->op, f->val)) { 723 + if (audit_uid_comparator(n->uid, f->op, f->uid)) { 716 724 ++result; 717 725 break; 718 726 } ··· 721 729 break; 722 730 case AUDIT_OBJ_GID: 723 731 if (name) { 724 - result = 
audit_comparator(name->gid, f->op, f->val); 732 + result = audit_gid_comparator(name->gid, f->op, f->gid); 725 733 } else if (ctx) { 726 734 list_for_each_entry(n, &ctx->names_list, list) { 727 - if (audit_comparator(n->gid, f->op, f->val)) { 735 + if (audit_gid_comparator(n->gid, f->op, f->gid)) { 728 736 ++result; 729 737 break; 730 738 } ··· 742 750 case AUDIT_LOGINUID: 743 751 result = 0; 744 752 if (ctx) 745 - result = audit_comparator(tsk->loginuid, f->op, f->val); 753 + result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); 746 754 break; 747 755 case AUDIT_SUBJ_USER: 748 756 case AUDIT_SUBJ_ROLE: ··· 1176 1184 } 1177 1185 1178 1186 static int audit_log_pid_context(struct audit_context *context, pid_t pid, 1179 - uid_t auid, uid_t uid, unsigned int sessionid, 1187 + kuid_t auid, kuid_t uid, unsigned int sessionid, 1180 1188 u32 sid, char *comm) 1181 1189 { 1182 1190 struct audit_buffer *ab; ··· 1188 1196 if (!ab) 1189 1197 return rc; 1190 1198 1191 - audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, auid, 1192 - uid, sessionid); 1199 + audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, 1200 + from_kuid(&init_user_ns, auid), 1201 + from_kuid(&init_user_ns, uid), sessionid); 1193 1202 if (security_secid_to_secctx(sid, &ctx, &len)) { 1194 1203 audit_log_format(ab, " obj=(none)"); 1195 1204 rc = 1; ··· 1440 1447 u32 osid = context->ipc.osid; 1441 1448 1442 1449 audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho", 1443 - context->ipc.uid, context->ipc.gid, context->ipc.mode); 1450 + from_kuid(&init_user_ns, context->ipc.uid), 1451 + from_kgid(&init_user_ns, context->ipc.gid), 1452 + context->ipc.mode); 1444 1453 if (osid) { 1445 1454 char *ctx = NULL; 1446 1455 u32 len; ··· 1555 1560 MAJOR(n->dev), 1556 1561 MINOR(n->dev), 1557 1562 n->mode, 1558 - n->uid, 1559 - n->gid, 1563 + from_kuid(&init_user_ns, n->uid), 1564 + from_kgid(&init_user_ns, n->gid), 1560 1565 MAJOR(n->rdev), 1561 1566 MINOR(n->rdev)); 1562 1567 } ··· 1633 1638 
context->name_count, 1634 1639 context->ppid, 1635 1640 context->pid, 1636 - tsk->loginuid, 1637 - context->uid, 1638 - context->gid, 1639 - context->euid, context->suid, context->fsuid, 1640 - context->egid, context->sgid, context->fsgid, tty, 1641 + from_kuid(&init_user_ns, tsk->loginuid), 1642 + from_kuid(&init_user_ns, context->uid), 1643 + from_kgid(&init_user_ns, context->gid), 1644 + from_kuid(&init_user_ns, context->euid), 1645 + from_kuid(&init_user_ns, context->suid), 1646 + from_kuid(&init_user_ns, context->fsuid), 1647 + from_kgid(&init_user_ns, context->egid), 1648 + from_kgid(&init_user_ns, context->sgid), 1649 + from_kgid(&init_user_ns, context->fsgid), 1650 + tty, 1641 1651 tsk->sessionid); 1642 1652 1643 1653 ··· 2299 2299 * 2300 2300 * Called (set) from fs/proc/base.c::proc_loginuid_write(). 2301 2301 */ 2302 - int audit_set_loginuid(uid_t loginuid) 2302 + int audit_set_loginuid(kuid_t loginuid) 2303 2303 { 2304 2304 struct task_struct *task = current; 2305 2305 struct audit_context *context = task->audit_context; 2306 2306 unsigned int sessionid; 2307 2307 2308 2308 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE 2309 - if (task->loginuid != -1) 2309 + if (uid_valid(task->loginuid)) 2310 2310 return -EPERM; 2311 2311 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ 2312 2312 if (!capable(CAP_AUDIT_CONTROL)) ··· 2322 2322 audit_log_format(ab, "login pid=%d uid=%u " 2323 2323 "old auid=%u new auid=%u" 2324 2324 " old ses=%u new ses=%u", 2325 - task->pid, task_uid(task), 2326 - task->loginuid, loginuid, 2325 + task->pid, 2326 + from_kuid(&init_user_ns, task_uid(task)), 2327 + from_kuid(&init_user_ns, task->loginuid), 2328 + from_kuid(&init_user_ns, loginuid), 2327 2329 task->sessionid, sessionid); 2328 2330 audit_log_end(ab); 2329 2331 } ··· 2548 2546 struct audit_aux_data_pids *axp; 2549 2547 struct task_struct *tsk = current; 2550 2548 struct audit_context *ctx = tsk->audit_context; 2551 - uid_t uid = current_uid(), t_uid = task_uid(t); 2549 + kuid_t uid = 
current_uid(), t_uid = task_uid(t); 2552 2550 2553 2551 if (audit_pid && t->tgid == audit_pid) { 2554 2552 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2555 2553 audit_sig_pid = tsk->pid; 2556 - if (tsk->loginuid != -1) 2554 + if (uid_valid(tsk->loginuid)) 2557 2555 audit_sig_uid = tsk->loginuid; 2558 2556 else 2559 2557 audit_sig_uid = uid; ··· 2674 2672 2675 2673 static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr) 2676 2674 { 2677 - uid_t auid, uid; 2678 - gid_t gid; 2675 + kuid_t auid, uid; 2676 + kgid_t gid; 2679 2677 unsigned int sessionid; 2680 2678 2681 2679 auid = audit_get_loginuid(current); ··· 2683 2681 current_uid_gid(&uid, &gid); 2684 2682 2685 2683 audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u", 2686 - auid, uid, gid, sessionid); 2684 + from_kuid(&init_user_ns, auid), 2685 + from_kuid(&init_user_ns, uid), 2686 + from_kgid(&init_user_ns, gid), 2687 + sessionid); 2687 2688 audit_log_task_context(ab); 2688 2689 audit_log_format(ab, " pid=%d comm=", current->pid); 2689 2690 audit_log_untrustedstring(ab, current->comm);
+8 -2
kernel/cred.c
··· 799 799 atomic_read(&cred->usage), 800 800 read_cred_subscribers(cred)); 801 801 printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n", 802 - cred->uid, cred->euid, cred->suid, cred->fsuid); 802 + from_kuid_munged(&init_user_ns, cred->uid), 803 + from_kuid_munged(&init_user_ns, cred->euid), 804 + from_kuid_munged(&init_user_ns, cred->suid), 805 + from_kuid_munged(&init_user_ns, cred->fsuid)); 803 806 printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n", 804 - cred->gid, cred->egid, cred->sgid, cred->fsgid); 807 + from_kgid_munged(&init_user_ns, cred->gid), 808 + from_kgid_munged(&init_user_ns, cred->egid), 809 + from_kgid_munged(&init_user_ns, cred->sgid), 810 + from_kgid_munged(&init_user_ns, cred->fsgid)); 805 811 #ifdef CONFIG_SECURITY 806 812 printk(KERN_ERR "CRED: ->security is %p\n", cred->security); 807 813 if ((unsigned long) cred->security >= PAGE_SIZE &&
+1
kernel/pid.c
··· 479 479 } 480 480 return nr; 481 481 } 482 + EXPORT_SYMBOL_GPL(pid_nr_ns); 482 483 483 484 pid_t pid_vnr(struct pid *pid) 484 485 {
+2
kernel/pid_namespace.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/proc_fs.h> 18 18 #include <linux/reboot.h> 19 + #include <linux/export.h> 19 20 20 21 #define BITS_PER_PAGE (PAGE_SIZE*8) 21 22 ··· 145 144 if (parent != NULL) 146 145 put_pid_ns(parent); 147 146 } 147 + EXPORT_SYMBOL_GPL(free_pid_ns); 148 148 149 149 void zap_pid_ns_processes(struct pid_namespace *pid_ns) 150 150 {
+17 -6
kernel/taskstats.c
··· 27 27 #include <linux/cgroup.h> 28 28 #include <linux/fs.h> 29 29 #include <linux/file.h> 30 + #include <linux/pid_namespace.h> 30 31 #include <net/genetlink.h> 31 32 #include <linux/atomic.h> 32 33 ··· 175 174 up_write(&listeners->sem); 176 175 } 177 176 178 - static void fill_stats(struct task_struct *tsk, struct taskstats *stats) 177 + static void fill_stats(struct user_namespace *user_ns, 178 + struct pid_namespace *pid_ns, 179 + struct task_struct *tsk, struct taskstats *stats) 179 180 { 180 181 memset(stats, 0, sizeof(*stats)); 181 182 /* ··· 193 190 stats->version = TASKSTATS_VERSION; 194 191 stats->nvcsw = tsk->nvcsw; 195 192 stats->nivcsw = tsk->nivcsw; 196 - bacct_add_tsk(stats, tsk); 193 + bacct_add_tsk(user_ns, pid_ns, stats, tsk); 197 194 198 195 /* fill in extended acct fields */ 199 196 xacct_add_tsk(stats, tsk); ··· 210 207 rcu_read_unlock(); 211 208 if (!tsk) 212 209 return -ESRCH; 213 - fill_stats(tsk, stats); 210 + fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); 214 211 put_task_struct(tsk); 215 212 return 0; 216 213 } ··· 292 289 unsigned int cpu; 293 290 294 291 if (!cpumask_subset(mask, cpu_possible_mask)) 292 + return -EINVAL; 293 + 294 + if (current_user_ns() != &init_user_ns) 295 + return -EINVAL; 296 + 297 + if (task_active_pid_ns(current) != &init_pid_ns) 295 298 return -EINVAL; 296 299 297 300 if (isadd == REGISTER) { ··· 640 631 if (rc < 0) 641 632 return; 642 633 643 - stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid); 634 + stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, 635 + task_pid_nr_ns(tsk, &init_pid_ns)); 644 636 if (!stats) 645 637 goto err; 646 638 647 - fill_stats(tsk, stats); 639 + fill_stats(&init_user_ns, &init_pid_ns, tsk, stats); 648 640 649 641 /* 650 642 * Doesn't matter if tsk is the leader or the last group member leaving ··· 653 643 if (!is_thread_group || !group_dead) 654 644 goto send; 655 645 656 - stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid); 646 + stats = 
mk_reply(rep_skb, TASKSTATS_TYPE_TGID, 647 + task_tgid_nr_ns(tsk, &init_pid_ns)); 657 648 if (!stats) 658 649 goto err; 659 650
+2 -1
kernel/trace/trace.c
··· 2061 2061 seq_puts(m, "# -----------------\n"); 2062 2062 seq_printf(m, "# | task: %.16s-%d " 2063 2063 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 2064 - data->comm, data->pid, data->uid, data->nice, 2064 + data->comm, data->pid, 2065 + from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 2065 2066 data->policy, data->rt_priority); 2066 2067 seq_puts(m, "# -----------------\n"); 2067 2068
+1 -1
kernel/trace/trace.h
··· 147 147 unsigned long skipped_entries; 148 148 cycle_t preempt_timestamp; 149 149 pid_t pid; 150 - uid_t uid; 150 + kuid_t uid; 151 151 char comm[TASK_COMM_LEN]; 152 152 }; 153 153
+7 -5
kernel/tsacct.c
··· 26 26 /* 27 27 * fill in basic accounting fields 28 28 */ 29 - void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) 29 + void bacct_add_tsk(struct user_namespace *user_ns, 30 + struct pid_namespace *pid_ns, 31 + struct taskstats *stats, struct task_struct *tsk) 30 32 { 31 33 const struct cred *tcred; 32 34 struct timespec uptime, ts; ··· 57 55 stats->ac_flag |= AXSIG; 58 56 stats->ac_nice = task_nice(tsk); 59 57 stats->ac_sched = tsk->policy; 60 - stats->ac_pid = tsk->pid; 58 + stats->ac_pid = task_pid_nr_ns(tsk, pid_ns); 61 59 rcu_read_lock(); 62 60 tcred = __task_cred(tsk); 63 - stats->ac_uid = tcred->uid; 64 - stats->ac_gid = tcred->gid; 61 + stats->ac_uid = from_kuid_munged(user_ns, tcred->uid); 62 + stats->ac_gid = from_kgid_munged(user_ns, tcred->gid); 65 63 stats->ac_ppid = pid_alive(tsk) ? 66 - rcu_dereference(tsk->real_parent)->tgid : 0; 64 + task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; 67 65 rcu_read_unlock(); 68 66 stats->ac_utime = cputime_to_usecs(tsk->utime); 69 67 stats->ac_stime = cputime_to_usecs(tsk->stime);
+8
kernel/user.c
··· 38 38 .count = 4294967295U, 39 39 }, 40 40 }, 41 + .projid_map = { 42 + .nr_extents = 1, 43 + .extent[0] = { 44 + .first = 0, 45 + .lower_first = 0, 46 + .count = 4294967295U, 47 + }, 48 + }, 41 49 .kref = { 42 50 .refcount = ATOMIC_INIT(3), 43 51 },
+127 -1
kernel/user_namespace.c
··· 19 19 #include <linux/fs.h> 20 20 #include <linux/uaccess.h> 21 21 #include <linux/ctype.h> 22 + #include <linux/projid.h> 22 23 23 24 static struct kmem_cache *user_ns_cachep __read_mostly; 24 25 ··· 296 295 } 297 296 EXPORT_SYMBOL(from_kgid_munged); 298 297 298 + /** 299 + * make_kprojid - Map a user-namespace projid pair into a kprojid. 300 + * @ns: User namespace that the projid is in 301 + * @projid: Project identifier 302 + * 303 + * Maps a user-namespace uid pair into a kernel internal kuid, 304 + * and returns that kuid. 305 + * 306 + * When there is no mapping defined for the user-namespace projid 307 + * pair INVALID_PROJID is returned. Callers are expected to test 308 + * for and handle handle INVALID_PROJID being returned. INVALID_PROJID 309 + * may be tested for using projid_valid(). 310 + */ 311 + kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid) 312 + { 313 + /* Map the uid to a global kernel uid */ 314 + return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid)); 315 + } 316 + EXPORT_SYMBOL(make_kprojid); 317 + 318 + /** 319 + * from_kprojid - Create a projid from a kprojid user-namespace pair. 320 + * @targ: The user namespace we want a projid in. 321 + * @kprojid: The kernel internal project identifier to start with. 322 + * 323 + * Map @kprojid into the user-namespace specified by @targ and 324 + * return the resulting projid. 325 + * 326 + * There is always a mapping into the initial user_namespace. 327 + * 328 + * If @kprojid has no mapping in @targ (projid_t)-1 is returned. 329 + */ 330 + projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid) 331 + { 332 + /* Map the uid from a global kernel uid */ 333 + return map_id_up(&targ->projid_map, __kprojid_val(kprojid)); 334 + } 335 + EXPORT_SYMBOL(from_kprojid); 336 + 337 + /** 338 + * from_kprojid_munged - Create a projiid from a kprojid user-namespace pair. 339 + * @targ: The user namespace we want a projid in. 
340 + * @kprojid: The kernel internal projid to start with. 341 + * 342 + * Map @kprojid into the user-namespace specified by @targ and 343 + * return the resulting projid. 344 + * 345 + * There is always a mapping into the initial user_namespace. 346 + * 347 + * Unlike from_kprojid from_kprojid_munged never fails and always 348 + * returns a valid projid. This makes from_kprojid_munged 349 + * appropriate for use in syscalls like stat and where 350 + * failing the system call and failing to provide a valid projid are 351 + * not an options. 352 + * 353 + * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned. 354 + */ 355 + projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid) 356 + { 357 + projid_t projid; 358 + projid = from_kprojid(targ, kprojid); 359 + 360 + if (projid == (projid_t) -1) 361 + projid = OVERFLOW_PROJID; 362 + return projid; 363 + } 364 + EXPORT_SYMBOL(from_kprojid_munged); 365 + 366 + 299 367 static int uid_m_show(struct seq_file *seq, void *v) 300 368 { 301 369 struct user_namespace *ns = seq->private; ··· 407 337 return 0; 408 338 } 409 339 340 + static int projid_m_show(struct seq_file *seq, void *v) 341 + { 342 + struct user_namespace *ns = seq->private; 343 + struct uid_gid_extent *extent = v; 344 + struct user_namespace *lower_ns; 345 + projid_t lower; 346 + 347 + lower_ns = seq_user_ns(seq); 348 + if ((lower_ns == ns) && lower_ns->parent) 349 + lower_ns = lower_ns->parent; 350 + 351 + lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first)); 352 + 353 + seq_printf(seq, "%10u %10u %10u\n", 354 + extent->first, 355 + lower, 356 + extent->count); 357 + 358 + return 0; 359 + } 360 + 410 361 static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map) 411 362 { 412 363 struct uid_gid_extent *extent = NULL; ··· 453 362 return m_start(seq, ppos, &ns->gid_map); 454 363 } 455 364 365 + static void *projid_m_start(struct seq_file *seq, loff_t *ppos) 366 + { 367 + struct user_namespace 
*ns = seq->private; 368 + 369 + return m_start(seq, ppos, &ns->projid_map); 370 + } 371 + 456 372 static void *m_next(struct seq_file *seq, void *v, loff_t *pos) 457 373 { 458 374 (*pos)++; ··· 483 385 .stop = m_stop, 484 386 .next = m_next, 485 387 .show = gid_m_show, 388 + }; 389 + 390 + struct seq_operations proc_projid_seq_operations = { 391 + .start = projid_m_start, 392 + .stop = m_stop, 393 + .next = m_next, 394 + .show = projid_m_show, 486 395 }; 487 396 488 397 static DEFINE_MUTEX(id_map_mutex); ··· 539 434 /* Require the appropriate privilege CAP_SETUID or CAP_SETGID 540 435 * over the user namespace in order to set the id mapping. 541 436 */ 542 - if (!ns_capable(ns, cap_setid)) 437 + if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) 543 438 goto out; 544 439 545 440 /* Get a buffer */ ··· 689 584 &ns->gid_map, &ns->parent->gid_map); 690 585 } 691 586 587 + ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) 588 + { 589 + struct seq_file *seq = file->private_data; 590 + struct user_namespace *ns = seq->private; 591 + struct user_namespace *seq_ns = seq_user_ns(seq); 592 + 593 + if (!ns->parent) 594 + return -EPERM; 595 + 596 + if ((seq_ns != ns) && (seq_ns != ns->parent)) 597 + return -EPERM; 598 + 599 + /* Anyone can set any valid project id no capability needed */ 600 + return map_write(file, buf, size, ppos, -1, 601 + &ns->projid_map, &ns->parent->projid_map); 602 + } 603 + 692 604 static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, 693 605 struct uid_gid_map *new_map) 694 606 { 607 + /* Allow anyone to set a mapping that doesn't require privilege */ 608 + if (!cap_valid(cap_setid)) 609 + return true; 610 + 695 611 /* Allow the specified ids if we have the appropriate capability 696 612 * (CAP_SETUID or CAP_SETGID) over the parent user namespace. 697 613 */
+2 -1
net/appletalk/atalk_proc.c
··· 183 183 ntohs(at->dest_net), at->dest_node, at->dest_port, 184 184 sk_wmem_alloc_get(s), 185 185 sk_rmem_alloc_get(s), 186 - s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); 186 + s->sk_state, 187 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s))); 187 188 out: 188 189 return 0; 189 190 }
+14 -7
net/ax25/ax25_uid.c
··· 51 51 52 52 EXPORT_SYMBOL(ax25_uid_policy); 53 53 54 - ax25_uid_assoc *ax25_findbyuid(uid_t uid) 54 + ax25_uid_assoc *ax25_findbyuid(kuid_t uid) 55 55 { 56 56 ax25_uid_assoc *ax25_uid, *res = NULL; 57 57 struct hlist_node *node; 58 58 59 59 read_lock(&ax25_uid_lock); 60 60 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 61 - if (ax25_uid->uid == uid) { 61 + if (uid_eq(ax25_uid->uid, uid)) { 62 62 ax25_uid_hold(ax25_uid); 63 63 res = ax25_uid; 64 64 break; ··· 84 84 read_lock(&ax25_uid_lock); 85 85 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 86 86 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { 87 - res = ax25_uid->uid; 87 + res = from_kuid_munged(current_user_ns(), ax25_uid->uid); 88 88 break; 89 89 } 90 90 } ··· 93 93 return res; 94 94 95 95 case SIOCAX25ADDUID: 96 + { 97 + kuid_t sax25_kuid; 96 98 if (!capable(CAP_NET_ADMIN)) 97 99 return -EPERM; 98 - user = ax25_findbyuid(sax->sax25_uid); 100 + sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid); 101 + if (!uid_valid(sax25_kuid)) 102 + return -EINVAL; 103 + user = ax25_findbyuid(sax25_kuid); 99 104 if (user) { 100 105 ax25_uid_put(user); 101 106 return -EEXIST; ··· 111 106 return -ENOMEM; 112 107 113 108 atomic_set(&ax25_uid->refcount, 1); 114 - ax25_uid->uid = sax->sax25_uid; 109 + ax25_uid->uid = sax25_kuid; 115 110 ax25_uid->call = sax->sax25_call; 116 111 117 112 write_lock(&ax25_uid_lock); ··· 119 114 write_unlock(&ax25_uid_lock); 120 115 121 116 return 0; 122 - 117 + } 123 118 case SIOCAX25DELUID: 124 119 if (!capable(CAP_NET_ADMIN)) 125 120 return -EPERM; ··· 177 172 struct ax25_uid_assoc *pt; 178 173 179 174 pt = hlist_entry(v, struct ax25_uid_assoc, uid_node); 180 - seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call)); 175 + seq_printf(seq, "%6d %s\n", 176 + from_kuid_munged(seq_user_ns(seq), pt->uid), 177 + ax2asc(buf, &pt->call)); 181 178 } 182 179 return 0; 183 180 }
+5 -4
net/core/dev.c
··· 4512 4512 static int __dev_set_promiscuity(struct net_device *dev, int inc) 4513 4513 { 4514 4514 unsigned int old_flags = dev->flags; 4515 - uid_t uid; 4516 - gid_t gid; 4515 + kuid_t uid; 4516 + kgid_t gid; 4517 4517 4518 4518 ASSERT_RTNL(); 4519 4519 ··· 4544 4544 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 4545 4545 dev->name, (dev->flags & IFF_PROMISC), 4546 4546 (old_flags & IFF_PROMISC), 4547 - audit_get_loginuid(current), 4548 - uid, gid, 4547 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 4548 + from_kuid(&init_user_ns, uid), 4549 + from_kgid(&init_user_ns, gid), 4549 4550 audit_get_sessionid(current)); 4550 4551 } 4551 4552
+23 -8
net/core/scm.c
··· 45 45 static __inline__ int scm_check_creds(struct ucred *creds) 46 46 { 47 47 const struct cred *cred = current_cred(); 48 + kuid_t uid = make_kuid(cred->user_ns, creds->uid); 49 + kgid_t gid = make_kgid(cred->user_ns, creds->gid); 50 + 51 + if (!uid_valid(uid) || !gid_valid(gid)) 52 + return -EINVAL; 48 53 49 54 if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && 50 - ((creds->uid == cred->uid || creds->uid == cred->euid || 51 - creds->uid == cred->suid) || capable(CAP_SETUID)) && 52 - ((creds->gid == cred->gid || creds->gid == cred->egid || 53 - creds->gid == cred->sgid) || capable(CAP_SETGID))) { 55 + ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 56 + uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) && 57 + ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 58 + gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) { 54 59 return 0; 55 60 } 56 61 return -EPERM; ··· 154 149 goto error; 155 150 break; 156 151 case SCM_CREDENTIALS: 152 + { 153 + kuid_t uid; 154 + kgid_t gid; 157 155 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) 158 156 goto error; 159 157 memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); ··· 174 166 p->pid = pid; 175 167 } 176 168 169 + err = -EINVAL; 170 + uid = make_kuid(current_user_ns(), p->creds.uid); 171 + gid = make_kgid(current_user_ns(), p->creds.gid); 172 + if (!uid_valid(uid) || !gid_valid(gid)) 173 + goto error; 174 + 177 175 if (!p->cred || 178 - (p->cred->euid != p->creds.uid) || 179 - (p->cred->egid != p->creds.gid)) { 176 + !uid_eq(p->cred->euid, uid) || 177 + !gid_eq(p->cred->egid, gid)) { 180 178 struct cred *cred; 181 179 err = -ENOMEM; 182 180 cred = prepare_creds(); 183 181 if (!cred) 184 182 goto error; 185 183 186 - cred->uid = cred->euid = p->creds.uid; 187 - cred->gid = cred->egid = p->creds.gid; 184 + cred->uid = cred->euid = uid; 185 + cred->gid = cred->egid = gid; 188 186 if (p->cred) 189 187 put_cred(p->cred); 190 188 p->cred = cred; 191 189 } 192 190 break; 191 + } 193 
192 default: 194 193 goto error; 195 194 }
+5 -5
net/core/sock.c
··· 858 858 if (cred) { 859 859 struct user_namespace *current_ns = current_user_ns(); 860 860 861 - ucred->uid = from_kuid(current_ns, cred->euid); 862 - ucred->gid = from_kgid(current_ns, cred->egid); 861 + ucred->uid = from_kuid_munged(current_ns, cred->euid); 862 + ucred->gid = from_kgid_munged(current_ns, cred->egid); 863 863 } 864 864 } 865 865 EXPORT_SYMBOL_GPL(cred_to_ucred); ··· 1528 1528 } 1529 1529 EXPORT_SYMBOL(sock_edemux); 1530 1530 1531 - int sock_i_uid(struct sock *sk) 1531 + kuid_t sock_i_uid(struct sock *sk) 1532 1532 { 1533 - int uid; 1533 + kuid_t uid; 1534 1534 1535 1535 read_lock_bh(&sk->sk_callback_lock); 1536 - uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1536 + uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; 1537 1537 read_unlock_bh(&sk->sk_callback_lock); 1538 1538 return uid; 1539 1539 }
+2 -1
net/dns_resolver/dns_key.c
··· 259 259 if (!cred) 260 260 return -ENOMEM; 261 261 262 - keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, 262 + keyring = key_alloc(&key_type_keyring, ".dns_resolver", 263 + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 263 264 (KEY_POS_ALL & ~KEY_POS_SETATTR) | 264 265 KEY_USR_VIEW | KEY_USR_READ, 265 266 KEY_ALLOC_NOT_IN_QUOTA);
+15 -6
net/ipv4/inet_diag.c
··· 69 69 70 70 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 71 71 struct sk_buff *skb, struct inet_diag_req_v2 *req, 72 + struct user_namespace *user_ns, 72 73 u32 pid, u32 seq, u16 nlmsg_flags, 73 74 const struct nlmsghdr *unlh) 74 75 { ··· 125 124 } 126 125 #endif 127 126 128 - r->idiag_uid = sock_i_uid(sk); 127 + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); 129 128 r->idiag_inode = sock_i_ino(sk); 130 129 131 130 if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { ··· 200 199 201 200 static int inet_csk_diag_fill(struct sock *sk, 202 201 struct sk_buff *skb, struct inet_diag_req_v2 *req, 202 + struct user_namespace *user_ns, 203 203 u32 pid, u32 seq, u16 nlmsg_flags, 204 204 const struct nlmsghdr *unlh) 205 205 { 206 206 return inet_sk_diag_fill(sk, inet_csk(sk), 207 - skb, req, pid, seq, nlmsg_flags, unlh); 207 + skb, req, user_ns, pid, seq, nlmsg_flags, unlh); 208 208 } 209 209 210 210 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, ··· 258 256 } 259 257 260 258 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 261 - struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags, 259 + struct inet_diag_req_v2 *r, 260 + struct user_namespace *user_ns, 261 + u32 pid, u32 seq, u16 nlmsg_flags, 262 262 const struct nlmsghdr *unlh) 263 263 { 264 264 if (sk->sk_state == TCP_TIME_WAIT) 265 265 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, 266 266 skb, r, pid, seq, nlmsg_flags, 267 267 unlh); 268 - return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh); 268 + return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh); 269 269 } 270 270 271 271 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, ··· 315 311 } 316 312 317 313 err = sk_diag_fill(sk, rep, req, 314 + sk_user_ns(NETLINK_CB(in_skb).ssk), 318 315 NETLINK_CB(in_skb).pid, 319 316 nlh->nlmsg_seq, 0, nlh); 320 317 if (err < 0) { ··· 556 551 return 0; 557 552 558 553 return 
inet_csk_diag_fill(sk, skb, r, 554 + sk_user_ns(NETLINK_CB(cb->skb).ssk), 559 555 NETLINK_CB(cb->skb).pid, 560 556 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 561 557 } ··· 597 591 } 598 592 599 593 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 600 - struct request_sock *req, u32 pid, u32 seq, 594 + struct request_sock *req, 595 + struct user_namespace *user_ns, 596 + u32 pid, u32 seq, 601 597 const struct nlmsghdr *unlh) 602 598 { 603 599 const struct inet_request_sock *ireq = inet_rsk(req); ··· 633 625 r->idiag_expires = jiffies_to_msecs(tmo); 634 626 r->idiag_rqueue = 0; 635 627 r->idiag_wqueue = 0; 636 - r->idiag_uid = sock_i_uid(sk); 628 + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); 637 629 r->idiag_inode = 0; 638 630 #if IS_ENABLED(CONFIG_IPV6) 639 631 if (r->idiag_family == AF_INET6) { ··· 710 702 } 711 703 712 704 err = inet_diag_fill_req(skb, sk, req, 705 + sk_user_ns(NETLINK_CB(cb->skb).ssk), 713 706 NETLINK_CB(cb->skb).pid, 714 707 cb->nlh->nlmsg_seq, cb->nlh); 715 708 if (err < 0) {
+9 -13
net/ipv4/ping.c
··· 185 185 return sk; 186 186 } 187 187 188 - static void inet_get_ping_group_range_net(struct net *net, gid_t *low, 189 - gid_t *high) 188 + static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, 189 + kgid_t *high) 190 190 { 191 - gid_t *data = net->ipv4.sysctl_ping_group_range; 191 + kgid_t *data = net->ipv4.sysctl_ping_group_range; 192 192 unsigned int seq; 193 193 194 194 do { ··· 203 203 static int ping_init_sock(struct sock *sk) 204 204 { 205 205 struct net *net = sock_net(sk); 206 - gid_t group = current_egid(); 207 - gid_t range[2]; 206 + kgid_t group = current_egid(); 208 207 struct group_info *group_info = get_current_groups(); 209 208 int i, j, count = group_info->ngroups; 210 209 kgid_t low, high; 211 210 212 - inet_get_ping_group_range_net(net, range, range+1); 213 - low = make_kgid(&init_user_ns, range[0]); 214 - high = make_kgid(&init_user_ns, range[1]); 215 - if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low)) 216 - return -EACCES; 217 - 218 - if (range[0] <= group && group <= range[1]) 211 + inet_get_ping_group_range_net(net, &low, &high); 212 + if (gid_lte(low, group) && gid_lte(group, high)) 219 213 return 0; 220 214 221 215 for (i = 0; i < group_info->nblocks; i++) { ··· 839 845 bucket, src, srcp, dest, destp, sp->sk_state, 840 846 sk_wmem_alloc_get(sp), 841 847 sk_rmem_alloc_get(sp), 842 - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 848 + 0, 0L, 0, 849 + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 850 + 0, sock_i_ino(sp), 843 851 atomic_read(&sp->sk_refcnt), sp, 844 852 atomic_read(&sp->sk_drops), len); 845 853 }
+3 -1
net/ipv4/raw.c
··· 994 994 i, src, srcp, dest, destp, sp->sk_state, 995 995 sk_wmem_alloc_get(sp), 996 996 sk_rmem_alloc_get(sp), 997 - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 997 + 0, 0L, 0, 998 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 999 + 0, sock_i_ino(sp), 998 1000 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 999 1001 } 1000 1002
+27 -15
net/ipv4/sysctl_net_ipv4.c
··· 76 76 } 77 77 78 78 79 - static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) 79 + static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high) 80 80 { 81 - gid_t *data = table->data; 81 + kgid_t *data = table->data; 82 82 unsigned int seq; 83 83 do { 84 84 seq = read_seqbegin(&sysctl_local_ports.lock); ··· 89 89 } 90 90 91 91 /* Update system visible IP port range */ 92 - static void set_ping_group_range(struct ctl_table *table, gid_t range[2]) 92 + static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high) 93 93 { 94 - gid_t *data = table->data; 94 + kgid_t *data = table->data; 95 95 write_seqlock(&sysctl_local_ports.lock); 96 - data[0] = range[0]; 97 - data[1] = range[1]; 96 + data[0] = low; 97 + data[1] = high; 98 98 write_sequnlock(&sysctl_local_ports.lock); 99 99 } 100 100 ··· 103 103 void __user *buffer, 104 104 size_t *lenp, loff_t *ppos) 105 105 { 106 + struct user_namespace *user_ns = current_user_ns(); 106 107 int ret; 107 - gid_t range[2]; 108 + gid_t urange[2]; 109 + kgid_t low, high; 108 110 ctl_table tmp = { 109 - .data = &range, 110 - .maxlen = sizeof(range), 111 + .data = &urange, 112 + .maxlen = sizeof(urange), 111 113 .mode = table->mode, 112 114 .extra1 = &ip_ping_group_range_min, 113 115 .extra2 = &ip_ping_group_range_max, 114 116 }; 115 117 116 - inet_get_ping_group_range_table(table, range, range + 1); 118 + inet_get_ping_group_range_table(table, &low, &high); 119 + urange[0] = from_kgid_munged(user_ns, low); 120 + urange[1] = from_kgid_munged(user_ns, high); 117 121 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 118 122 119 - if (write && ret == 0) 120 - set_ping_group_range(table, range); 123 + if (write && ret == 0) { 124 + low = make_kgid(user_ns, urange[0]); 125 + high = make_kgid(user_ns, urange[1]); 126 + if (!gid_valid(low) || !gid_valid(high) || 127 + (urange[1] < urange[0]) || gid_lt(high, low)) { 128 + low = 
make_kgid(&init_user_ns, 1); 129 + high = make_kgid(&init_user_ns, 0); 130 + } 131 + set_ping_group_range(table, low, high); 132 + } 121 133 122 134 return ret; 123 135 } ··· 798 786 { 799 787 .procname = "ping_group_range", 800 788 .data = &init_net.ipv4.sysctl_ping_group_range, 801 - .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range), 789 + .maxlen = sizeof(gid_t)*2, 802 790 .mode = 0644, 803 791 .proc_handler = ipv4_ping_group_range, 804 792 }, ··· 842 830 * Sane defaults - nobody may create ping sockets. 843 831 * Boot scripts should set this to distro-specific group. 844 832 */ 845 - net->ipv4.sysctl_ping_group_range[0] = 1; 846 - net->ipv4.sysctl_ping_group_range[1] = 0; 833 + net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1); 834 + net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0); 847 835 848 836 tcp_init_mem(net); 849 837
+3 -3
net/ipv4/tcp_ipv4.c
··· 2393 2393 EXPORT_SYMBOL(tcp_proc_unregister); 2394 2394 2395 2395 static void get_openreq4(const struct sock *sk, const struct request_sock *req, 2396 - struct seq_file *f, int i, int uid, int *len) 2396 + struct seq_file *f, int i, kuid_t uid, int *len) 2397 2397 { 2398 2398 const struct inet_request_sock *ireq = inet_rsk(req); 2399 2399 int ttd = req->expires - jiffies; ··· 2410 2410 1, /* timers active (only the expire timer) */ 2411 2411 jiffies_to_clock_t(ttd), 2412 2412 req->retrans, 2413 - uid, 2413 + from_kuid_munged(seq_user_ns(f), uid), 2414 2414 0, /* non standard timer */ 2415 2415 0, /* open_requests have no inode */ 2416 2416 atomic_read(&sk->sk_refcnt), ··· 2461 2461 timer_active, 2462 2462 jiffies_to_clock_t(timer_expires - jiffies), 2463 2463 icsk->icsk_retransmits, 2464 - sock_i_uid(sk), 2464 + from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), 2465 2465 icsk->icsk_probes_out, 2466 2466 sock_i_ino(sk), 2467 2467 atomic_read(&sk->sk_refcnt), sk,
+3 -1
net/ipv4/udp.c
··· 2115 2115 bucket, src, srcp, dest, destp, sp->sk_state, 2116 2116 sk_wmem_alloc_get(sp), 2117 2117 sk_rmem_alloc_get(sp), 2118 - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 2118 + 0, 0L, 0, 2119 + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 2120 + 0, sock_i_ino(sp), 2119 2121 atomic_read(&sp->sk_refcnt), sp, 2120 2122 atomic_read(&sp->sk_drops), len); 2121 2123 }
+4 -1
net/ipv4/udp_diag.c
··· 24 24 if (!inet_diag_bc_sk(bc, sk)) 25 25 return 0; 26 26 27 - return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid, 27 + return inet_sk_diag_fill(sk, NULL, skb, req, 28 + sk_user_ns(NETLINK_CB(cb->skb).ssk), 29 + NETLINK_CB(cb->skb).pid, 28 30 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 29 31 } 30 32 ··· 71 69 goto out; 72 70 73 71 err = inet_sk_diag_fill(sk, NULL, rep, req, 72 + sk_user_ns(NETLINK_CB(in_skb).ssk), 74 73 NETLINK_CB(in_skb).pid, 75 74 nlh->nlmsg_seq, 0, nlh); 76 75 if (err < 0) {
+40 -7
net/ipv6/ip6_flowlabel.c
··· 22 22 #include <linux/seq_file.h> 23 23 #include <linux/slab.h> 24 24 #include <linux/export.h> 25 + #include <linux/pid_namespace.h> 25 26 26 27 #include <net/net_namespace.h> 27 28 #include <net/sock.h> ··· 92 91 static void fl_free(struct ip6_flowlabel *fl) 93 92 { 94 93 if (fl) { 94 + if (fl->share == IPV6_FL_S_PROCESS) 95 + put_pid(fl->owner.pid); 95 96 release_net(fl->fl_net); 96 97 kfree(fl->opt); 97 98 } ··· 397 394 case IPV6_FL_S_ANY: 398 395 break; 399 396 case IPV6_FL_S_PROCESS: 400 - fl->owner = current->pid; 397 + fl->owner.pid = get_task_pid(current, PIDTYPE_PID); 401 398 break; 402 399 case IPV6_FL_S_USER: 403 - fl->owner = current_euid(); 400 + fl->owner.uid = current_euid(); 404 401 break; 405 402 default: 406 403 err = -EINVAL; ··· 564 561 err = -EPERM; 565 562 if (fl1->share == IPV6_FL_S_EXCL || 566 563 fl1->share != fl->share || 567 - fl1->owner != fl->owner) 564 + ((fl1->share == IPV6_FL_S_PROCESS) && 565 + (fl1->owner.pid == fl->owner.pid)) || 566 + ((fl1->share == IPV6_FL_S_USER) && 567 + uid_eq(fl1->owner.uid, fl->owner.uid))) 568 568 goto release; 569 569 570 570 err = -EINVAL; ··· 627 621 628 622 struct ip6fl_iter_state { 629 623 struct seq_net_private p; 624 + struct pid_namespace *pid_ns; 630 625 int bucket; 631 626 }; 632 627 ··· 706 699 707 700 static int ip6fl_seq_show(struct seq_file *seq, void *v) 708 701 { 702 + struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 709 703 if (v == SEQ_START_TOKEN) 710 704 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", 711 705 "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); ··· 716 708 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", 717 709 (unsigned int)ntohl(fl->label), 718 710 fl->share, 719 - (int)fl->owner, 711 + ((fl->share == IPV6_FL_S_PROCESS) ? 712 + pid_nr_ns(fl->owner.pid, state->pid_ns) : 713 + ((fl->share == IPV6_FL_S_USER) ? 
714 + from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : 715 + 0)), 720 716 atomic_read(&fl->users), 721 717 fl->linger/HZ, 722 718 (long)(fl->expires - jiffies)/HZ, ··· 739 727 740 728 static int ip6fl_seq_open(struct inode *inode, struct file *file) 741 729 { 742 - return seq_open_net(inode, file, &ip6fl_seq_ops, 743 - sizeof(struct ip6fl_iter_state)); 730 + struct seq_file *seq; 731 + struct ip6fl_iter_state *state; 732 + int err; 733 + 734 + err = seq_open_net(inode, file, &ip6fl_seq_ops, 735 + sizeof(struct ip6fl_iter_state)); 736 + 737 + if (!err) { 738 + seq = file->private_data; 739 + state = ip6fl_seq_private(seq); 740 + rcu_read_lock(); 741 + state->pid_ns = get_pid_ns(task_active_pid_ns(current)); 742 + rcu_read_unlock(); 743 + } 744 + return err; 745 + } 746 + 747 + static int ip6fl_seq_release(struct inode *inode, struct file *file) 748 + { 749 + struct seq_file *seq = file->private_data; 750 + struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 751 + put_pid_ns(state->pid_ns); 752 + return seq_release_net(inode, file); 744 753 } 745 754 746 755 static const struct file_operations ip6fl_seq_fops = { ··· 769 736 .open = ip6fl_seq_open, 770 737 .read = seq_read, 771 738 .llseek = seq_lseek, 772 - .release = seq_release_net, 739 + .release = ip6fl_seq_release, 773 740 }; 774 741 775 742 static int __net_init ip6_flowlabel_proc_init(struct net *net)
+2 -1
net/ipv6/raw.c
··· 1250 1250 sk_wmem_alloc_get(sp), 1251 1251 sk_rmem_alloc_get(sp), 1252 1252 0, 0L, 0, 1253 - sock_i_uid(sp), 0, 1253 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1254 + 0, 1254 1255 sock_i_ino(sp), 1255 1256 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 1256 1257 }
+3 -3
net/ipv6/tcp_ipv6.c
··· 1829 1829 #ifdef CONFIG_PROC_FS 1830 1830 /* Proc filesystem TCPv6 sock list dumping. */ 1831 1831 static void get_openreq6(struct seq_file *seq, 1832 - const struct sock *sk, struct request_sock *req, int i, int uid) 1832 + const struct sock *sk, struct request_sock *req, int i, kuid_t uid) 1833 1833 { 1834 1834 int ttd = req->expires - jiffies; 1835 1835 const struct in6_addr *src = &inet6_rsk(req)->loc_addr; ··· 1853 1853 1, /* timers active (only the expire timer) */ 1854 1854 jiffies_to_clock_t(ttd), 1855 1855 req->retrans, 1856 - uid, 1856 + from_kuid_munged(seq_user_ns(seq), uid), 1857 1857 0, /* non standard timer */ 1858 1858 0, /* open_requests have no inode */ 1859 1859 0, req); ··· 1903 1903 timer_active, 1904 1904 jiffies_to_clock_t(timer_expires - jiffies), 1905 1905 icsk->icsk_retransmits, 1906 - sock_i_uid(sp), 1906 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1907 1907 icsk->icsk_probes_out, 1908 1908 sock_i_ino(sp), 1909 1909 atomic_read(&sp->sk_refcnt), sp,
+2 -1
net/ipv6/udp.c
··· 1469 1469 sk_wmem_alloc_get(sp), 1470 1470 sk_rmem_alloc_get(sp), 1471 1471 0, 0L, 0, 1472 - sock_i_uid(sp), 0, 1472 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1473 + 0, 1473 1474 sock_i_ino(sp), 1474 1475 atomic_read(&sp->sk_refcnt), sp, 1475 1476 atomic_read(&sp->sk_drops));
+2 -1
net/ipx/ipx_proc.c
··· 217 217 seq_printf(seq, "%08X %08X %02X %03d\n", 218 218 sk_wmem_alloc_get(s), 219 219 sk_rmem_alloc_get(s), 220 - s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); 220 + s->sk_state, 221 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s))); 221 222 out: 222 223 return 0; 223 224 }
+1 -1
net/key/af_key.c
··· 3661 3661 atomic_read(&s->sk_refcnt), 3662 3662 sk_rmem_alloc_get(s), 3663 3663 sk_wmem_alloc_get(s), 3664 - sock_i_uid(s), 3664 + from_kuid_munged(seq_user_ns(f), sock_i_uid(s)), 3665 3665 sock_i_ino(s) 3666 3666 ); 3667 3667 return 0;
+1 -1
net/llc/llc_proc.c
··· 151 151 sk_wmem_alloc_get(sk), 152 152 sk_rmem_alloc_get(sk) - llc->copied_seq, 153 153 sk->sk_state, 154 - sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, 154 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 155 155 llc->link); 156 156 out: 157 157 return 0;
+5 -3
net/netfilter/xt_LOG.c
··· 151 151 return; 152 152 153 153 read_lock_bh(&sk->sk_callback_lock); 154 - if (sk->sk_socket && sk->sk_socket->file) 154 + if (sk->sk_socket && sk->sk_socket->file) { 155 + const struct cred *cred = sk->sk_socket->file->f_cred; 155 156 sb_add(m, "UID=%u GID=%u ", 156 - sk->sk_socket->file->f_cred->fsuid, 157 - sk->sk_socket->file->f_cred->fsgid); 157 + from_kuid_munged(&init_user_ns, cred->fsuid), 158 + from_kgid_munged(&init_user_ns, cred->fsgid)); 159 + } 158 160 read_unlock_bh(&sk->sk_callback_lock); 159 161 } 160 162
+24 -6
net/netfilter/xt_owner.c
··· 17 17 #include <linux/netfilter/x_tables.h> 18 18 #include <linux/netfilter/xt_owner.h> 19 19 20 + static int owner_check(const struct xt_mtchk_param *par) 21 + { 22 + struct xt_owner_match_info *info = par->matchinfo; 23 + 24 + /* For now only allow adding matches from the initial user namespace */ 25 + if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) && 26 + (current_user_ns() != &init_user_ns)) 27 + return -EINVAL; 28 + return 0; 29 + } 30 + 20 31 static bool 21 32 owner_mt(const struct sk_buff *skb, struct xt_action_param *par) 22 33 { ··· 48 37 return ((info->match ^ info->invert) & 49 38 (XT_OWNER_UID | XT_OWNER_GID)) == 0; 50 39 51 - if (info->match & XT_OWNER_UID) 52 - if ((filp->f_cred->fsuid >= info->uid_min && 53 - filp->f_cred->fsuid <= info->uid_max) ^ 40 + if (info->match & XT_OWNER_UID) { 41 + kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min); 42 + kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max); 43 + if ((uid_gte(filp->f_cred->fsuid, uid_min) && 44 + uid_lte(filp->f_cred->fsuid, uid_max)) ^ 54 45 !(info->invert & XT_OWNER_UID)) 55 46 return false; 47 + } 56 48 57 - if (info->match & XT_OWNER_GID) 58 - if ((filp->f_cred->fsgid >= info->gid_min && 59 - filp->f_cred->fsgid <= info->gid_max) ^ 49 + if (info->match & XT_OWNER_GID) { 50 + kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min); 51 + kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max); 52 + if ((gid_gte(filp->f_cred->fsgid, gid_min) && 53 + gid_lte(filp->f_cred->fsgid, gid_max)) ^ 60 54 !(info->invert & XT_OWNER_GID)) 61 55 return false; 56 + } 62 57 63 58 return true; 64 59 } ··· 73 56 .name = "owner", 74 57 .revision = 1, 75 58 .family = NFPROTO_UNSPEC, 59 + .checkentry = owner_check, 76 60 .match = owner_mt, 77 61 .matchsize = sizeof(struct xt_owner_match_info), 78 62 .hooks = (1 << NF_INET_LOCAL_OUT) |
+11 -2
net/netfilter/xt_recent.c
··· 317 317 struct recent_table *t; 318 318 #ifdef CONFIG_PROC_FS 319 319 struct proc_dir_entry *pde; 320 + kuid_t uid; 321 + kgid_t gid; 320 322 #endif 321 323 unsigned int i; 322 324 int ret = -EINVAL; ··· 374 372 for (i = 0; i < ip_list_hash_size; i++) 375 373 INIT_LIST_HEAD(&t->iphash[i]); 376 374 #ifdef CONFIG_PROC_FS 375 + uid = make_kuid(&init_user_ns, ip_list_uid); 376 + gid = make_kgid(&init_user_ns, ip_list_gid); 377 + if (!uid_valid(uid) || !gid_valid(gid)) { 378 + kfree(t); 379 + ret = -EINVAL; 380 + goto out; 381 + } 377 382 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, 378 383 &recent_mt_fops, t); 379 384 if (pde == NULL) { ··· 388 379 ret = -ENOMEM; 389 380 goto out; 390 381 } 391 - pde->uid = ip_list_uid; 392 - pde->gid = ip_list_gid; 382 + pde->uid = uid; 383 + pde->gid = gid; 393 384 #endif 394 385 spin_lock_bh(&recent_lock); 395 386 list_add_tail(&t->list, &recent_net->tables);
+1 -1
net/netlabel/netlabel_unlabeled.c
··· 1541 1541 * it is called is at bootup before the audit subsystem is reporting 1542 1542 * messages so don't worry to much about these values. */ 1543 1543 security_task_getsecid(current, &audit_info.secid); 1544 - audit_info.loginuid = 0; 1544 + audit_info.loginuid = GLOBAL_ROOT_UID; 1545 1545 audit_info.sessionid = 0; 1546 1546 1547 1547 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+1 -1
net/netlabel/netlabel_user.c
··· 109 109 return NULL; 110 110 111 111 audit_log_format(audit_buf, "netlabel: auid=%u ses=%u", 112 - audit_info->loginuid, 112 + from_kuid(&init_user_ns, audit_info->loginuid), 113 113 audit_info->sessionid); 114 114 115 115 if (audit_info->secid != 0 &&
+4 -2
net/netlink/af_netlink.c
··· 912 912 wake_up_interruptible(&nlk->wait); 913 913 } 914 914 915 - static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) 915 + static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, 916 + struct sock *ssk) 916 917 { 917 918 int ret; 918 919 struct netlink_sock *nlk = nlk_sk(sk); ··· 922 921 if (nlk->netlink_rcv != NULL) { 923 922 ret = skb->len; 924 923 skb_set_owner_r(skb, sk); 924 + NETLINK_CB(skb).ssk = ssk; 925 925 nlk->netlink_rcv(skb); 926 926 consume_skb(skb); 927 927 } else { ··· 949 947 return PTR_ERR(sk); 950 948 } 951 949 if (netlink_is_kernel(sk)) 952 - return netlink_unicast_kernel(sk, skb); 950 + return netlink_unicast_kernel(sk, skb, ssk); 953 951 954 952 if (sk_filter(sk, skb)) { 955 953 err = skb->len;
+1 -1
net/packet/af_packet.c
··· 3854 3854 po->ifindex, 3855 3855 po->running, 3856 3856 atomic_read(&s->sk_rmem_alloc), 3857 - sock_i_uid(s), 3857 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), 3858 3858 sock_i_ino(s)); 3859 3859 } 3860 3860
+4 -2
net/phonet/socket.c
··· 612 612 sk->sk_protocol, pn->sobject, pn->dobject, 613 613 pn->resource, sk->sk_state, 614 614 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 615 - sock_i_uid(sk), sock_i_ino(sk), 615 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 616 + sock_i_ino(sk), 616 617 atomic_read(&sk->sk_refcnt), sk, 617 618 atomic_read(&sk->sk_drops), &len); 618 619 } ··· 797 796 struct sock *sk = *psk; 798 797 799 798 seq_printf(seq, "%02X %5d %lu%n", 800 - (int) (psk - pnres.sk), sock_i_uid(sk), 799 + (int) (psk - pnres.sk), 800 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 801 801 sock_i_ino(sk), &len); 802 802 } 803 803 seq_printf(seq, "%*s\n", 63 - len, "");
+4 -2
net/rxrpc/ar-key.c
··· 948 948 949 949 _enter(""); 950 950 951 - key = key_alloc(&key_type_rxrpc, "x", 0, 0, cred, 0, 951 + key = key_alloc(&key_type_rxrpc, "x", 952 + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 0, 952 953 KEY_ALLOC_NOT_IN_QUOTA); 953 954 if (IS_ERR(key)) { 954 955 _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); ··· 995 994 struct key *key; 996 995 int ret; 997 996 998 - key = key_alloc(&key_type_rxrpc, keyname, 0, 0, cred, 997 + key = key_alloc(&key_type_rxrpc, keyname, 998 + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 999 999 KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA); 1000 1000 if (IS_ERR(key)) 1001 1001 return key;
+1 -1
net/sched/cls_api.c
··· 319 319 } 320 320 } 321 321 322 - err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); 322 + err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh); 323 323 if (err == 0) { 324 324 if (tp_created) { 325 325 spin_lock_bh(root_lock);
+2 -1
net/sched/cls_basic.c
··· 162 162 return err; 163 163 } 164 164 165 - static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, 165 + static int basic_change(struct sk_buff *in_skb, 166 + struct tcf_proto *tp, unsigned long base, u32 handle, 166 167 struct nlattr **tca, unsigned long *arg) 167 168 { 168 169 int err;
+2 -1
net/sched/cls_cgroup.c
··· 158 158 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 159 159 }; 160 160 161 - static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, 161 + static int cls_cgroup_change(struct sk_buff *in_skb, 162 + struct tcf_proto *tp, unsigned long base, 162 163 u32 handle, struct nlattr **tca, 163 164 unsigned long *arg) 164 165 {
+14 -5
net/sched/cls_flow.c
··· 193 193 194 194 static u32 flow_get_skuid(const struct sk_buff *skb) 195 195 { 196 - if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) 197 - return skb->sk->sk_socket->file->f_cred->fsuid; 196 + if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { 197 + kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid; 198 + return from_kuid(&init_user_ns, skuid); 199 + } 198 200 return 0; 199 201 } 200 202 201 203 static u32 flow_get_skgid(const struct sk_buff *skb) 202 204 { 203 - if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) 204 - return skb->sk->sk_socket->file->f_cred->fsgid; 205 + if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { 206 + kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid; 207 + return from_kgid(&init_user_ns, skgid); 208 + } 205 209 return 0; 206 210 } 207 211 ··· 351 347 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 352 348 }; 353 349 354 - static int flow_change(struct tcf_proto *tp, unsigned long base, 350 + static int flow_change(struct sk_buff *in_skb, 351 + struct tcf_proto *tp, unsigned long base, 355 352 u32 handle, struct nlattr **tca, 356 353 unsigned long *arg) 357 354 { ··· 390 385 return -EINVAL; 391 386 392 387 if (fls(keymask) - 1 > FLOW_KEY_MAX) 388 + return -EOPNOTSUPP; 389 + 390 + if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && 391 + sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns) 393 392 return -EOPNOTSUPP; 394 393 } 395 394
+2 -1
net/sched/cls_fw.c
··· 233 233 return err; 234 234 } 235 235 236 - static int fw_change(struct tcf_proto *tp, unsigned long base, 236 + static int fw_change(struct sk_buff *in_skb, 237 + struct tcf_proto *tp, unsigned long base, 237 238 u32 handle, 238 239 struct nlattr **tca, 239 240 unsigned long *arg)
+2 -1
net/sched/cls_route.c
··· 427 427 return err; 428 428 } 429 429 430 - static int route4_change(struct tcf_proto *tp, unsigned long base, 430 + static int route4_change(struct sk_buff *in_skb, 431 + struct tcf_proto *tp, unsigned long base, 431 432 u32 handle, 432 433 struct nlattr **tca, 433 434 unsigned long *arg)
+2 -1
net/sched/cls_rsvp.h
··· 416 416 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, 417 417 }; 418 418 419 - static int rsvp_change(struct tcf_proto *tp, unsigned long base, 419 + static int rsvp_change(struct sk_buff *in_skb, 420 + struct tcf_proto *tp, unsigned long base, 420 421 u32 handle, 421 422 struct nlattr **tca, 422 423 unsigned long *arg)
+2 -1
net/sched/cls_tcindex.c
··· 332 332 } 333 333 334 334 static int 335 - tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle, 335 + tcindex_change(struct sk_buff *in_skb, 336 + struct tcf_proto *tp, unsigned long base, u32 handle, 336 337 struct nlattr **tca, unsigned long *arg) 337 338 { 338 339 struct nlattr *opt = tca[TCA_OPTIONS];
+2 -1
net/sched/cls_u32.c
··· 544 544 return err; 545 545 } 546 546 547 - static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, 547 + static int u32_change(struct sk_buff *in_skb, 548 + struct tcf_proto *tp, unsigned long base, u32 handle, 548 549 struct nlattr **tca, 549 550 unsigned long *arg) 550 551 {
+4 -2
net/sctp/proc.c
··· 216 216 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 217 217 sctp_sk(sk)->type, sk->sk_state, hash, 218 218 epb->bind_addr.port, 219 - sock_i_uid(sk), sock_i_ino(sk)); 219 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 220 + sock_i_ino(sk)); 220 221 221 222 sctp_seq_dump_local_addrs(seq, epb); 222 223 seq_printf(seq, "\n"); ··· 325 324 assoc->assoc_id, 326 325 assoc->sndbuf_used, 327 326 atomic_read(&assoc->rmem_alloc), 328 - sock_i_uid(sk), sock_i_ino(sk), 327 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 328 + sock_i_ino(sk), 329 329 epb->bind_addr.port, 330 330 assoc->peer.port); 331 331 seq_printf(seq, " ");
+4 -4
net/xfrm/xfrm_policy.c
··· 2633 2633 2634 2634 flush_work(&net->xfrm.policy_hash_work); 2635 2635 #ifdef CONFIG_XFRM_SUB_POLICY 2636 - audit_info.loginuid = -1; 2636 + audit_info.loginuid = INVALID_UID; 2637 2637 audit_info.sessionid = -1; 2638 2638 audit_info.secid = 0; 2639 2639 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); 2640 2640 #endif 2641 - audit_info.loginuid = -1; 2641 + audit_info.loginuid = INVALID_UID; 2642 2642 audit_info.sessionid = -1; 2643 2643 audit_info.secid = 0; 2644 2644 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); ··· 2745 2745 } 2746 2746 2747 2747 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2748 - uid_t auid, u32 sessionid, u32 secid) 2748 + kuid_t auid, u32 sessionid, u32 secid) 2749 2749 { 2750 2750 struct audit_buffer *audit_buf; 2751 2751 ··· 2760 2760 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2761 2761 2762 2762 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2763 - uid_t auid, u32 sessionid, u32 secid) 2763 + kuid_t auid, u32 sessionid, u32 secid) 2764 2764 { 2765 2765 struct audit_buffer *audit_buf; 2766 2766
+3 -3
net/xfrm/xfrm_state.c
··· 2060 2060 unsigned int sz; 2061 2061 2062 2062 flush_work(&net->xfrm.state_hash_work); 2063 - audit_info.loginuid = -1; 2063 + audit_info.loginuid = INVALID_UID; 2064 2064 audit_info.sessionid = -1; 2065 2065 audit_info.secid = 0; 2066 2066 xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info); ··· 2127 2127 } 2128 2128 2129 2129 void xfrm_audit_state_add(struct xfrm_state *x, int result, 2130 - uid_t auid, u32 sessionid, u32 secid) 2130 + kuid_t auid, u32 sessionid, u32 secid) 2131 2131 { 2132 2132 struct audit_buffer *audit_buf; 2133 2133 ··· 2142 2142 EXPORT_SYMBOL_GPL(xfrm_audit_state_add); 2143 2143 2144 2144 void xfrm_audit_state_delete(struct xfrm_state *x, int result, 2145 - uid_t auid, u32 sessionid, u32 secid) 2145 + kuid_t auid, u32 sessionid, u32 secid) 2146 2146 { 2147 2147 struct audit_buffer *audit_buf; 2148 2148
+6 -6
net/xfrm/xfrm_user.c
··· 595 595 struct xfrm_state *x; 596 596 int err; 597 597 struct km_event c; 598 - uid_t loginuid = audit_get_loginuid(current); 598 + kuid_t loginuid = audit_get_loginuid(current); 599 599 u32 sessionid = audit_get_sessionid(current); 600 600 u32 sid; 601 601 ··· 674 674 int err = -ESRCH; 675 675 struct km_event c; 676 676 struct xfrm_usersa_id *p = nlmsg_data(nlh); 677 - uid_t loginuid = audit_get_loginuid(current); 677 + kuid_t loginuid = audit_get_loginuid(current); 678 678 u32 sessionid = audit_get_sessionid(current); 679 679 u32 sid; 680 680 ··· 1393 1393 struct km_event c; 1394 1394 int err; 1395 1395 int excl; 1396 - uid_t loginuid = audit_get_loginuid(current); 1396 + kuid_t loginuid = audit_get_loginuid(current); 1397 1397 u32 sessionid = audit_get_sessionid(current); 1398 1398 u32 sid; 1399 1399 ··· 1651 1651 NETLINK_CB(skb).pid); 1652 1652 } 1653 1653 } else { 1654 - uid_t loginuid = audit_get_loginuid(current); 1654 + kuid_t loginuid = audit_get_loginuid(current); 1655 1655 u32 sessionid = audit_get_sessionid(current); 1656 1656 u32 sid; 1657 1657 ··· 1945 1945 1946 1946 err = 0; 1947 1947 if (up->hard) { 1948 - uid_t loginuid = audit_get_loginuid(current); 1948 + kuid_t loginuid = audit_get_loginuid(current); 1949 1949 u32 sessionid = audit_get_sessionid(current); 1950 1950 u32 sid; 1951 1951 ··· 1988 1988 km_state_expired(x, ue->hard, current->pid); 1989 1989 1990 1990 if (ue->hard) { 1991 - uid_t loginuid = audit_get_loginuid(current); 1991 + kuid_t loginuid = audit_get_loginuid(current); 1992 1992 u32 sessionid = audit_get_sessionid(current); 1993 1993 u32 sid; 1994 1994
+2 -2
security/apparmor/domain.c
··· 721 721 if (!permtest) 722 722 error = aa_audit_file(profile, &perms, GFP_KERNEL, 723 723 OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL, 724 - target, 0, info, error); 724 + target, GLOBAL_ROOT_UID, info, error); 725 725 726 726 out: 727 727 aa_put_profile(hat); ··· 848 848 audit: 849 849 if (!permtest) 850 850 error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, 851 - name, hname, 0, info, error); 851 + name, hname, GLOBAL_ROOT_UID, info, error); 852 852 853 853 aa_put_namespace(ns); 854 854 aa_put_profile(target);
+7 -5
security/apparmor/file.c
··· 65 65 static void file_audit_cb(struct audit_buffer *ab, void *va) 66 66 { 67 67 struct common_audit_data *sa = va; 68 - uid_t fsuid = current_fsuid(); 68 + kuid_t fsuid = current_fsuid(); 69 69 70 70 if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { 71 71 audit_log_format(ab, " requested_mask="); ··· 76 76 audit_file_mask(ab, sa->aad->fs.denied); 77 77 } 78 78 if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { 79 - audit_log_format(ab, " fsuid=%d", fsuid); 80 - audit_log_format(ab, " ouid=%d", sa->aad->fs.ouid); 79 + audit_log_format(ab, " fsuid=%d", 80 + from_kuid(&init_user_ns, fsuid)); 81 + audit_log_format(ab, " ouid=%d", 82 + from_kuid(&init_user_ns, sa->aad->fs.ouid)); 81 83 } 82 84 83 85 if (sa->aad->fs.target) { ··· 105 103 */ 106 104 int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, 107 105 gfp_t gfp, int op, u32 request, const char *name, 108 - const char *target, uid_t ouid, const char *info, int error) 106 + const char *target, kuid_t ouid, const char *info, int error) 109 107 { 110 108 int type = AUDIT_APPARMOR_AUTO; 111 109 struct common_audit_data sa; ··· 203 201 */ 204 202 perms.kill = 0; 205 203 206 - if (current_fsuid() == cond->uid) { 204 + if (uid_eq(current_fsuid(), cond->uid)) { 207 205 perms.allow = map_old_perms(dfa_user_allow(dfa, state)); 208 206 perms.audit = map_old_perms(dfa_user_audit(dfa, state)); 209 207 perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
+1 -1
security/apparmor/include/audit.h
··· 125 125 const char *target; 126 126 u32 request; 127 127 u32 denied; 128 - uid_t ouid; 128 + kuid_t ouid; 129 129 } fs; 130 130 }; 131 131 };
+2 -2
security/apparmor/include/file.h
··· 71 71 72 72 /* need to make conditional which ones are being set */ 73 73 struct path_cond { 74 - uid_t uid; 74 + kuid_t uid; 75 75 umode_t mode; 76 76 }; 77 77 ··· 146 146 147 147 int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, 148 148 gfp_t gfp, int op, u32 request, const char *name, 149 - const char *target, uid_t ouid, const char *info, int error); 149 + const char *target, kuid_t ouid, const char *info, int error); 150 150 151 151 /** 152 152 * struct aa_file_rules - components used for file rule permissions
+1 -1
security/apparmor/lsm.c
··· 352 352 return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); 353 353 } 354 354 355 - static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid) 355 + static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid) 356 356 { 357 357 struct path_cond cond = { path->dentry->d_inode->i_uid, 358 358 path->dentry->d_inode->i_mode
+1 -1
security/capability.c
··· 284 284 return 0; 285 285 } 286 286 287 - static int cap_path_chown(struct path *path, uid_t uid, gid_t gid) 287 + static int cap_path_chown(struct path *path, kuid_t uid, kgid_t gid) 288 288 { 289 289 return 0; 290 290 }
+2 -2
security/integrity/evm/evm_crypto.c
··· 106 106 memset(&hmac_misc, 0, sizeof hmac_misc); 107 107 hmac_misc.ino = inode->i_ino; 108 108 hmac_misc.generation = inode->i_generation; 109 - hmac_misc.uid = inode->i_uid; 110 - hmac_misc.gid = inode->i_gid; 109 + hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); 110 + hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); 111 111 hmac_misc.mode = inode->i_mode; 112 112 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc); 113 113 crypto_shash_final(desc, digest);
+3 -2
security/integrity/ima/ima_audit.c
··· 39 39 40 40 ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno); 41 41 audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u", 42 - current->pid, current_cred()->uid, 43 - audit_get_loginuid(current), 42 + current->pid, 43 + from_kuid(&init_user_ns, current_cred()->uid), 44 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 44 45 audit_get_sessionid(current)); 45 46 audit_log_task_context(ab); 46 47 audit_log_format(ab, " op=");
+7 -7
security/integrity/ima/ima_policy.c
··· 39 39 enum ima_hooks func; 40 40 int mask; 41 41 unsigned long fsmagic; 42 - uid_t uid; 42 + kuid_t uid; 43 43 struct { 44 44 void *rule; /* LSM file metadata specific */ 45 45 int type; /* audit type */ ··· 71 71 .flags = IMA_FUNC | IMA_MASK}, 72 72 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, 73 73 .flags = IMA_FUNC | IMA_MASK}, 74 - {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0, 74 + {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID, 75 75 .flags = IMA_FUNC | IMA_MASK | IMA_UID}, 76 76 }; 77 77 ··· 112 112 if ((rule->flags & IMA_FSMAGIC) 113 113 && rule->fsmagic != inode->i_sb->s_magic) 114 114 return false; 115 - if ((rule->flags & IMA_UID) && rule->uid != cred->uid) 115 + if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid)) 116 116 return false; 117 117 for (i = 0; i < MAX_LSM_RULES; i++) { 118 118 int rc = 0; ··· 277 277 278 278 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE); 279 279 280 - entry->uid = -1; 280 + entry->uid = INVALID_UID; 281 281 entry->action = UNKNOWN; 282 282 while ((p = strsep(&rule, " \t")) != NULL) { 283 283 substring_t args[MAX_OPT_ARGS]; ··· 361 361 case Opt_uid: 362 362 ima_log_string(ab, "uid", args[0].from); 363 363 364 - if (entry->uid != -1) { 364 + if (uid_valid(entry->uid)) { 365 365 result = -EINVAL; 366 366 break; 367 367 } 368 368 369 369 result = strict_strtoul(args[0].from, 10, &lnum); 370 370 if (!result) { 371 - entry->uid = (uid_t) lnum; 372 - if (entry->uid != lnum) 371 + entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); 372 + if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) 373 373 result = -EINVAL; 374 374 else 375 375 entry->flags |= IMA_UID;
+2 -4
security/keys/internal.h
··· 52 52 atomic_t usage; /* for accessing qnkeys & qnbytes */ 53 53 atomic_t nkeys; /* number of keys */ 54 54 atomic_t nikeys; /* number of instantiated keys */ 55 - uid_t uid; 56 - struct user_namespace *user_ns; 55 + kuid_t uid; 57 56 int qnkeys; /* number of keys allocated to this user */ 58 57 int qnbytes; /* number of bytes allocated to this user */ 59 58 }; ··· 61 62 extern spinlock_t key_user_lock; 62 63 extern struct key_user root_key_user; 63 64 64 - extern struct key_user *key_user_lookup(uid_t uid, 65 - struct user_namespace *user_ns); 65 + extern struct key_user *key_user_lookup(kuid_t uid); 66 66 extern void key_user_put(struct key_user *user); 67 67 68 68 /*
+8 -15
security/keys/key.c
··· 18 18 #include <linux/workqueue.h> 19 19 #include <linux/random.h> 20 20 #include <linux/err.h> 21 - #include <linux/user_namespace.h> 22 21 #include "internal.h" 23 22 24 23 struct kmem_cache *key_jar; ··· 51 52 * Get the key quota record for a user, allocating a new record if one doesn't 52 53 * already exist. 53 54 */ 54 - struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) 55 + struct key_user *key_user_lookup(kuid_t uid) 55 56 { 56 57 struct key_user *candidate = NULL, *user; 57 58 struct rb_node *parent = NULL; ··· 66 67 parent = *p; 67 68 user = rb_entry(parent, struct key_user, node); 68 69 69 - if (uid < user->uid) 70 + if (uid_lt(uid, user->uid)) 70 71 p = &(*p)->rb_left; 71 - else if (uid > user->uid) 72 - p = &(*p)->rb_right; 73 - else if (user_ns < user->user_ns) 74 - p = &(*p)->rb_left; 75 - else if (user_ns > user->user_ns) 72 + else if (uid_gt(uid, user->uid)) 76 73 p = &(*p)->rb_right; 77 74 else 78 75 goto found; ··· 97 102 atomic_set(&candidate->nkeys, 0); 98 103 atomic_set(&candidate->nikeys, 0); 99 104 candidate->uid = uid; 100 - candidate->user_ns = get_user_ns(user_ns); 101 105 candidate->qnkeys = 0; 102 106 candidate->qnbytes = 0; 103 107 spin_lock_init(&candidate->lock); ··· 125 131 if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { 126 132 rb_erase(&user->node, &key_user_tree); 127 133 spin_unlock(&key_user_lock); 128 - put_user_ns(user->user_ns); 129 134 130 135 kfree(user); 131 136 } ··· 222 229 * key_alloc() calls don't race with module unloading. 
223 230 */ 224 231 struct key *key_alloc(struct key_type *type, const char *desc, 225 - uid_t uid, gid_t gid, const struct cred *cred, 232 + kuid_t uid, kgid_t gid, const struct cred *cred, 226 233 key_perm_t perm, unsigned long flags) 227 234 { 228 235 struct key_user *user = NULL; ··· 246 253 quotalen = desclen + type->def_datalen; 247 254 248 255 /* get hold of the key tracking for this user */ 249 - user = key_user_lookup(uid, cred->user_ns); 256 + user = key_user_lookup(uid); 250 257 if (!user) 251 258 goto no_memory_1; 252 259 253 260 /* check that the user's quota permits allocation of another key and 254 261 * its description */ 255 262 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { 256 - unsigned maxkeys = (uid == 0) ? 263 + unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 257 264 key_quota_root_maxkeys : key_quota_maxkeys; 258 - unsigned maxbytes = (uid == 0) ? 265 + unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 259 266 key_quota_root_maxbytes : key_quota_maxbytes; 260 267 261 268 spin_lock(&user->lock); ··· 373 380 374 381 /* contemplate the quota adjustment */ 375 382 if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { 376 - unsigned maxbytes = (key->user->uid == 0) ? 383 + unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? 377 384 key_quota_root_maxbytes : key_quota_maxbytes; 378 385 379 386 spin_lock(&key->user->lock);
+30 -20
security/keys/keyctl.c
··· 569 569 ret = snprintf(tmpbuf, PAGE_SIZE - 1, 570 570 "%s;%d;%d;%08x;%s", 571 571 key->type->name, 572 - key->uid, 573 - key->gid, 572 + from_kuid_munged(current_user_ns(), key->uid), 573 + from_kgid_munged(current_user_ns(), key->gid), 574 574 key->perm, 575 575 key->description ?: ""); 576 576 ··· 766 766 * 767 767 * If successful, 0 will be returned. 768 768 */ 769 - long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) 769 + long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) 770 770 { 771 771 struct key_user *newowner, *zapowner = NULL; 772 772 struct key *key; 773 773 key_ref_t key_ref; 774 774 long ret; 775 + kuid_t uid; 776 + kgid_t gid; 777 + 778 + uid = make_kuid(current_user_ns(), user); 779 + gid = make_kgid(current_user_ns(), group); 780 + ret = -EINVAL; 781 + if ((user != (uid_t) -1) && !uid_valid(uid)) 782 + goto error; 783 + if ((group != (gid_t) -1) && !gid_valid(gid)) 784 + goto error; 775 785 776 786 ret = 0; 777 - if (uid == (uid_t) -1 && gid == (gid_t) -1) 787 + if (user == (uid_t) -1 && group == (gid_t) -1) 778 788 goto error; 779 789 780 790 key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, ··· 802 792 803 793 if (!capable(CAP_SYS_ADMIN)) { 804 794 /* only the sysadmin can chown a key to some other UID */ 805 - if (uid != (uid_t) -1 && key->uid != uid) 795 + if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) 806 796 goto error_put; 807 797 808 798 /* only the sysadmin can set the key's GID to a group other 809 799 * than one of those that the current process subscribes to */ 810 - if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid)) 800 + if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) 811 801 goto error_put; 812 802 } 813 803 814 804 /* change the UID */ 815 - if (uid != (uid_t) -1 && uid != key->uid) { 805 + if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { 816 806 ret = -ENOMEM; 817 - newowner = key_user_lookup(uid, current_user_ns()); 807 + newowner = 
key_user_lookup(uid); 818 808 if (!newowner) 819 809 goto error_put; 820 810 821 811 /* transfer the quota burden to the new user */ 822 812 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { 823 - unsigned maxkeys = (uid == 0) ? 813 + unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 824 814 key_quota_root_maxkeys : key_quota_maxkeys; 825 - unsigned maxbytes = (uid == 0) ? 815 + unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 826 816 key_quota_root_maxbytes : key_quota_maxbytes; 827 817 828 818 spin_lock(&newowner->lock); ··· 856 846 } 857 847 858 848 /* change the GID */ 859 - if (gid != (gid_t) -1) 849 + if (group != (gid_t) -1) 860 850 key->gid = gid; 861 851 862 852 ret = 0; ··· 907 897 down_write(&key->sem); 908 898 909 899 /* if we're not the sysadmin, we can only change a key that we own */ 910 - if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) { 900 + if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { 911 901 key->perm = perm; 912 902 ret = 0; 913 903 } ··· 1516 1506 1517 1507 /* the parent must have the same effective ownership and mustn't be 1518 1508 * SUID/SGID */ 1519 - if (pcred->uid != mycred->euid || 1520 - pcred->euid != mycred->euid || 1521 - pcred->suid != mycred->euid || 1522 - pcred->gid != mycred->egid || 1523 - pcred->egid != mycred->egid || 1524 - pcred->sgid != mycred->egid) 1509 + if (!uid_eq(pcred->uid, mycred->euid) || 1510 + !uid_eq(pcred->euid, mycred->euid) || 1511 + !uid_eq(pcred->suid, mycred->euid) || 1512 + !gid_eq(pcred->gid, mycred->egid) || 1513 + !gid_eq(pcred->egid, mycred->egid) || 1514 + !gid_eq(pcred->sgid, mycred->egid)) 1525 1515 goto unlock; 1526 1516 1527 1517 /* the keyrings must have the same UID */ 1528 1518 if ((pcred->tgcred->session_keyring && 1529 - pcred->tgcred->session_keyring->uid != mycred->euid) || 1530 - mycred->tgcred->session_keyring->uid != mycred->euid) 1519 + !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) || 1520 + !uid_eq(mycred->tgcred->session_keyring->uid, 
mycred->euid)) 1531 1521 goto unlock; 1532 1522 1533 1523 /* cancel an already pending keyring replacement */
+2 -2
security/keys/keyring.c
··· 256 256 /* 257 257 * Allocate a keyring and link into the destination keyring. 258 258 */ 259 - struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, 259 + struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, 260 260 const struct cred *cred, unsigned long flags, 261 261 struct key *dest) 262 262 { ··· 612 612 &keyring_name_hash[bucket], 613 613 type_data.link 614 614 ) { 615 - if (keyring->user->user_ns != current_user_ns()) 615 + if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) 616 616 continue; 617 617 618 618 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+4 -10
security/keys/permission.c
··· 36 36 37 37 key = key_ref_to_ptr(key_ref); 38 38 39 - if (key->user->user_ns != cred->user_ns) 40 - goto use_other_perms; 41 - 42 39 /* use the second 8-bits of permissions for keys the caller owns */ 43 - if (key->uid == cred->fsuid) { 40 + if (uid_eq(key->uid, cred->fsuid)) { 44 41 kperm = key->perm >> 16; 45 42 goto use_these_perms; 46 43 } 47 44 48 45 /* use the third 8-bits of permissions for keys the caller has a group 49 46 * membership in common with */ 50 - if (key->gid != -1 && key->perm & KEY_GRP_ALL) { 51 - if (key->gid == cred->fsgid) { 47 + if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) { 48 + if (gid_eq(key->gid, cred->fsgid)) { 52 49 kperm = key->perm >> 8; 53 50 goto use_these_perms; 54 51 } 55 52 56 - ret = groups_search(cred->group_info, 57 - make_kgid(current_user_ns(), key->gid)); 53 + ret = groups_search(cred->group_info, key->gid); 58 54 if (ret) { 59 55 kperm = key->perm >> 8; 60 56 goto use_these_perms; 61 57 } 62 58 } 63 - 64 - use_other_perms: 65 59 66 60 /* otherwise use the least-significant 8-bits */ 67 61 kperm = key->perm;
+22 -22
security/keys/proc.c
··· 88 88 */ 89 89 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS 90 90 91 - static struct rb_node *key_serial_next(struct rb_node *n) 91 + static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) 92 92 { 93 - struct user_namespace *user_ns = current_user_ns(); 93 + struct user_namespace *user_ns = seq_user_ns(p); 94 94 95 95 n = rb_next(n); 96 96 while (n) { 97 97 struct key *key = rb_entry(n, struct key, serial_node); 98 - if (key->user->user_ns == user_ns) 98 + if (kuid_has_mapping(user_ns, key->user->uid)) 99 99 break; 100 100 n = rb_next(n); 101 101 } ··· 107 107 return seq_open(file, &proc_keys_ops); 108 108 } 109 109 110 - static struct key *find_ge_key(key_serial_t id) 110 + static struct key *find_ge_key(struct seq_file *p, key_serial_t id) 111 111 { 112 - struct user_namespace *user_ns = current_user_ns(); 112 + struct user_namespace *user_ns = seq_user_ns(p); 113 113 struct rb_node *n = key_serial_tree.rb_node; 114 114 struct key *minkey = NULL; 115 115 ··· 132 132 return NULL; 133 133 134 134 for (;;) { 135 - if (minkey->user->user_ns == user_ns) 135 + if (kuid_has_mapping(user_ns, minkey->user->uid)) 136 136 return minkey; 137 137 n = rb_next(&minkey->serial_node); 138 138 if (!n) ··· 151 151 152 152 if (*_pos > INT_MAX) 153 153 return NULL; 154 - key = find_ge_key(pos); 154 + key = find_ge_key(p, pos); 155 155 if (!key) 156 156 return NULL; 157 157 *_pos = key->serial; ··· 168 168 { 169 169 struct rb_node *n; 170 170 171 - n = key_serial_next(v); 171 + n = key_serial_next(p, v); 172 172 if (n) 173 173 *_pos = key_node_serial(n); 174 174 return n; ··· 254 254 atomic_read(&key->usage), 255 255 xbuf, 256 256 key->perm, 257 - key->uid, 258 - key->gid, 257 + from_kuid_munged(seq_user_ns(m), key->uid), 258 + from_kgid_munged(seq_user_ns(m), key->gid), 259 259 key->type->name); 260 260 261 261 #undef showflag ··· 270 270 271 271 #endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */ 272 272 273 - static struct rb_node *__key_user_next(struct rb_node *n) 273 + 
static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) 274 274 { 275 275 while (n) { 276 276 struct key_user *user = rb_entry(n, struct key_user, node); 277 - if (user->user_ns == current_user_ns()) 277 + if (kuid_has_mapping(user_ns, user->uid)) 278 278 break; 279 279 n = rb_next(n); 280 280 } 281 281 return n; 282 282 } 283 283 284 - static struct rb_node *key_user_next(struct rb_node *n) 284 + static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) 285 285 { 286 - return __key_user_next(rb_next(n)); 286 + return __key_user_next(user_ns, rb_next(n)); 287 287 } 288 288 289 - static struct rb_node *key_user_first(struct rb_root *r) 289 + static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r) 290 290 { 291 291 struct rb_node *n = rb_first(r); 292 - return __key_user_next(n); 292 + return __key_user_next(user_ns, n); 293 293 } 294 294 295 295 /* ··· 309 309 310 310 spin_lock(&key_user_lock); 311 311 312 - _p = key_user_first(&key_user_tree); 312 + _p = key_user_first(seq_user_ns(p), &key_user_tree); 313 313 while (pos > 0 && _p) { 314 314 pos--; 315 - _p = key_user_next(_p); 315 + _p = key_user_next(seq_user_ns(p), _p); 316 316 } 317 317 318 318 return _p; ··· 321 321 static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) 322 322 { 323 323 (*_pos)++; 324 - return key_user_next((struct rb_node *)v); 324 + return key_user_next(seq_user_ns(p), (struct rb_node *)v); 325 325 } 326 326 327 327 static void proc_key_users_stop(struct seq_file *p, void *v) ··· 334 334 { 335 335 struct rb_node *_p = v; 336 336 struct key_user *user = rb_entry(_p, struct key_user, node); 337 - unsigned maxkeys = (user->uid == 0) ? 337 + unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ? 338 338 key_quota_root_maxkeys : key_quota_maxkeys; 339 - unsigned maxbytes = (user->uid == 0) ? 339 + unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ? 
340 340 key_quota_root_maxbytes : key_quota_maxbytes; 341 341 342 342 seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", 343 - user->uid, 343 + from_kuid_munged(seq_user_ns(m), user->uid), 344 344 atomic_read(&user->usage), 345 345 atomic_read(&user->nkeys), 346 346 atomic_read(&user->nikeys),
+8 -7
security/keys/process_keys.c
··· 34 34 .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), 35 35 .nkeys = ATOMIC_INIT(2), 36 36 .nikeys = ATOMIC_INIT(2), 37 - .uid = 0, 38 - .user_ns = &init_user_ns, 37 + .uid = GLOBAL_ROOT_UID, 39 38 }; 40 39 41 40 /* ··· 47 48 struct key *uid_keyring, *session_keyring; 48 49 char buf[20]; 49 50 int ret; 51 + uid_t uid; 50 52 51 53 cred = current_cred(); 52 54 user = cred->user; 55 + uid = from_kuid(cred->user_ns, user->uid); 53 56 54 - kenter("%p{%u}", user, user->uid); 57 + kenter("%p{%u}", user, uid); 55 58 56 59 if (user->uid_keyring) { 57 60 kleave(" = 0 [exist]"); ··· 68 67 * - there may be one in existence already as it may have been 69 68 * pinned by a session, but the user_struct pointing to it 70 69 * may have been destroyed by setuid */ 71 - sprintf(buf, "_uid.%u", user->uid); 70 + sprintf(buf, "_uid.%u", uid); 72 71 73 72 uid_keyring = find_keyring_by_name(buf, true); 74 73 if (IS_ERR(uid_keyring)) { 75 - uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, 74 + uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, 76 75 cred, KEY_ALLOC_IN_QUOTA, 77 76 NULL); 78 77 if (IS_ERR(uid_keyring)) { ··· 83 82 84 83 /* get a default session keyring (which might also exist 85 84 * already) */ 86 - sprintf(buf, "_uid_ses.%u", user->uid); 85 + sprintf(buf, "_uid_ses.%u", uid); 87 86 88 87 session_keyring = find_keyring_by_name(buf, true); 89 88 if (IS_ERR(session_keyring)) { 90 89 session_keyring = 91 - keyring_alloc(buf, user->uid, (gid_t) -1, 90 + keyring_alloc(buf, user->uid, INVALID_GID, 92 91 cred, KEY_ALLOC_IN_QUOTA, NULL); 93 92 if (IS_ERR(session_keyring)) { 94 93 ret = PTR_ERR(session_keyring);
+3 -3
security/keys/request_key.c
··· 139 139 goto error_link; 140 140 141 141 /* record the UID and GID */ 142 - sprintf(uid_str, "%d", cred->fsuid); 143 - sprintf(gid_str, "%d", cred->fsgid); 142 + sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); 143 + sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); 144 144 145 145 /* we say which key is under construction */ 146 146 sprintf(key_str, "%d", key->serial); ··· 442 442 443 443 kenter(""); 444 444 445 - user = key_user_lookup(current_fsuid(), current_user_ns()); 445 + user = key_user_lookup(current_fsuid()); 446 446 if (!user) 447 447 return ERR_PTR(-ENOMEM); 448 448
+1 -1
security/security.c
··· 434 434 return security_ops->path_chmod(path, mode); 435 435 } 436 436 437 - int security_path_chown(struct path *path, uid_t uid, gid_t gid) 437 + int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) 438 438 { 439 439 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 440 440 return 0;
+3 -3
security/selinux/selinuxfs.c
··· 174 174 audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, 175 175 "enforcing=%d old_enforcing=%d auid=%u ses=%u", 176 176 new_value, selinux_enforcing, 177 - audit_get_loginuid(current), 177 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 178 178 audit_get_sessionid(current)); 179 179 selinux_enforcing = new_value; 180 180 if (selinux_enforcing) ··· 305 305 goto out; 306 306 audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, 307 307 "selinux=0 auid=%u ses=%u", 308 - audit_get_loginuid(current), 308 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 309 309 audit_get_sessionid(current)); 310 310 } 311 311 ··· 551 551 out1: 552 552 audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, 553 553 "policy loaded auid=%u ses=%u", 554 - audit_get_loginuid(current), 554 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 555 555 audit_get_sessionid(current)); 556 556 out: 557 557 mutex_unlock(&sel_mutex);
+1 -1
security/selinux/ss/services.c
··· 2440 2440 sym_name(&policydb, SYM_BOOLS, i), 2441 2441 !!values[i], 2442 2442 policydb.bool_val_to_struct[i]->state, 2443 - audit_get_loginuid(current), 2443 + from_kuid(&init_user_ns, audit_get_loginuid(current)), 2444 2444 audit_get_sessionid(current)); 2445 2445 } 2446 2446 if (values[i])
+16 -7
security/tomoyo/audit.c
··· 168 168 stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile, 169 169 tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid, 170 170 tomoyo_sys_getpid(), tomoyo_sys_getppid(), 171 - current_uid(), current_gid(), current_euid(), 172 - current_egid(), current_suid(), current_sgid(), 173 - current_fsuid(), current_fsgid()); 171 + from_kuid(&init_user_ns, current_uid()), 172 + from_kgid(&init_user_ns, current_gid()), 173 + from_kuid(&init_user_ns, current_euid()), 174 + from_kgid(&init_user_ns, current_egid()), 175 + from_kuid(&init_user_ns, current_suid()), 176 + from_kgid(&init_user_ns, current_sgid()), 177 + from_kuid(&init_user_ns, current_fsuid()), 178 + from_kgid(&init_user_ns, current_fsgid())); 174 179 if (!obj) 175 180 goto no_obj_info; 176 181 if (!obj->validate_done) { ··· 196 191 tomoyo_buffer_len - 1 - pos, 197 192 " path%u.parent={ uid=%u gid=%u " 198 193 "ino=%lu perm=0%o }", (i >> 1) + 1, 199 - stat->uid, stat->gid, (unsigned long) 200 - stat->ino, stat->mode & S_IALLUGO); 194 + from_kuid(&init_user_ns, stat->uid), 195 + from_kgid(&init_user_ns, stat->gid), 196 + (unsigned long)stat->ino, 197 + stat->mode & S_IALLUGO); 201 198 continue; 202 199 } 203 200 pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos, 204 201 " path%u={ uid=%u gid=%u ino=%lu major=%u" 205 202 " minor=%u perm=0%o type=%s", (i >> 1) + 1, 206 - stat->uid, stat->gid, (unsigned long) 207 - stat->ino, MAJOR(dev), MINOR(dev), 203 + from_kuid(&init_user_ns, stat->uid), 204 + from_kgid(&init_user_ns, stat->gid), 205 + (unsigned long)stat->ino, 206 + MAJOR(dev), MINOR(dev), 208 207 mode & S_IALLUGO, tomoyo_filetype(mode)); 209 208 if (S_ISCHR(mode) || S_ISBLK(mode)) { 210 209 dev = stat->rdev;
+3 -1
security/tomoyo/common.c
··· 925 925 926 926 if (!tomoyo_policy_loaded) 927 927 return true; 928 - if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid)) 928 + if (!tomoyo_manage_by_non_root && 929 + (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) || 930 + !uid_eq(task->cred->euid, GLOBAL_ROOT_UID))) 929 931 return false; 930 932 exe = tomoyo_get_exe(); 931 933 if (!exe)
+2 -2
security/tomoyo/common.h
··· 561 561 562 562 /* Subset of "struct stat". Used by conditional ACL and audit logs. */ 563 563 struct tomoyo_mini_stat { 564 - uid_t uid; 565 - gid_t gid; 564 + kuid_t uid; 565 + kgid_t gid; 566 566 ino_t ino; 567 567 umode_t mode; 568 568 dev_t dev;
+10 -10
security/tomoyo/condition.c
··· 813 813 unsigned long value = 0; 814 814 switch (index) { 815 815 case TOMOYO_TASK_UID: 816 - value = current_uid(); 816 + value = from_kuid(&init_user_ns, current_uid()); 817 817 break; 818 818 case TOMOYO_TASK_EUID: 819 - value = current_euid(); 819 + value = from_kuid(&init_user_ns, current_euid()); 820 820 break; 821 821 case TOMOYO_TASK_SUID: 822 - value = current_suid(); 822 + value = from_kuid(&init_user_ns, current_suid()); 823 823 break; 824 824 case TOMOYO_TASK_FSUID: 825 - value = current_fsuid(); 825 + value = from_kuid(&init_user_ns, current_fsuid()); 826 826 break; 827 827 case TOMOYO_TASK_GID: 828 - value = current_gid(); 828 + value = from_kgid(&init_user_ns, current_gid()); 829 829 break; 830 830 case TOMOYO_TASK_EGID: 831 - value = current_egid(); 831 + value = from_kgid(&init_user_ns, current_egid()); 832 832 break; 833 833 case TOMOYO_TASK_SGID: 834 - value = current_sgid(); 834 + value = from_kgid(&init_user_ns, current_sgid()); 835 835 break; 836 836 case TOMOYO_TASK_FSGID: 837 - value = current_fsgid(); 837 + value = from_kgid(&init_user_ns, current_fsgid()); 838 838 break; 839 839 case TOMOYO_TASK_PID: 840 840 value = tomoyo_sys_getpid(); ··· 970 970 case TOMOYO_PATH2_UID: 971 971 case TOMOYO_PATH1_PARENT_UID: 972 972 case TOMOYO_PATH2_PARENT_UID: 973 - value = stat->uid; 973 + value = from_kuid(&init_user_ns, stat->uid); 974 974 break; 975 975 case TOMOYO_PATH1_GID: 976 976 case TOMOYO_PATH2_GID: 977 977 case TOMOYO_PATH1_PARENT_GID: 978 978 case TOMOYO_PATH2_PARENT_GID: 979 - value = stat->gid; 979 + value = from_kgid(&init_user_ns, stat->gid); 980 980 break; 981 981 case TOMOYO_PATH1_INO: 982 982 case TOMOYO_PATH2_INO:
+7 -5
security/tomoyo/tomoyo.c
··· 373 373 * 374 374 * Returns 0 on success, negative value otherwise. 375 375 */ 376 - static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid) 376 + static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid) 377 377 { 378 378 int error = 0; 379 - if (uid != (uid_t) -1) 380 - error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, uid); 381 - if (!error && gid != (gid_t) -1) 382 - error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, gid); 379 + if (uid_valid(uid)) 380 + error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, 381 + from_kuid(&init_user_ns, uid)); 382 + if (!error && gid_valid(gid)) 383 + error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, 384 + from_kgid(&init_user_ns, gid)); 383 385 return error; 384 386 } 385 387