Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://selinuxproject.org/~jmorris/linux-security

* 'next' of git://selinuxproject.org/~jmorris/linux-security: (95 commits)
TOMOYO: Fix incomplete read after seek.
Smack: allow to access /smack/access as normal user
TOMOYO: Fix unused kernel config option.
Smack: fix: invalid length set for the result of /smack/access
Smack: compilation fix
Smack: fix for /smack/access output, use string instead of byte
Smack: domain transition protections (v3)
Smack: Provide information for UDS getsockopt(SO_PEERCRED)
Smack: Clean up comments
Smack: Repair processing of fcntl
Smack: Rule list lookup performance
Smack: check permissions from user space (v2)
TOMOYO: Fix quota and garbage collector.
TOMOYO: Remove redundant tasklist_lock.
TOMOYO: Fix domain transition failure warning.
TOMOYO: Remove tomoyo_policy_memory_lock spinlock.
TOMOYO: Simplify garbage collector.
TOMOYO: Fix make namespacecheck warnings.
target: check hex2bin result
encrypted-keys: check hex2bin result
...

+4706 -1437
+23
Documentation/ABI/testing/evm
··· 1 + What: security/evm 2 + Date: March 2011 3 + Contact: Mimi Zohar <zohar@us.ibm.com> 4 + Description: 5 + EVM protects a file's security extended attributes(xattrs) 6 + against integrity attacks. The initial method maintains an 7 + HMAC-sha1 value across the extended attributes, storing the 8 + value as the extended attribute 'security.evm'. 9 + 10 + EVM depends on the Kernel Key Retention System to provide it 11 + with a trusted/encrypted key for the HMAC-sha1 operation. 12 + The key is loaded onto the root's keyring using keyctl. Until 13 + EVM receives notification that the key has been successfully 14 + loaded onto the keyring (echo 1 > <securityfs>/evm), EVM 15 + can not create or validate the 'security.evm' xattr, but 16 + returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM 17 + should be done as early as possible. Normally this is done 18 + in the initramfs, which has already been measured as part 19 + of the trusted boot. For more information on creating and 20 + loading existing trusted/encrypted keys, refer to: 21 + Documentation/keys-trusted-encrypted.txt. (A sample dracut 22 + patch, which loads the trusted/encrypted key and enables 23 + EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
+6
Documentation/kernel-parameters.txt
··· 49 49 EDD BIOS Enhanced Disk Drive Services (EDD) is enabled 50 50 EFI EFI Partitioning (GPT) is enabled 51 51 EIDE EIDE/ATAPI support is enabled. 52 + EVM Extended Verification Module 52 53 FB The frame buffer device is enabled. 53 54 FTRACE Function tracing enabled. 54 55 GCOV GCOV profiling is enabled. ··· 760 759 ether= [HW,NET] Ethernet cards parameters 761 760 This option is obsoleted by the "netdev=" option, which 762 761 has equivalent usage. See its documentation for details. 762 + 763 + evm= [EVM] 764 + Format: { "fix" } 765 + Permit 'security.evm' to be updated regardless of 766 + current integrity status. 763 767 764 768 failslab= 765 769 fail_page_alloc=
+6 -1
MAINTAINERS
··· 2552 2552 F: Documentation/filesystems/ext4.txt 2553 2553 F: fs/ext4/ 2554 2554 2555 + Extended Verification Module (EVM) 2556 + M: Mimi Zohar <zohar@us.ibm.com> 2557 + S: Supported 2558 + F: security/integrity/evm/ 2559 + 2555 2560 F71805F HARDWARE MONITORING DRIVER 2556 2561 M: Jean Delvare <khali@linux-fr.org> 2557 2562 L: lm-sensors@lm-sensors.org ··· 6452 6447 L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) 6453 6448 L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) 6454 6449 W: http://tomoyo.sourceforge.jp/ 6455 - T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.4.x/tomoyo-lsm/patches/ 6450 + T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/ 6456 6451 S: Maintained 6457 6452 F: security/tomoyo/ 6458 6453
+3
drivers/char/tpm/tpm.c
··· 966 966 { 967 967 struct tpm_chip *chip = dev_get_drvdata(dev); 968 968 969 + if (chip->vendor.duration[TPM_LONG] == 0) 970 + return 0; 971 + 969 972 return sprintf(buf, "%d %d %d [%s]\n", 970 973 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), 971 974 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+9 -3
drivers/target/target_core_fabric_lib.c
··· 63 63 unsigned char *buf) 64 64 { 65 65 unsigned char *ptr; 66 + int ret; 66 67 67 68 /* 68 69 * Set PROTOCOL IDENTIFIER to 6h for SAS ··· 75 74 */ 76 75 ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */ 77 76 78 - hex2bin(&buf[4], ptr, 8); 77 + ret = hex2bin(&buf[4], ptr, 8); 78 + if (ret < 0) 79 + pr_debug("sas transport_id: invalid hex string\n"); 79 80 80 81 /* 81 82 * The SAS Transport ID is a hardcoded 24-byte length ··· 159 156 unsigned char *buf) 160 157 { 161 158 unsigned char *ptr; 162 - int i; 159 + int i, ret; 163 160 u32 off = 8; 161 + 164 162 /* 165 163 * PROTOCOL IDENTIFIER is 0h for FCP-2 166 164 * ··· 178 174 i++; 179 175 continue; 180 176 } 181 - hex2bin(&buf[off++], &ptr[i], 1); 177 + ret = hex2bin(&buf[off++], &ptr[i], 1); 178 + if (ret < 0) 179 + pr_debug("fc transport_id: invalid hex string\n"); 182 180 i += 2; 183 181 } 184 182 /*
+4 -1
fs/attr.c
··· 13 13 #include <linux/fsnotify.h> 14 14 #include <linux/fcntl.h> 15 15 #include <linux/security.h> 16 + #include <linux/evm.h> 16 17 17 18 /** 18 19 * inode_change_ok - check if attribute changes to an inode are allowed ··· 238 237 else 239 238 error = simple_setattr(dentry, attr); 240 239 241 - if (!error) 240 + if (!error) { 242 241 fsnotify_change(dentry, ia_valid); 242 + evm_inode_post_setattr(dentry, ia_valid); 243 + } 243 244 244 245 return error; 245 246 }
+28 -28
fs/btrfs/xattr.c
··· 383 383 XATTR_REPLACE); 384 384 } 385 385 386 + int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, 387 + void *fs_info) 388 + { 389 + const struct xattr *xattr; 390 + struct btrfs_trans_handle *trans = fs_info; 391 + char *name; 392 + int err = 0; 393 + 394 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 395 + name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 396 + strlen(xattr->name) + 1, GFP_NOFS); 397 + if (!name) { 398 + err = -ENOMEM; 399 + break; 400 + } 401 + strcpy(name, XATTR_SECURITY_PREFIX); 402 + strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); 403 + err = __btrfs_setxattr(trans, inode, name, 404 + xattr->value, xattr->value_len, 0); 405 + kfree(name); 406 + if (err < 0) 407 + break; 408 + } 409 + return err; 410 + } 411 + 386 412 int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, 387 413 struct inode *inode, struct inode *dir, 388 414 const struct qstr *qstr) 389 415 { 390 - int err; 391 - size_t len; 392 - void *value; 393 - char *suffix; 394 - char *name; 395 - 396 - err = security_inode_init_security(inode, dir, qstr, &suffix, &value, 397 - &len); 398 - if (err) { 399 - if (err == -EOPNOTSUPP) 400 - return 0; 401 - return err; 402 - } 403 - 404 - name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1, 405 - GFP_NOFS); 406 - if (!name) { 407 - err = -ENOMEM; 408 - } else { 409 - strcpy(name, XATTR_SECURITY_PREFIX); 410 - strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix); 411 - err = __btrfs_setxattr(trans, inode, name, value, len, 0); 412 - kfree(name); 413 - } 414 - 415 - kfree(suffix); 416 - kfree(value); 417 - return err; 416 + return security_inode_init_security(inode, dir, qstr, 417 + &btrfs_initxattrs, trans); 418 418 }
+18 -22
fs/cifs/xattr.c
··· 22 22 #include <linux/fs.h> 23 23 #include <linux/posix_acl_xattr.h> 24 24 #include <linux/slab.h> 25 + #include <linux/xattr.h> 25 26 #include "cifsfs.h" 26 27 #include "cifspdu.h" 27 28 #include "cifsglob.h" ··· 32 31 #define MAX_EA_VALUE_SIZE 65535 33 32 #define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib" 34 33 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" 35 - #define CIFS_XATTR_USER_PREFIX "user." 36 - #define CIFS_XATTR_SYSTEM_PREFIX "system." 37 - #define CIFS_XATTR_OS2_PREFIX "os2." 38 - #define CIFS_XATTR_SECURITY_PREFIX "security." 39 - #define CIFS_XATTR_TRUSTED_PREFIX "trusted." 40 - #define XATTR_TRUSTED_PREFIX_LEN 8 41 - #define XATTR_SECURITY_PREFIX_LEN 9 34 + 42 35 /* BB need to add server (Samba e.g) support for security and trusted prefix */ 43 - 44 - 45 36 46 37 int cifs_removexattr(struct dentry *direntry, const char *ea_name) 47 38 { ··· 69 76 } 70 77 if (ea_name == NULL) { 71 78 cFYI(1, "Null xattr names not supported"); 72 - } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) 73 - && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) { 79 + } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) 80 + && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) { 74 81 cFYI(1, 75 82 "illegal xattr request %s (only user namespace supported)", 76 83 ea_name); ··· 81 88 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 82 89 goto remove_ea_exit; 83 90 84 - ea_name += 5; /* skip past user. prefix */ 91 + ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */ 85 92 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL, 86 93 (__u16)0, cifs_sb->local_nls, 87 94 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); ··· 142 149 143 150 if (ea_name == NULL) { 144 151 cFYI(1, "Null xattr names not supported"); 145 - } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { 152 + } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) 153 + == 0) { 146 154 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 147 155 goto set_ea_exit; 148 156 if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) 149 157 cFYI(1, "attempt to set cifs inode metadata"); 150 158 151 - ea_name += 5; /* skip past user. prefix */ 159 + ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */ 152 160 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, 153 161 (__u16)value_size, cifs_sb->local_nls, 154 162 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 155 - } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { 163 + } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) 164 + == 0) { 156 165 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 157 166 goto set_ea_exit; 158 167 159 - ea_name += 4; /* skip past os2. prefix */ 168 + ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */ 160 169 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, 161 170 (__u16)value_size, cifs_sb->local_nls, 162 171 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); ··· 264 269 /* return alt name if available as pseudo attr */ 265 270 if (ea_name == NULL) { 266 271 cFYI(1, "Null xattr names not supported"); 267 - } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { 272 + } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) 273 + == 0) { 268 274 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 269 275 goto get_ea_exit; ··· 273 277 cFYI(1, "attempt to query cifs inode metadata"); 274 278 /* revalidate/getattr then populate from inode */ 275 279 } /* BB add else when above is implemented */ 276 - ea_name += 5; /* skip past user. prefix */ 280 + ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */ 277 281 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, 278 282 buf_size, cifs_sb->local_nls, 279 283 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 280 - } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { 284 + } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { 281 285 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 282 286 goto get_ea_exit; 283 287 284 - ea_name += 4; /* skip past os2. prefix */ 288 + ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */ 285 289 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, 286 290 buf_size, cifs_sb->local_nls, 287 291 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); ··· 335 339 cFYI(1, "Query CIFS ACL not supported yet"); 336 340 #endif /* CONFIG_CIFS_ACL */ 337 341 } else if (strncmp(ea_name, 338 - CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { 342 + XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { 339 343 cFYI(1, "Trusted xattr namespace not supported yet"); 340 344 } else if (strncmp(ea_name, 341 - CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { 345 + XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { 342 346 cFYI(1, "Security xattr namespace not supported yet"); 343 347 } else 344 348 cFYI(1,
+18 -16
fs/ext2/xattr_security.c
··· 46 46 value, size, flags); 47 47 } 48 48 49 + int ext2_initxattrs(struct inode *inode, const struct xattr *xattr_array, 50 + void *fs_info) 51 + { 52 + const struct xattr *xattr; 53 + int err = 0; 54 + 55 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 56 + err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, 57 + xattr->name, xattr->value, 58 + xattr->value_len, 0); 59 + if (err < 0) 60 + break; 61 + } 62 + return err; 63 + } 64 + 49 65 int 50 66 ext2_init_security(struct inode *inode, struct inode *dir, 51 67 const struct qstr *qstr) 52 68 { 53 - int err; 54 - size_t len; 55 - void *value; 56 - char *name; 57 - 58 - err = security_inode_init_security(inode, dir, qstr, &name, &value, &len); 59 - if (err) { 60 - if (err == -EOPNOTSUPP) 61 - return 0; 62 - return err; 63 - } 64 - err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, 65 - name, value, len, 0); 66 - kfree(name); 67 - kfree(value); 68 - return err; 69 + return security_inode_init_security(inode, dir, qstr, 70 + &ext2_initxattrs, NULL); 69 71 } 70 72 71 73 const struct xattr_handler ext2_xattr_security_handler = {
+20 -16
fs/ext3/xattr_security.c
··· 48 48 name, value, size, flags); 49 49 } 50 50 51 + int ext3_initxattrs(struct inode *inode, const struct xattr *xattr_array, 52 + void *fs_info) 53 + { 54 + const struct xattr *xattr; 55 + handle_t *handle = fs_info; 56 + int err = 0; 57 + 58 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 59 + err = ext3_xattr_set_handle(handle, inode, 60 + EXT3_XATTR_INDEX_SECURITY, 61 + xattr->name, xattr->value, 62 + xattr->value_len, 0); 63 + if (err < 0) 64 + break; 65 + } 66 + return err; 67 + } 68 + 51 69 int 52 70 ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir, 53 71 const struct qstr *qstr) 54 72 { 55 - int err; 56 - size_t len; 57 - void *value; 58 - char *name; 59 - 60 - err = security_inode_init_security(inode, dir, qstr, &name, &value, &len); 61 - if (err) { 62 - if (err == -EOPNOTSUPP) 63 - return 0; 64 - return err; 65 - } 66 - err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_SECURITY, 67 - name, value, len, 0); 68 - kfree(name); 69 - kfree(value); 70 - return err; 73 + return security_inode_init_security(inode, dir, qstr, 74 + &ext3_initxattrs, handle); 71 75 } 72 76 73 77 const struct xattr_handler ext3_xattr_security_handler = {
+20 -16
fs/ext4/xattr_security.c
··· 48 48 name, value, size, flags); 49 49 } 50 50 51 + int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array, 52 + void *fs_info) 53 + { 54 + const struct xattr *xattr; 55 + handle_t *handle = fs_info; 56 + int err = 0; 57 + 58 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 59 + err = ext4_xattr_set_handle(handle, inode, 60 + EXT4_XATTR_INDEX_SECURITY, 61 + xattr->name, xattr->value, 62 + xattr->value_len, 0); 63 + if (err < 0) 64 + break; 65 + } 66 + return err; 67 + } 68 + 51 69 int 52 70 ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir, 53 71 const struct qstr *qstr) 54 72 { 55 - int err; 56 - size_t len; 57 - void *value; 58 - char *name; 59 - 60 - err = security_inode_init_security(inode, dir, qstr, &name, &value, &len); 61 - if (err) { 62 - if (err == -EOPNOTSUPP) 63 - return 0; 64 - return err; 65 - } 66 - err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_SECURITY, 67 - name, value, len, 0); 68 - kfree(name); 69 - kfree(value); 70 - return err; 73 + return security_inode_init_security(inode, dir, qstr, 74 + &ext4_initxattrs, handle); 71 75 } 72 76 73 77 const struct xattr_handler ext4_xattr_security_handler = {
+18 -20
fs/gfs2/inode.c
··· 624 624 return error; 625 625 } 626 626 627 + int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, 628 + void *fs_info) 629 + { 630 + const struct xattr *xattr; 631 + int err = 0; 632 + 633 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 634 + err = __gfs2_xattr_set(inode, xattr->name, xattr->value, 635 + xattr->value_len, 0, 636 + GFS2_EATYPE_SECURITY); 637 + if (err < 0) 638 + break; 639 + } 640 + return err; 641 + } 642 + 627 643 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, 628 644 const struct qstr *qstr) 629 645 { 630 - int err; 631 - size_t len; 632 - void *value; 633 - char *name; 634 - 635 - err = security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr, 636 - &name, &value, &len); 637 - 638 - if (err) { 639 - if (err == -EOPNOTSUPP) 640 - return 0; 641 - return err; 642 - } 643 - 644 - err = __gfs2_xattr_set(&ip->i_inode, name, value, len, 0, 645 - GFS2_EATYPE_SECURITY); 646 - kfree(value); 647 - kfree(name); 648 - 649 - return err; 646 + return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr, 647 + &gfs2_initxattrs, NULL); 650 648 } 651 649 652 650 /**
+20 -17
fs/jffs2/security.c
··· 22 22 #include <linux/security.h> 23 23 #include "nodelist.h" 24 24 25 - /* ---- Initial Security Label Attachment -------------- */ 25 + /* ---- Initial Security Label(s) Attachment callback --- */ 26 + int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, 27 + void *fs_info) 28 + { 29 + const struct xattr *xattr; 30 + int err = 0; 31 + 32 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 33 + err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, 34 + xattr->name, xattr->value, 35 + xattr->value_len, 0); 36 + if (err < 0) 37 + break; 38 + } 39 + return err; 40 + } 41 + 42 + /* ---- Initial Security Label(s) Attachment ----------- */ 26 43 int jffs2_init_security(struct inode *inode, struct inode *dir, 27 44 const struct qstr *qstr) 28 45 { 29 - int rc; 30 - size_t len; 31 - void *value; 32 - char *name; 33 - 34 - rc = security_inode_init_security(inode, dir, qstr, &name, &value, &len); 35 - if (rc) { 36 - if (rc == -EOPNOTSUPP) 37 - return 0; 38 - return rc; 39 - } 40 - rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0); 41 - 42 - kfree(name); 43 - kfree(value); 44 - return rc; 46 + return security_inode_init_security(inode, dir, qstr, 47 + &jffs2_initxattrs, NULL); 45 48 } 46 49 47 50 /* ---- XATTR Handler for "security.*" ----------------- */
+29 -30
fs/jfs/xattr.c
··· 1089 1089 } 1090 1090 1091 1091 #ifdef CONFIG_JFS_SECURITY 1092 + int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, 1093 + void *fs_info) 1094 + { 1095 + const struct xattr *xattr; 1096 + tid_t *tid = fs_info; 1097 + char *name; 1098 + int err = 0; 1099 + 1100 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 1101 + name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 1102 + strlen(xattr->name) + 1, GFP_NOFS); 1103 + if (!name) { 1104 + err = -ENOMEM; 1105 + break; 1106 + } 1107 + strcpy(name, XATTR_SECURITY_PREFIX); 1108 + strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); 1109 + 1110 + err = __jfs_setxattr(*tid, inode, name, 1111 + xattr->value, xattr->value_len, 0); 1112 + kfree(name); 1113 + if (err < 0) 1114 + break; 1115 + } 1116 + return err; 1117 + } 1118 + 1092 1119 int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir, 1093 1120 const struct qstr *qstr) 1094 1121 { 1095 - int rc; 1096 - size_t len; 1097 - void *value; 1098 - char *suffix; 1099 - char *name; 1100 - 1101 - rc = security_inode_init_security(inode, dir, qstr, &suffix, &value, 1102 - &len); 1103 - if (rc) { 1104 - if (rc == -EOPNOTSUPP) 1105 - return 0; 1106 - return rc; 1107 - } 1108 - name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 1 + strlen(suffix), 1109 - GFP_NOFS); 1110 - if (!name) { 1111 - rc = -ENOMEM; 1112 - goto kmalloc_failed; 1113 - } 1114 - strcpy(name, XATTR_SECURITY_PREFIX); 1115 - strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix); 1116 - 1117 - rc = __jfs_setxattr(tid, inode, name, value, len, 0); 1118 - 1119 - kfree(name); 1120 - kmalloc_failed: 1121 - kfree(suffix); 1122 - kfree(value); 1123 - 1124 - return rc; 1122 + return security_inode_init_security(inode, dir, qstr, 1123 + &jfs_initxattrs, &tid); 1125 1124 } 1126 1125 #endif
+24 -14
fs/ocfs2/xattr.c
··· 7185 7185 { 7186 7186 int ret = 0; 7187 7187 struct buffer_head *dir_bh = NULL; 7188 - struct ocfs2_security_xattr_info si = { 7189 - .enable = 1, 7190 - }; 7191 7188 7192 - ret = ocfs2_init_security_get(inode, dir, qstr, &si); 7189 + ret = ocfs2_init_security_get(inode, dir, qstr, NULL); 7193 7190 if (!ret) { 7194 - ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, 7195 - si.name, si.value, si.value_len, 7196 - XATTR_CREATE); 7197 - if (ret) { 7198 - mlog_errno(ret); 7199 - goto leave; 7200 - } 7201 - } else if (ret != -EOPNOTSUPP) { 7202 7191 mlog_errno(ret); 7203 7192 goto leave; 7204 7193 } ··· 7244 7255 name, value, size, flags); 7245 7256 } 7246 7257 7258 + int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, 7259 + void *fs_info) 7260 + { 7261 + const struct xattr *xattr; 7262 + int err = 0; 7263 + 7264 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 7265 + err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, 7266 + xattr->name, xattr->value, 7267 + xattr->value_len, XATTR_CREATE); 7268 + if (err) 7269 + break; 7270 + } 7271 + return err; 7272 + } 7273 + 7247 7274 int ocfs2_init_security_get(struct inode *inode, 7248 7275 struct inode *dir, 7249 7276 const struct qstr *qstr, ··· 7268 7263 /* check whether ocfs2 support feature xattr */ 7269 7264 if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb))) 7270 7265 return -EOPNOTSUPP; 7271 - return security_inode_init_security(inode, dir, qstr, &si->name, 7272 - &si->value, &si->value_len); 7266 + if (si) 7267 + return security_old_inode_init_security(inode, dir, qstr, 7268 + &si->name, &si->value, 7269 + &si->value_len); 7270 + 7271 + return security_inode_init_security(inode, dir, qstr, 7272 + &ocfs2_initxattrs, NULL); 7273 7273 } 7274 7274 7275 7275 int ocfs2_init_security_set(handle_t *handle,
+2 -2
fs/reiserfs/xattr_security.c
··· 66 66 if (IS_PRIVATE(dir)) 67 67 return 0; 68 68 69 - error = security_inode_init_security(inode, dir, qstr, &sec->name, 70 - &sec->value, &sec->length); 69 + error = security_old_inode_init_security(inode, dir, qstr, &sec->name, 70 + &sec->value, &sec->length); 71 71 if (error) { 72 72 if (error == -EOPNOTSUPP) 73 73 error = 0;
+62 -1
fs/xattr.c
··· 14 14 #include <linux/mount.h> 15 15 #include <linux/namei.h> 16 16 #include <linux/security.h> 17 + #include <linux/evm.h> 17 18 #include <linux/syscalls.h> 18 19 #include <linux/module.h> 19 20 #include <linux/fsnotify.h> ··· 167 166 } 168 167 EXPORT_SYMBOL_GPL(xattr_getsecurity); 169 168 169 + /* 170 + * vfs_getxattr_alloc - allocate memory, if necessary, before calling getxattr 171 + * 172 + * Allocate memory, if not already allocated, or re-allocate correct size, 173 + * before retrieving the extended attribute. 174 + * 175 + * Returns the result of alloc, if failed, or the getxattr operation. 176 + */ 177 + ssize_t 178 + vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, 179 + size_t xattr_size, gfp_t flags) 180 + { 181 + struct inode *inode = dentry->d_inode; 182 + char *value = *xattr_value; 183 + int error; 184 + 185 + error = xattr_permission(inode, name, MAY_READ); 186 + if (error) 187 + return error; 188 + 189 + if (!inode->i_op->getxattr) 190 + return -EOPNOTSUPP; 191 + 192 + error = inode->i_op->getxattr(dentry, name, NULL, 0); 193 + if (error < 0) 194 + return error; 195 + 196 + if (!value || (error > xattr_size)) { 197 + value = krealloc(*xattr_value, error + 1, flags); 198 + if (!value) 199 + return -ENOMEM; 200 + memset(value, 0, error + 1); 201 + } 202 + 203 + error = inode->i_op->getxattr(dentry, name, value, error); 204 + *xattr_value = value; 205 + return error; 206 + } 207 + 208 + /* Compare an extended attribute value with the given value */ 209 + int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, 210 + const char *value, size_t size, gfp_t flags) 211 + { 212 + char *xattr_value = NULL; 213 + int rc; 214 + 215 + rc = vfs_getxattr_alloc(dentry, xattr_name, &xattr_value, 0, flags); 216 + if (rc < 0) 217 + return rc; 218 + 219 + if ((rc != size) || (memcmp(xattr_value, value, rc) != 0)) 220 + rc = -EINVAL; 221 + else 222 + rc = 0; 223 + kfree(xattr_value); 224 + return rc; 225 + } 226 + 170 227 ssize_t 171 228 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) 172 229 { ··· 302 243 error = inode->i_op->removexattr(dentry, name); 303 244 mutex_unlock(&inode->i_mutex); 304 245 305 - if (!error) 246 + if (!error) { 306 247 fsnotify_xattr(dentry); 248 + evm_inode_post_removexattr(dentry, name); 249 + } 307 250 return error; 308 251 } 309 252 EXPORT_SYMBOL_GPL(vfs_removexattr);
+20 -19
fs/xfs/xfs_iops.c
··· 102 102 103 103 } 104 104 105 + 106 + int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, 107 + void *fs_info) 108 + { 109 + const struct xattr *xattr; 110 + struct xfs_inode *ip = XFS_I(inode); 111 + int error = 0; 112 + 113 + for (xattr = xattr_array; xattr->name != NULL; xattr++) { 114 + error = xfs_attr_set(ip, xattr->name, xattr->value, 115 + xattr->value_len, ATTR_SECURE); 116 + if (error < 0) 117 + break; 118 + } 119 + return error; 120 + } 121 + 105 122 /* 106 123 * Hook in SELinux. This is not quite correct yet, what we really need 107 124 * here (as we do for default ACLs) is a mechanism by which creation of 108 125 * these attrs can be journalled at inode creation time (along with the 109 126 * inode, of course, such that log replay can't cause these to be lost). 110 127 */ 128 + 111 129 STATIC int 112 130 xfs_init_security( 113 131 struct inode *inode, 114 132 struct inode *dir, 115 133 const struct qstr *qstr) 116 134 { 117 - struct xfs_inode *ip = XFS_I(inode); 118 - size_t length; 119 - void *value; 120 - unsigned char *name; 121 - int error; 122 - 123 - error = security_inode_init_security(inode, dir, qstr, (char **)&name, 124 - &value, &length); 125 - if (error) { 126 - if (error == -EOPNOTSUPP) 127 - return 0; 128 - return -error; 129 - } 130 - 131 - error = xfs_attr_set(ip, name, value, length, ATTR_SECURE); 132 - 133 - kfree(name); 134 - kfree(value); 135 - return error; 135 + return security_inode_init_security(inode, dir, qstr, 136 + &xfs_initxattrs, NULL); 136 137 } 137 138 138 139 static void
+100
include/linux/evm.h
··· 1 + /* 2 + * evm.h 3 + * 4 + * Copyright (c) 2009 IBM Corporation 5 + * Author: Mimi Zohar <zohar@us.ibm.com> 6 + */ 7 + 8 + #ifndef _LINUX_EVM_H 9 + #define _LINUX_EVM_H 10 + 11 + #include <linux/integrity.h> 12 + #include <linux/xattr.h> 13 + 14 + struct integrity_iint_cache; 15 + 16 + #ifdef CONFIG_EVM 17 + extern enum integrity_status evm_verifyxattr(struct dentry *dentry, 18 + const char *xattr_name, 19 + void *xattr_value, 20 + size_t xattr_value_len, 21 + struct integrity_iint_cache *iint); 22 + extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); 23 + extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); 24 + extern int evm_inode_setxattr(struct dentry *dentry, const char *name, 25 + const void *value, size_t size); 26 + extern void evm_inode_post_setxattr(struct dentry *dentry, 27 + const char *xattr_name, 28 + const void *xattr_value, 29 + size_t xattr_value_len); 30 + extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); 31 + extern void evm_inode_post_removexattr(struct dentry *dentry, 32 + const char *xattr_name); 33 + extern int evm_inode_init_security(struct inode *inode, 34 + const struct xattr *xattr_array, 35 + struct xattr *evm); 36 + #ifdef CONFIG_FS_POSIX_ACL 37 + extern int posix_xattr_acl(const char *xattrname); 38 + #else 39 + static inline int posix_xattr_acl(const char *xattrname) 40 + { 41 + return 0; 42 + } 43 + #endif 44 + #else 45 + #ifdef CONFIG_INTEGRITY 46 + static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, 47 + const char *xattr_name, 48 + void *xattr_value, 49 + size_t xattr_value_len, 50 + struct integrity_iint_cache *iint) 51 + { 52 + return INTEGRITY_UNKNOWN; 53 + } 54 + #endif 55 + 56 + static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) 57 + { 58 + return 0; 59 + } 60 + 61 + static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) 62 + { 63 + return; 64 + } 65 + 66 + static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, 67 + const void *value, size_t size) 68 + { 69 + return 0; 70 + } 71 + 72 + static inline void evm_inode_post_setxattr(struct dentry *dentry, 73 + const char *xattr_name, 74 + const void *xattr_value, 75 + size_t xattr_value_len) 76 + { 77 + return; 78 + } 79 + 80 + static inline int evm_inode_removexattr(struct dentry *dentry, 81 + const char *xattr_name) 82 + { 83 + return 0; 84 + } 85 + 86 + static inline void evm_inode_post_removexattr(struct dentry *dentry, 87 + const char *xattr_name) 88 + { 89 + return; 90 + } 91 + 92 + static inline int evm_inode_init_security(struct inode *inode, 93 + const struct xattr *xattr_array, 94 + struct xattr *evm) 95 + { 96 + return 0; 97 + } 98 + 99 + #endif /* CONFIG_EVM_H */ 100 + #endif /* LINUX_EVM_H */
-13
include/linux/ima.h
··· 15 15 16 16 #ifdef CONFIG_IMA 17 17 extern int ima_bprm_check(struct linux_binprm *bprm); 18 - extern int ima_inode_alloc(struct inode *inode); 19 - extern void ima_inode_free(struct inode *inode); 20 18 extern int ima_file_check(struct file *file, int mask); 21 19 extern void ima_file_free(struct file *file); 22 20 extern int ima_file_mmap(struct file *file, unsigned long prot); ··· 23 25 static inline int ima_bprm_check(struct linux_binprm *bprm) 24 26 { 25 27 return 0; 26 - } 27 - 28 - static inline int ima_inode_alloc(struct inode *inode) 29 - { 30 - return 0; 31 - } 32 - 33 - static inline void ima_inode_free(struct inode *inode) 34 - { 35 - return; 36 28 } 37 29 38 30 static inline int ima_file_check(struct file *file, int mask) ··· 39 51 { 40 52 return 0; 41 53 } 42 - 43 54 #endif /* CONFIG_IMA_H */ 44 55 #endif /* _LINUX_IMA_H */
+39
include/linux/integrity.h
··· 1 + /* 2 + * Copyright (C) 2009 IBM Corporation 3 + * Author: Mimi Zohar <zohar@us.ibm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation, version 2 of the License. 8 + */ 9 + 10 + #ifndef _LINUX_INTEGRITY_H 11 + #define _LINUX_INTEGRITY_H 12 + 13 + #include <linux/fs.h> 14 + 15 + enum integrity_status { 16 + INTEGRITY_PASS = 0, 17 + INTEGRITY_FAIL, 18 + INTEGRITY_NOLABEL, 19 + INTEGRITY_NOXATTRS, 20 + INTEGRITY_UNKNOWN, 21 + }; 22 + 23 + /* List of EVM protected security xattrs */ 24 + #ifdef CONFIG_INTEGRITY 25 + extern int integrity_inode_alloc(struct inode *inode); 26 + extern void integrity_inode_free(struct inode *inode); 27 + 28 + #else 29 + static inline int integrity_inode_alloc(struct inode *inode) 30 + { 31 + return 0; 32 + } 33 + 34 + static inline void integrity_inode_free(struct inode *inode) 35 + { 36 + return; 37 + } 38 + #endif /* CONFIG_INTEGRITY_H */ 39 + #endif /* _LINUX_INTEGRITY_H */
+1 -1
include/linux/kernel.h
··· 382 382 } 383 383 384 384 extern int hex_to_bin(char ch); 385 - extern void hex2bin(u8 *dst, const char *src, size_t count); 385 + extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); 386 386 387 387 /* 388 388 * General tracing related utility functions - trace_printk(),
+25 -7
include/linux/security.h
··· 36 36 #include <linux/key.h> 37 37 #include <linux/xfrm.h> 38 38 #include <linux/slab.h> 39 + #include <linux/xattr.h> 39 40 #include <net/flow.h> 40 41 41 42 /* Maximum number of letters for an LSM name string */ ··· 147 146 extern int mmap_min_addr_handler(struct ctl_table *table, int write, 148 147 void __user *buffer, size_t *lenp, loff_t *ppos); 149 148 #endif 149 + 150 + /* security_inode_init_security callback function to write xattrs */ 151 + typedef int (*initxattrs) (struct inode *inode, 152 + const struct xattr *xattr_array, void *fs_data); 150 153 151 154 #ifdef CONFIG_SECURITY 152 155 ··· 1372 1367 * @inode_getsecctx: 1373 1368 * Returns a string containing all relavent security context information 1374 1369 * 1375 - * @inode we wish to set the security context of. 1370 + * @inode we wish to get the security context of. 1376 1371 * @ctx is a pointer in which to place the allocated security context. 1377 1372 * @ctxlen points to the place to put the length of @ctx. 1378 1373 * This is the main security structure. 
··· 1660 1655 extern int security_init(void); 1661 1656 extern int security_module_enable(struct security_operations *ops); 1662 1657 extern int register_security(struct security_operations *ops); 1658 + extern void __init security_fixup_ops(struct security_operations *ops); 1659 + 1663 1660 1664 1661 /* Security operations */ 1665 1662 int security_ptrace_access_check(struct task_struct *child, unsigned int mode); ··· 1711 1704 int security_inode_alloc(struct inode *inode); 1712 1705 void security_inode_free(struct inode *inode); 1713 1706 int security_inode_init_security(struct inode *inode, struct inode *dir, 1714 - const struct qstr *qstr, char **name, 1715 - void **value, size_t *len); 1707 + const struct qstr *qstr, 1708 + initxattrs initxattrs, void *fs_data); 1709 + int security_old_inode_init_security(struct inode *inode, struct inode *dir, 1710 + const struct qstr *qstr, char **name, 1711 + void **value, size_t *len); 1716 1712 int security_inode_create(struct inode *dir, struct dentry *dentry, int mode); 1717 1713 int security_inode_link(struct dentry *old_dentry, struct inode *dir, 1718 1714 struct dentry *new_dentry); ··· 2044 2034 static inline int security_inode_init_security(struct inode *inode, 2045 2035 struct inode *dir, 2046 2036 const struct qstr *qstr, 2047 - char **name, 2048 - void **value, 2049 - size_t *len) 2037 + initxattrs initxattrs, 2038 + void *fs_data) 2050 2039 { 2051 - return -EOPNOTSUPP; 2040 + return 0; 2041 + } 2042 + 2043 + static inline int security_old_inode_init_security(struct inode *inode, 2044 + struct inode *dir, 2045 + const struct qstr *qstr, 2046 + char **name, void **value, 2047 + size_t *len) 2048 + { 2049 + return 0; 2052 2050 } 2053 2051 2054 2052 static inline int security_inode_create(struct inode *dir,
+18 -1
include/linux/xattr.h
··· 30 30 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) 31 31 32 32 /* Security namespace */ 33 + #define XATTR_EVM_SUFFIX "evm" 34 + #define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX 35 + 33 36 #define XATTR_SELINUX_SUFFIX "selinux" 34 37 #define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX 35 38 ··· 52 49 #define XATTR_CAPS_SUFFIX "capability" 53 50 #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX 54 51 52 + #define XATTR_POSIX_ACL_ACCESS "posix_acl_access" 53 + #define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS 54 + #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" 55 + #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT 56 + 55 57 #ifdef __KERNEL__ 56 58 57 59 #include <linux/types.h> ··· 75 67 size_t size, int flags, int handler_flags); 76 68 }; 77 69 70 + struct xattr { 71 + char *name; 72 + void *value; 73 + size_t value_len; 74 + }; 75 + 78 76 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); 79 77 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); 80 78 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); ··· 92 78 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); 93 79 int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); 94 80 int generic_removexattr(struct dentry *dentry, const char *name); 95 - 81 + ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, 82 + char **xattr_value, size_t size, gfp_t flags); 83 + int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, 84 + const char *value, size_t size, gfp_t flags); 96 85 #endif /* __KERNEL__ */ 97 86 98 87 #endif /* _LINUX_XATTR_H */
+16 -2
kernel/cred.c
··· 644 644 */ 645 645 struct cred *prepare_kernel_cred(struct task_struct *daemon) 646 646 { 647 + #ifdef CONFIG_KEYS 648 + struct thread_group_cred *tgcred; 649 + #endif 647 650 const struct cred *old; 648 651 struct cred *new; 649 652 650 653 new = kmem_cache_alloc(cred_jar, GFP_KERNEL); 651 654 if (!new) 652 655 return NULL; 656 + 657 + #ifdef CONFIG_KEYS 658 + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); 659 + if (!tgcred) { 660 + kmem_cache_free(cred_jar, new); 661 + return NULL; 662 + } 663 + #endif 653 664 654 665 kdebug("prepare_kernel_cred() alloc %p", new); 655 666 ··· 678 667 get_group_info(new->group_info); 679 668 680 669 #ifdef CONFIG_KEYS 681 - atomic_inc(&init_tgcred.usage); 682 - new->tgcred = &init_tgcred; 670 + atomic_set(&tgcred->usage, 1); 671 + spin_lock_init(&tgcred->lock); 672 + tgcred->process_keyring = NULL; 673 + tgcred->session_keyring = NULL; 674 + new->tgcred = tgcred; 683 675 new->request_key_auth = NULL; 684 676 new->thread_keyring = NULL; 685 677 new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+11 -4
lib/hexdump.c
··· 38 38 * @dst: binary result 39 39 * @src: ascii hexadecimal string 40 40 * @count: result length 41 + * 42 + * Return 0 on success, -1 in case of bad input. 41 43 */ 42 - void hex2bin(u8 *dst, const char *src, size_t count) 44 + int hex2bin(u8 *dst, const char *src, size_t count) 43 45 { 44 46 while (count--) { 45 - *dst = hex_to_bin(*src++) << 4; 46 - *dst += hex_to_bin(*src++); 47 - dst++; 47 + int hi = hex_to_bin(*src++); 48 + int lo = hex_to_bin(*src++); 49 + 50 + if ((hi < 0) || (lo < 0)) 51 + return -1; 52 + 53 + *dst++ = (hi << 4) | lo; 48 54 } 55 + return 0; 49 56 } 50 57 EXPORT_SYMBOL(hex2bin); 51 58
+2 -2
mm/shmem.c
··· 1458 1458 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1459 1459 if (inode) { 1460 1460 error = security_inode_init_security(inode, dir, 1461 - &dentry->d_name, NULL, 1461 + &dentry->d_name, 1462 1462 NULL, NULL); 1463 1463 if (error) { 1464 1464 if (error != -EOPNOTSUPP) { ··· 1598 1598 if (!inode) 1599 1599 return -ENOSPC; 1600 1600 1601 - error = security_inode_init_security(inode, dir, &dentry->d_name, NULL, 1601 + error = security_inode_init_security(inode, dir, &dentry->d_name, 1602 1602 NULL, NULL); 1603 1603 if (error) { 1604 1604 if (error != -EOPNOTSUPP) {
+4 -2
security/Kconfig
··· 38 38 39 39 config ENCRYPTED_KEYS 40 40 tristate "ENCRYPTED KEYS" 41 - depends on KEYS && TRUSTED_KEYS 41 + depends on KEYS 42 + select CRYPTO 43 + select CRYPTO_HMAC 42 44 select CRYPTO_AES 43 45 select CRYPTO_CBC 44 46 select CRYPTO_SHA256 ··· 188 186 source security/tomoyo/Kconfig 189 187 source security/apparmor/Kconfig 190 188 191 - source security/integrity/ima/Kconfig 189 + source security/integrity/Kconfig 192 190 193 191 choice 194 192 prompt "Default security module"
+2 -2
security/Makefile
··· 24 24 obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o 25 25 26 26 # Object integrity file lists 27 - subdir-$(CONFIG_IMA) += integrity/ima 28 - obj-$(CONFIG_IMA) += integrity/ima/built-in.o 27 + subdir-$(CONFIG_INTEGRITY) += integrity 28 + obj-$(CONFIG_INTEGRITY) += integrity/built-in.o
+1 -1
security/apparmor/apparmorfs.c
··· 200 200 * 201 201 * Returns: error on failure 202 202 */ 203 - int __init aa_create_aafs(void) 203 + static int __init aa_create_aafs(void) 204 204 { 205 205 int error; 206 206
+1
security/apparmor/ipc.c
··· 19 19 #include "include/capability.h" 20 20 #include "include/context.h" 21 21 #include "include/policy.h" 22 + #include "include/ipc.h" 22 23 23 24 /* call back to audit ptrace fields */ 24 25 static void audit_cb(struct audit_buffer *ab, void *va)
+1
security/apparmor/lib.c
··· 18 18 #include <linux/vmalloc.h> 19 19 20 20 #include "include/audit.h" 21 + #include "include/apparmor.h" 21 22 22 23 23 24 /**
+6 -6
security/apparmor/policy_unpack.c
··· 381 381 profile->file.trans.size = size; 382 382 for (i = 0; i < size; i++) { 383 383 char *str; 384 - int c, j, size = unpack_strdup(e, &str, NULL); 384 + int c, j, size2 = unpack_strdup(e, &str, NULL); 385 385 /* unpack_strdup verifies that the last character is 386 386 * null termination byte. 387 387 */ 388 - if (!size) 388 + if (!size2) 389 389 goto fail; 390 390 profile->file.trans.table[i] = str; 391 391 /* verify that name doesn't start with space */ ··· 393 393 goto fail; 394 394 395 395 /* count internal # of internal \0 */ 396 - for (c = j = 0; j < size - 2; j++) { 396 + for (c = j = 0; j < size2 - 2; j++) { 397 397 if (!str[j]) 398 398 c++; 399 399 } ··· 440 440 if (size > RLIM_NLIMITS) 441 441 goto fail; 442 442 for (i = 0; i < size; i++) { 443 - u64 tmp = 0; 443 + u64 tmp2 = 0; 444 444 int a = aa_map_resource(i); 445 - if (!unpack_u64(e, &tmp, NULL)) 445 + if (!unpack_u64(e, &tmp2, NULL)) 446 446 goto fail; 447 - profile->rlimits.limits[a].rlim_max = tmp; 447 + profile->rlimits.limits[a].rlim_max = tmp2; 448 448 } 449 449 if (!unpack_nameX(e, AA_ARRAYEND, NULL)) 450 450 goto fail;
+1
security/apparmor/procattr.c
··· 16 16 #include "include/context.h" 17 17 #include "include/policy.h" 18 18 #include "include/domain.h" 19 + #include "include/procattr.h" 19 20 20 21 21 22 /**
+10 -6
security/commoncap.c
··· 332 332 */ 333 333 static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, 334 334 struct linux_binprm *bprm, 335 - bool *effective) 335 + bool *effective, 336 + bool *has_cap) 336 337 { 337 338 struct cred *new = bprm->cred; 338 339 unsigned i; ··· 341 340 342 341 if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE) 343 342 *effective = true; 343 + 344 + if (caps->magic_etc & VFS_CAP_REVISION_MASK) 345 + *has_cap = true; 344 346 345 347 CAP_FOR_EACH_U32(i) { 346 348 __u32 permitted = caps->permitted.cap[i]; ··· 428 424 * its xattrs and, if present, apply them to the proposed credentials being 429 425 * constructed by execve(). 430 426 */ 431 - static int get_file_caps(struct linux_binprm *bprm, bool *effective) 427 + static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap) 432 428 { 433 429 struct dentry *dentry; 434 430 int rc = 0; ··· 454 450 goto out; 455 451 } 456 452 457 - rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective); 453 + rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap); 458 454 if (rc == -EINVAL) 459 455 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", 460 456 __func__, rc, bprm->filename); ··· 479 475 { 480 476 const struct cred *old = current_cred(); 481 477 struct cred *new = bprm->cred; 482 - bool effective; 478 + bool effective, has_cap = false; 483 479 int ret; 484 480 485 481 effective = false; 486 - ret = get_file_caps(bprm, &effective); 482 + ret = get_file_caps(bprm, &effective, &has_cap); 487 483 if (ret < 0) 488 484 return ret; 489 485 ··· 493 489 * for a setuid root binary run by a non-root user. Do set it 494 490 * for a root user just to cause least surprise to an admin. 495 491 */ 496 - if (effective && new->uid != 0 && new->euid == 0) { 492 + if (has_cap && new->uid != 0 && new->euid == 0) { 497 493 warn_setuid_and_fcaps_mixed(bprm->filename); 498 494 goto skip; 499 495 }
+7
security/integrity/Kconfig
··· 1 + # 2 + config INTEGRITY 3 + def_bool y 4 + depends on IMA || EVM 5 + 6 + source security/integrity/ima/Kconfig 7 + source security/integrity/evm/Kconfig
+12
security/integrity/Makefile
··· 1 + # 2 + # Makefile for caching inode integrity data (iint) 3 + # 4 + 5 + obj-$(CONFIG_INTEGRITY) += integrity.o 6 + 7 + integrity-y := iint.o 8 + 9 + subdir-$(CONFIG_IMA) += ima 10 + obj-$(CONFIG_IMA) += ima/built-in.o 11 + subdir-$(CONFIG_EVM) += evm 12 + obj-$(CONFIG_EVM) += evm/built-in.o
+13
security/integrity/evm/Kconfig
··· 1 + config EVM 2 + boolean "EVM support" 3 + depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n) 4 + select CRYPTO_HMAC 5 + select CRYPTO_MD5 6 + select CRYPTO_SHA1 7 + select ENCRYPTED_KEYS 8 + default n 9 + help 10 + EVM protects a file's security extended attributes against 11 + integrity attacks. 12 + 13 + If you are unsure how to answer this question, answer N.
+7
security/integrity/evm/Makefile
··· 1 + # 2 + # Makefile for building the Extended Verification Module(EVM) 3 + # 4 + obj-$(CONFIG_EVM) += evm.o 5 + 6 + evm-y := evm_main.o evm_crypto.o evm_secfs.o 7 + evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
+38
security/integrity/evm/evm.h
··· 1 + /* 2 + * Copyright (C) 2005-2010 IBM Corporation 3 + * 4 + * Authors: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * Kylene Hall <kjhall@us.ibm.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation, version 2 of the License. 11 + * 12 + * File: evm.h 13 + * 14 + */ 15 + #include <linux/xattr.h> 16 + #include <linux/security.h> 17 + #include "../integrity.h" 18 + 19 + extern int evm_initialized; 20 + extern char *evm_hmac; 21 + 22 + extern struct crypto_shash *hmac_tfm; 23 + 24 + /* List of EVM protected security xattrs */ 25 + extern char *evm_config_xattrnames[]; 26 + 27 + extern int evm_init_key(void); 28 + extern int evm_update_evmxattr(struct dentry *dentry, 29 + const char *req_xattr_name, 30 + const char *req_xattr_value, 31 + size_t req_xattr_value_len); 32 + extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, 33 + const char *req_xattr_value, 34 + size_t req_xattr_value_len, char *digest); 35 + extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr, 36 + char *hmac_val); 37 + extern int evm_init_secfs(void); 38 + extern void evm_cleanup_secfs(void);
+216
security/integrity/evm/evm_crypto.c
··· 1 + /* 2 + * Copyright (C) 2005-2010 IBM Corporation 3 + * 4 + * Authors: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * Kylene Hall <kjhall@us.ibm.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation, version 2 of the License. 11 + * 12 + * File: evm_crypto.c 13 + * Using root's kernel master key (kmk), calculate the HMAC 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/crypto.h> 18 + #include <linux/xattr.h> 19 + #include <keys/encrypted-type.h> 20 + #include <crypto/hash.h> 21 + #include "evm.h" 22 + 23 + #define EVMKEY "evm-key" 24 + #define MAX_KEY_SIZE 128 25 + static unsigned char evmkey[MAX_KEY_SIZE]; 26 + static int evmkey_len = MAX_KEY_SIZE; 27 + 28 + struct crypto_shash *hmac_tfm; 29 + 30 + static struct shash_desc *init_desc(void) 31 + { 32 + int rc; 33 + struct shash_desc *desc; 34 + 35 + if (hmac_tfm == NULL) { 36 + hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC); 37 + if (IS_ERR(hmac_tfm)) { 38 + pr_err("Can not allocate %s (reason: %ld)\n", 39 + evm_hmac, PTR_ERR(hmac_tfm)); 40 + rc = PTR_ERR(hmac_tfm); 41 + hmac_tfm = NULL; 42 + return ERR_PTR(rc); 43 + } 44 + } 45 + 46 + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm), 47 + GFP_KERNEL); 48 + if (!desc) 49 + return ERR_PTR(-ENOMEM); 50 + 51 + desc->tfm = hmac_tfm; 52 + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 53 + 54 + rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len); 55 + if (rc) 56 + goto out; 57 + rc = crypto_shash_init(desc); 58 + out: 59 + if (rc) { 60 + kfree(desc); 61 + return ERR_PTR(rc); 62 + } 63 + return desc; 64 + } 65 + 66 + /* Protect against 'cutting & pasting' security.evm xattr, include inode 67 + * specific info. 68 + * 69 + * (Additional directory/file metadata needs to be added for more complete 70 + * protection.) 
71 + */ 72 + static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, 73 + char *digest) 74 + { 75 + struct h_misc { 76 + unsigned long ino; 77 + __u32 generation; 78 + uid_t uid; 79 + gid_t gid; 80 + umode_t mode; 81 + } hmac_misc; 82 + 83 + memset(&hmac_misc, 0, sizeof hmac_misc); 84 + hmac_misc.ino = inode->i_ino; 85 + hmac_misc.generation = inode->i_generation; 86 + hmac_misc.uid = inode->i_uid; 87 + hmac_misc.gid = inode->i_gid; 88 + hmac_misc.mode = inode->i_mode; 89 + crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc); 90 + crypto_shash_final(desc, digest); 91 + } 92 + 93 + /* 94 + * Calculate the HMAC value across the set of protected security xattrs. 95 + * 96 + * Instead of retrieving the requested xattr, for performance, calculate 97 + * the hmac using the requested xattr value. Don't alloc/free memory for 98 + * each xattr, but attempt to re-use the previously allocated memory. 99 + */ 100 + int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, 101 + const char *req_xattr_value, size_t req_xattr_value_len, 102 + char *digest) 103 + { 104 + struct inode *inode = dentry->d_inode; 105 + struct shash_desc *desc; 106 + char **xattrname; 107 + size_t xattr_size = 0; 108 + char *xattr_value = NULL; 109 + int error; 110 + int size; 111 + 112 + if (!inode->i_op || !inode->i_op->getxattr) 113 + return -EOPNOTSUPP; 114 + desc = init_desc(); 115 + if (IS_ERR(desc)) 116 + return PTR_ERR(desc); 117 + 118 + error = -ENODATA; 119 + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { 120 + if ((req_xattr_name && req_xattr_value) 121 + && !strcmp(*xattrname, req_xattr_name)) { 122 + error = 0; 123 + crypto_shash_update(desc, (const u8 *)req_xattr_value, 124 + req_xattr_value_len); 125 + continue; 126 + } 127 + size = vfs_getxattr_alloc(dentry, *xattrname, 128 + &xattr_value, xattr_size, GFP_NOFS); 129 + if (size == -ENOMEM) { 130 + error = -ENOMEM; 131 + goto out; 132 + } 133 + if (size < 0) 134 + 
continue; 135 + 136 + error = 0; 137 + xattr_size = size; 138 + crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size); 139 + } 140 + hmac_add_misc(desc, inode, digest); 141 + 142 + out: 143 + kfree(xattr_value); 144 + kfree(desc); 145 + return error; 146 + } 147 + 148 + /* 149 + * Calculate the hmac and update security.evm xattr 150 + * 151 + * Expects to be called with i_mutex locked. 152 + */ 153 + int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, 154 + const char *xattr_value, size_t xattr_value_len) 155 + { 156 + struct inode *inode = dentry->d_inode; 157 + struct evm_ima_xattr_data xattr_data; 158 + int rc = 0; 159 + 160 + rc = evm_calc_hmac(dentry, xattr_name, xattr_value, 161 + xattr_value_len, xattr_data.digest); 162 + if (rc == 0) { 163 + xattr_data.type = EVM_XATTR_HMAC; 164 + rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, 165 + &xattr_data, 166 + sizeof(xattr_data), 0); 167 + } 168 + else if (rc == -ENODATA) 169 + rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); 170 + return rc; 171 + } 172 + 173 + int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr, 174 + char *hmac_val) 175 + { 176 + struct shash_desc *desc; 177 + 178 + desc = init_desc(); 179 + if (IS_ERR(desc)) { 180 + printk(KERN_INFO "init_desc failed\n"); 181 + return PTR_ERR(desc); 182 + } 183 + 184 + crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len); 185 + hmac_add_misc(desc, inode, hmac_val); 186 + kfree(desc); 187 + return 0; 188 + } 189 + 190 + /* 191 + * Get the key from the TPM for the SHA1-HMAC 192 + */ 193 + int evm_init_key(void) 194 + { 195 + struct key *evm_key; 196 + struct encrypted_key_payload *ekp; 197 + int rc = 0; 198 + 199 + evm_key = request_key(&key_type_encrypted, EVMKEY, NULL); 200 + if (IS_ERR(evm_key)) 201 + return -ENOENT; 202 + 203 + down_read(&evm_key->sem); 204 + ekp = evm_key->payload.data; 205 + if (ekp->decrypted_datalen > MAX_KEY_SIZE) { 206 + rc = -EINVAL; 207 + goto out; 208 + } 209 + 
memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen); 210 + out: 211 + /* burn the original key contents */ 212 + memset(ekp->decrypted_data, 0, ekp->decrypted_datalen); 213 + up_read(&evm_key->sem); 214 + key_put(evm_key); 215 + return rc; 216 + }
+384
security/integrity/evm/evm_main.c
··· 1 + /* 2 + * Copyright (C) 2005-2010 IBM Corporation 3 + * 4 + * Author: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * Kylene Hall <kjhall@us.ibm.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation, version 2 of the License. 11 + * 12 + * File: evm_main.c 13 + * implements evm_inode_setxattr, evm_inode_post_setxattr, 14 + * evm_inode_removexattr, and evm_verifyxattr 15 + */ 16 + 17 + #include <linux/module.h> 18 + #include <linux/crypto.h> 19 + #include <linux/xattr.h> 20 + #include <linux/integrity.h> 21 + #include <linux/evm.h> 22 + #include <crypto/hash.h> 23 + #include "evm.h" 24 + 25 + int evm_initialized; 26 + 27 + char *evm_hmac = "hmac(sha1)"; 28 + 29 + char *evm_config_xattrnames[] = { 30 + #ifdef CONFIG_SECURITY_SELINUX 31 + XATTR_NAME_SELINUX, 32 + #endif 33 + #ifdef CONFIG_SECURITY_SMACK 34 + XATTR_NAME_SMACK, 35 + #endif 36 + XATTR_NAME_CAPS, 37 + NULL 38 + }; 39 + 40 + static int evm_fixmode; 41 + static int __init evm_set_fixmode(char *str) 42 + { 43 + if (strncmp(str, "fix", 3) == 0) 44 + evm_fixmode = 1; 45 + return 0; 46 + } 47 + __setup("evm=", evm_set_fixmode); 48 + 49 + /* 50 + * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr 51 + * 52 + * Compute the HMAC on the dentry's protected set of extended attributes 53 + * and compare it against the stored security.evm xattr. 54 + * 55 + * For performance: 56 + * - use the previoulsy retrieved xattr value and length to calculate the 57 + * HMAC.) 58 + * - cache the verification result in the iint, when available. 
59 + * 60 + * Returns integrity status 61 + */ 62 + static enum integrity_status evm_verify_hmac(struct dentry *dentry, 63 + const char *xattr_name, 64 + char *xattr_value, 65 + size_t xattr_value_len, 66 + struct integrity_iint_cache *iint) 67 + { 68 + struct evm_ima_xattr_data xattr_data; 69 + enum integrity_status evm_status = INTEGRITY_PASS; 70 + int rc; 71 + 72 + if (iint && iint->evm_status == INTEGRITY_PASS) 73 + return iint->evm_status; 74 + 75 + /* if status is not PASS, try to check again - against -ENOMEM */ 76 + 77 + rc = evm_calc_hmac(dentry, xattr_name, xattr_value, 78 + xattr_value_len, xattr_data.digest); 79 + if (rc < 0) { 80 + evm_status = (rc == -ENODATA) 81 + ? INTEGRITY_NOXATTRS : INTEGRITY_FAIL; 82 + goto out; 83 + } 84 + 85 + xattr_data.type = EVM_XATTR_HMAC; 86 + rc = vfs_xattr_cmp(dentry, XATTR_NAME_EVM, (u8 *)&xattr_data, 87 + sizeof xattr_data, GFP_NOFS); 88 + if (rc < 0) 89 + evm_status = (rc == -ENODATA) 90 + ? INTEGRITY_NOLABEL : INTEGRITY_FAIL; 91 + out: 92 + if (iint) 93 + iint->evm_status = evm_status; 94 + return evm_status; 95 + } 96 + 97 + static int evm_protected_xattr(const char *req_xattr_name) 98 + { 99 + char **xattrname; 100 + int namelen; 101 + int found = 0; 102 + 103 + namelen = strlen(req_xattr_name); 104 + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { 105 + if ((strlen(*xattrname) == namelen) 106 + && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) { 107 + found = 1; 108 + break; 109 + } 110 + if (strncmp(req_xattr_name, 111 + *xattrname + XATTR_SECURITY_PREFIX_LEN, 112 + strlen(req_xattr_name)) == 0) { 113 + found = 1; 114 + break; 115 + } 116 + } 117 + return found; 118 + } 119 + 120 + /** 121 + * evm_verifyxattr - verify the integrity of the requested xattr 122 + * @dentry: object of the verify xattr 123 + * @xattr_name: requested xattr 124 + * @xattr_value: requested xattr value 125 + * @xattr_value_len: requested xattr value length 126 + * 127 + * Calculate the HMAC for the given 
dentry and verify it against the stored 128 + * security.evm xattr. For performance, use the xattr value and length 129 + * previously retrieved to calculate the HMAC. 130 + * 131 + * Returns the xattr integrity status. 132 + * 133 + * This function requires the caller to lock the inode's i_mutex before it 134 + * is executed. 135 + */ 136 + enum integrity_status evm_verifyxattr(struct dentry *dentry, 137 + const char *xattr_name, 138 + void *xattr_value, size_t xattr_value_len, 139 + struct integrity_iint_cache *iint) 140 + { 141 + if (!evm_initialized || !evm_protected_xattr(xattr_name)) 142 + return INTEGRITY_UNKNOWN; 143 + 144 + if (!iint) { 145 + iint = integrity_iint_find(dentry->d_inode); 146 + if (!iint) 147 + return INTEGRITY_UNKNOWN; 148 + } 149 + return evm_verify_hmac(dentry, xattr_name, xattr_value, 150 + xattr_value_len, iint); 151 + } 152 + EXPORT_SYMBOL_GPL(evm_verifyxattr); 153 + 154 + /* 155 + * evm_verify_current_integrity - verify the dentry's metadata integrity 156 + * @dentry: pointer to the affected dentry 157 + * 158 + * Verify and return the dentry's metadata integrity. The exceptions are 159 + * before EVM is initialized or in 'fix' mode. 160 + */ 161 + static enum integrity_status evm_verify_current_integrity(struct dentry *dentry) 162 + { 163 + struct inode *inode = dentry->d_inode; 164 + 165 + if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode) 166 + return 0; 167 + return evm_verify_hmac(dentry, NULL, NULL, 0, NULL); 168 + } 169 + 170 + /* 171 + * evm_protect_xattr - protect the EVM extended attribute 172 + * 173 + * Prevent security.evm from being modified or removed without the 174 + * necessary permissions or when the existing value is invalid. 175 + * 176 + * The posix xattr acls are 'system' prefixed, which normally would not 177 + * affect security.evm. An interesting side affect of writing posix xattr 178 + * acls is their modifying of the i_mode, which is included in security.evm. 
179 + * For posix xattr acls only, permit security.evm, even if it currently 180 + * doesn't exist, to be updated. 181 + */ 182 + static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name, 183 + const void *xattr_value, size_t xattr_value_len) 184 + { 185 + enum integrity_status evm_status; 186 + 187 + if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { 188 + if (!capable(CAP_SYS_ADMIN)) 189 + return -EPERM; 190 + } else if (!evm_protected_xattr(xattr_name)) { 191 + if (!posix_xattr_acl(xattr_name)) 192 + return 0; 193 + evm_status = evm_verify_current_integrity(dentry); 194 + if ((evm_status == INTEGRITY_PASS) || 195 + (evm_status == INTEGRITY_NOXATTRS)) 196 + return 0; 197 + return -EPERM; 198 + } 199 + evm_status = evm_verify_current_integrity(dentry); 200 + return evm_status == INTEGRITY_PASS ? 0 : -EPERM; 201 + } 202 + 203 + /** 204 + * evm_inode_setxattr - protect the EVM extended attribute 205 + * @dentry: pointer to the affected dentry 206 + * @xattr_name: pointer to the affected extended attribute name 207 + * @xattr_value: pointer to the new extended attribute value 208 + * @xattr_value_len: pointer to the new extended attribute value length 209 + * 210 + * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that 211 + * the current value is valid. 212 + */ 213 + int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, 214 + const void *xattr_value, size_t xattr_value_len) 215 + { 216 + return evm_protect_xattr(dentry, xattr_name, xattr_value, 217 + xattr_value_len); 218 + } 219 + 220 + /** 221 + * evm_inode_removexattr - protect the EVM extended attribute 222 + * @dentry: pointer to the affected dentry 223 + * @xattr_name: pointer to the affected extended attribute name 224 + * 225 + * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that 226 + * the current value is valid. 
227 + */ 228 + int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name) 229 + { 230 + return evm_protect_xattr(dentry, xattr_name, NULL, 0); 231 + } 232 + 233 + /** 234 + * evm_inode_post_setxattr - update 'security.evm' to reflect the changes 235 + * @dentry: pointer to the affected dentry 236 + * @xattr_name: pointer to the affected extended attribute name 237 + * @xattr_value: pointer to the new extended attribute value 238 + * @xattr_value_len: pointer to the new extended attribute value length 239 + * 240 + * Update the HMAC stored in 'security.evm' to reflect the change. 241 + * 242 + * No need to take the i_mutex lock here, as this function is called from 243 + * __vfs_setxattr_noperm(). The caller of which has taken the inode's 244 + * i_mutex lock. 245 + */ 246 + void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, 247 + const void *xattr_value, size_t xattr_value_len) 248 + { 249 + if (!evm_initialized || (!evm_protected_xattr(xattr_name) 250 + && !posix_xattr_acl(xattr_name))) 251 + return; 252 + 253 + evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len); 254 + return; 255 + } 256 + 257 + /** 258 + * evm_inode_post_removexattr - update 'security.evm' after removing the xattr 259 + * @dentry: pointer to the affected dentry 260 + * @xattr_name: pointer to the affected extended attribute name 261 + * 262 + * Update the HMAC stored in 'security.evm' to reflect removal of the xattr. 
263 + */ 264 + void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name) 265 + { 266 + struct inode *inode = dentry->d_inode; 267 + 268 + if (!evm_initialized || !evm_protected_xattr(xattr_name)) 269 + return; 270 + 271 + mutex_lock(&inode->i_mutex); 272 + evm_update_evmxattr(dentry, xattr_name, NULL, 0); 273 + mutex_unlock(&inode->i_mutex); 274 + return; 275 + } 276 + 277 + /** 278 + * evm_inode_setattr - prevent updating an invalid EVM extended attribute 279 + * @dentry: pointer to the affected dentry 280 + */ 281 + int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) 282 + { 283 + unsigned int ia_valid = attr->ia_valid; 284 + enum integrity_status evm_status; 285 + 286 + if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))) 287 + return 0; 288 + evm_status = evm_verify_current_integrity(dentry); 289 + if ((evm_status == INTEGRITY_PASS) || 290 + (evm_status == INTEGRITY_NOXATTRS)) 291 + return 0; 292 + return -EPERM; 293 + } 294 + 295 + /** 296 + * evm_inode_post_setattr - update 'security.evm' after modifying metadata 297 + * @dentry: pointer to the affected dentry 298 + * @ia_valid: for the UID and GID status 299 + * 300 + * For now, update the HMAC stored in 'security.evm' to reflect UID/GID 301 + * changes. 302 + * 303 + * This function is called from notify_change(), which expects the caller 304 + * to lock the inode's i_mutex. 
305 + */ 306 + void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) 307 + { 308 + if (!evm_initialized) 309 + return; 310 + 311 + if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) 312 + evm_update_evmxattr(dentry, NULL, NULL, 0); 313 + return; 314 + } 315 + 316 + /* 317 + * evm_inode_init_security - initializes security.evm 318 + */ 319 + int evm_inode_init_security(struct inode *inode, 320 + const struct xattr *lsm_xattr, 321 + struct xattr *evm_xattr) 322 + { 323 + struct evm_ima_xattr_data *xattr_data; 324 + int rc; 325 + 326 + if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name)) 327 + return 0; 328 + 329 + xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS); 330 + if (!xattr_data) 331 + return -ENOMEM; 332 + 333 + xattr_data->type = EVM_XATTR_HMAC; 334 + rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest); 335 + if (rc < 0) 336 + goto out; 337 + 338 + evm_xattr->value = xattr_data; 339 + evm_xattr->value_len = sizeof(*xattr_data); 340 + evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS); 341 + return 0; 342 + out: 343 + kfree(xattr_data); 344 + return rc; 345 + } 346 + EXPORT_SYMBOL_GPL(evm_inode_init_security); 347 + 348 + static int __init init_evm(void) 349 + { 350 + int error; 351 + 352 + error = evm_init_secfs(); 353 + if (error < 0) { 354 + printk(KERN_INFO "EVM: Error registering secfs\n"); 355 + goto err; 356 + } 357 + err: 358 + return error; 359 + } 360 + 361 + static void __exit cleanup_evm(void) 362 + { 363 + evm_cleanup_secfs(); 364 + if (hmac_tfm) 365 + crypto_free_shash(hmac_tfm); 366 + } 367 + 368 + /* 369 + * evm_display_config - list the EVM protected security extended attributes 370 + */ 371 + static int __init evm_display_config(void) 372 + { 373 + char **xattrname; 374 + 375 + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) 376 + printk(KERN_INFO "EVM: %s\n", *xattrname); 377 + return 0; 378 + } 379 + 380 + pure_initcall(evm_display_config); 381 + late_initcall(init_evm); 382 + 383 + 
MODULE_DESCRIPTION("Extended Verification Module"); 384 + MODULE_LICENSE("GPL");
+26
security/integrity/evm/evm_posix_acl.c
··· 1 + /* 2 + * Copyright (C) 2011 IBM Corporation 3 + * 4 + * Author: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation, version 2 of the License. 10 + */ 11 + 12 + #include <linux/module.h> 13 + #include <linux/xattr.h> 14 + 15 + int posix_xattr_acl(char *xattr) 16 + { 17 + int xattr_len = strlen(xattr); 18 + 19 + if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len) 20 + && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0)) 21 + return 1; 22 + if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len) 23 + && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0)) 24 + return 1; 25 + return 0; 26 + }
+108
security/integrity/evm/evm_secfs.c
··· 1 + /* 2 + * Copyright (C) 2010 IBM Corporation 3 + * 4 + * Authors: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation, version 2 of the License. 10 + * 11 + * File: evm_secfs.c 12 + * - Used to signal when key is on keyring 13 + * - Get the key and enable EVM 14 + */ 15 + 16 + #include <linux/uaccess.h> 17 + #include <linux/module.h> 18 + #include "evm.h" 19 + 20 + static struct dentry *evm_init_tpm; 21 + 22 + /** 23 + * evm_read_key - read() for <securityfs>/evm 24 + * 25 + * @filp: file pointer, not actually used 26 + * @buf: where to put the result 27 + * @count: maximum to send along 28 + * @ppos: where to start 29 + * 30 + * Returns number of bytes read or error code, as appropriate 31 + */ 32 + static ssize_t evm_read_key(struct file *filp, char __user *buf, 33 + size_t count, loff_t *ppos) 34 + { 35 + char temp[80]; 36 + ssize_t rc; 37 + 38 + if (*ppos != 0) 39 + return 0; 40 + 41 + sprintf(temp, "%d", evm_initialized); 42 + rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); 43 + 44 + return rc; 45 + } 46 + 47 + /** 48 + * evm_write_key - write() for <securityfs>/evm 49 + * @file: file pointer, not actually used 50 + * @buf: where to get the data from 51 + * @count: bytes sent 52 + * @ppos: where to start 53 + * 54 + * Used to signal that key is on the kernel key ring. 
55 + * - get the integrity hmac key from the kernel key ring 56 + * - create list of hmac protected extended attributes 57 + * Returns number of bytes written or error code, as appropriate 58 + */ 59 + static ssize_t evm_write_key(struct file *file, const char __user *buf, 60 + size_t count, loff_t *ppos) 61 + { 62 + char temp[80]; 63 + int i, error; 64 + 65 + if (!capable(CAP_SYS_ADMIN) || evm_initialized) 66 + return -EPERM; 67 + 68 + if (count >= sizeof(temp) || count == 0) 69 + return -EINVAL; 70 + 71 + if (copy_from_user(temp, buf, count) != 0) 72 + return -EFAULT; 73 + 74 + temp[count] = '\0'; 75 + 76 + if ((sscanf(temp, "%d", &i) != 1) || (i != 1)) 77 + return -EINVAL; 78 + 79 + error = evm_init_key(); 80 + if (!error) { 81 + evm_initialized = 1; 82 + pr_info("EVM: initialized\n"); 83 + } else 84 + pr_err("EVM: initialization failed\n"); 85 + return count; 86 + } 87 + 88 + static const struct file_operations evm_key_ops = { 89 + .read = evm_read_key, 90 + .write = evm_write_key, 91 + }; 92 + 93 + int __init evm_init_secfs(void) 94 + { 95 + int error = 0; 96 + 97 + evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP, 98 + NULL, NULL, &evm_key_ops); 99 + if (!evm_init_tpm || IS_ERR(evm_init_tpm)) 100 + error = -EFAULT; 101 + return error; 102 + } 103 + 104 + void __exit evm_cleanup_secfs(void) 105 + { 106 + if (evm_init_tpm) 107 + securityfs_remove(evm_init_tpm); 108 + }
+172
security/integrity/iint.c
··· 1 + /* 2 + * Copyright (C) 2008 IBM Corporation 3 + * 4 + * Authors: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License as 9 + * published by the Free Software Foundation, version 2 of the 10 + * License. 11 + * 12 + * File: integrity_iint.c 13 + * - implements the integrity hooks: integrity_inode_alloc, 14 + * integrity_inode_free 15 + * - cache integrity information associated with an inode 16 + * using a rbtree tree. 17 + */ 18 + #include <linux/slab.h> 19 + #include <linux/module.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/rbtree.h> 22 + #include "integrity.h" 23 + 24 + static struct rb_root integrity_iint_tree = RB_ROOT; 25 + static DEFINE_SPINLOCK(integrity_iint_lock); 26 + static struct kmem_cache *iint_cache __read_mostly; 27 + 28 + int iint_initialized; 29 + 30 + /* 31 + * __integrity_iint_find - return the iint associated with an inode 32 + */ 33 + static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode) 34 + { 35 + struct integrity_iint_cache *iint; 36 + struct rb_node *n = integrity_iint_tree.rb_node; 37 + 38 + assert_spin_locked(&integrity_iint_lock); 39 + 40 + while (n) { 41 + iint = rb_entry(n, struct integrity_iint_cache, rb_node); 42 + 43 + if (inode < iint->inode) 44 + n = n->rb_left; 45 + else if (inode > iint->inode) 46 + n = n->rb_right; 47 + else 48 + break; 49 + } 50 + if (!n) 51 + return NULL; 52 + 53 + return iint; 54 + } 55 + 56 + /* 57 + * integrity_iint_find - return the iint associated with an inode 58 + */ 59 + struct integrity_iint_cache *integrity_iint_find(struct inode *inode) 60 + { 61 + struct integrity_iint_cache *iint; 62 + 63 + if (!IS_IMA(inode)) 64 + return NULL; 65 + 66 + spin_lock(&integrity_iint_lock); 67 + iint = __integrity_iint_find(inode); 68 + spin_unlock(&integrity_iint_lock); 69 + 70 + return iint; 71 + } 72 + 73 + static void iint_free(struct 
integrity_iint_cache *iint) 74 + { 75 + iint->version = 0; 76 + iint->flags = 0UL; 77 + iint->evm_status = INTEGRITY_UNKNOWN; 78 + kmem_cache_free(iint_cache, iint); 79 + } 80 + 81 + /** 82 + * integrity_inode_alloc - allocate an iint associated with an inode 83 + * @inode: pointer to the inode 84 + */ 85 + int integrity_inode_alloc(struct inode *inode) 86 + { 87 + struct rb_node **p; 88 + struct rb_node *new_node, *parent = NULL; 89 + struct integrity_iint_cache *new_iint, *test_iint; 90 + int rc; 91 + 92 + new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS); 93 + if (!new_iint) 94 + return -ENOMEM; 95 + 96 + new_iint->inode = inode; 97 + new_node = &new_iint->rb_node; 98 + 99 + mutex_lock(&inode->i_mutex); /* i_flags */ 100 + spin_lock(&integrity_iint_lock); 101 + 102 + p = &integrity_iint_tree.rb_node; 103 + while (*p) { 104 + parent = *p; 105 + test_iint = rb_entry(parent, struct integrity_iint_cache, 106 + rb_node); 107 + rc = -EEXIST; 108 + if (inode < test_iint->inode) 109 + p = &(*p)->rb_left; 110 + else if (inode > test_iint->inode) 111 + p = &(*p)->rb_right; 112 + else 113 + goto out_err; 114 + } 115 + 116 + inode->i_flags |= S_IMA; 117 + rb_link_node(new_node, parent, p); 118 + rb_insert_color(new_node, &integrity_iint_tree); 119 + 120 + spin_unlock(&integrity_iint_lock); 121 + mutex_unlock(&inode->i_mutex); /* i_flags */ 122 + 123 + return 0; 124 + out_err: 125 + spin_unlock(&integrity_iint_lock); 126 + mutex_unlock(&inode->i_mutex); /* i_flags */ 127 + iint_free(new_iint); 128 + 129 + return rc; 130 + } 131 + 132 + /** 133 + * integrity_inode_free - called on security_inode_free 134 + * @inode: pointer to the inode 135 + * 136 + * Free the integrity information(iint) associated with an inode. 
137 + */ 138 + void integrity_inode_free(struct inode *inode) 139 + { 140 + struct integrity_iint_cache *iint; 141 + 142 + if (!IS_IMA(inode)) 143 + return; 144 + 145 + spin_lock(&integrity_iint_lock); 146 + iint = __integrity_iint_find(inode); 147 + rb_erase(&iint->rb_node, &integrity_iint_tree); 148 + spin_unlock(&integrity_iint_lock); 149 + 150 + iint_free(iint); 151 + } 152 + 153 + static void init_once(void *foo) 154 + { 155 + struct integrity_iint_cache *iint = foo; 156 + 157 + memset(iint, 0, sizeof *iint); 158 + iint->version = 0; 159 + iint->flags = 0UL; 160 + mutex_init(&iint->mutex); 161 + iint->evm_status = INTEGRITY_UNKNOWN; 162 + } 163 + 164 + static int __init integrity_iintcache_init(void) 165 + { 166 + iint_cache = 167 + kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache), 168 + 0, SLAB_PANIC, init_once); 169 + iint_initialized = 1; 170 + return 0; 171 + } 172 + security_initcall(integrity_iintcache_init);
+1
security/integrity/ima/Kconfig
··· 3 3 config IMA 4 4 bool "Integrity Measurement Architecture(IMA)" 5 5 depends on SECURITY 6 + select INTEGRITY 6 7 select SECURITYFS 7 8 select CRYPTO 8 9 select CRYPTO_HMAC
+1 -1
security/integrity/ima/Makefile
··· 6 6 obj-$(CONFIG_IMA) += ima.o 7 7 8 8 ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ 9 - ima_policy.o ima_iint.o ima_audit.o 9 + ima_policy.o ima_audit.o
+9 -21
security/integrity/ima/ima.h
··· 24 24 #include <linux/tpm.h> 25 25 #include <linux/audit.h> 26 26 27 + #include "../integrity.h" 28 + 27 29 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII }; 28 30 enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; 29 31 30 32 /* digest size for IMA, fits SHA1 or MD5 */ 31 - #define IMA_DIGEST_SIZE 20 33 + #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE 32 34 #define IMA_EVENT_NAME_LEN_MAX 255 33 35 34 36 #define IMA_HASH_BITS 9 35 37 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) 36 38 37 39 /* set during initialization */ 38 - extern int iint_initialized; 39 40 extern int ima_initialized; 40 41 extern int ima_used_chip; 41 42 extern char *ima_hash; ··· 97 96 return hash_long(*digest, IMA_HASH_BITS); 98 97 } 99 98 100 - /* iint cache flags */ 101 - #define IMA_MEASURED 0x01 102 - 103 - /* integrity data associated with an inode */ 104 - struct ima_iint_cache { 105 - struct rb_node rb_node; /* rooted in ima_iint_tree */ 106 - struct inode *inode; /* back pointer to inode in question */ 107 - u64 version; /* track inode changes */ 108 - unsigned char flags; 109 - u8 digest[IMA_DIGEST_SIZE]; 110 - struct mutex mutex; /* protects: version, flags, digest */ 111 - }; 112 - 113 99 /* LIM API function definitions */ 114 100 int ima_must_measure(struct inode *inode, int mask, int function); 115 - int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file); 116 - void ima_store_measurement(struct ima_iint_cache *iint, struct file *file, 101 + int ima_collect_measurement(struct integrity_iint_cache *iint, 102 + struct file *file); 103 + void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, 117 104 const unsigned char *filename); 118 105 int ima_store_template(struct ima_template_entry *entry, int violation, 119 106 struct inode *inode); 120 - void ima_template_show(struct seq_file *m, void *e, 121 - enum ima_show_type show); 107 + void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show); 122 108 123 109 /* 
rbtree tree calls to lookup, insert, delete 124 110 * integrity data associated with an inode. 125 111 */ 126 - struct ima_iint_cache *ima_iint_insert(struct inode *inode); 127 - struct ima_iint_cache *ima_iint_find(struct inode *inode); 112 + struct integrity_iint_cache *integrity_iint_insert(struct inode *inode); 113 + struct integrity_iint_cache *integrity_iint_find(struct inode *inode); 128 114 129 115 /* IMA policy related functions */ 130 116 enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+4 -3
security/integrity/ima/ima_api.c
··· 126 126 * 127 127 * Return 0 on success, error code otherwise 128 128 */ 129 - int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file) 129 + int ima_collect_measurement(struct integrity_iint_cache *iint, 130 + struct file *file) 130 131 { 131 132 int result = -EEXIST; 132 133 ··· 157 156 * 158 157 * Must be called with iint->mutex held. 159 158 */ 160 - void ima_store_measurement(struct ima_iint_cache *iint, struct file *file, 161 - const unsigned char *filename) 159 + void ima_store_measurement(struct integrity_iint_cache *iint, 160 + struct file *file, const unsigned char *filename) 162 161 { 163 162 const char *op = "add_template_measure"; 164 163 const char *audit_cause = "ENOMEM";
+1 -1
security/integrity/ima/ima_fs.c
··· 287 287 /* 288 288 * ima_open_policy: sequentialize access to the policy file 289 289 */ 290 - int ima_open_policy(struct inode * inode, struct file * filp) 290 + static int ima_open_policy(struct inode * inode, struct file * filp) 291 291 { 292 292 /* No point in being allowed to open it if you aren't going to write */ 293 293 if (!(filp->f_flags & O_WRONLY))
-169
security/integrity/ima/ima_iint.c
··· 1 - /* 2 - * Copyright (C) 2008 IBM Corporation 3 - * 4 - * Authors: 5 - * Mimi Zohar <zohar@us.ibm.com> 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public License as 9 - * published by the Free Software Foundation, version 2 of the 10 - * License. 11 - * 12 - * File: ima_iint.c 13 - * - implements the IMA hooks: ima_inode_alloc, ima_inode_free 14 - * - cache integrity information associated with an inode 15 - * using a rbtree tree. 16 - */ 17 - #include <linux/slab.h> 18 - #include <linux/module.h> 19 - #include <linux/spinlock.h> 20 - #include <linux/rbtree.h> 21 - #include "ima.h" 22 - 23 - static struct rb_root ima_iint_tree = RB_ROOT; 24 - static DEFINE_SPINLOCK(ima_iint_lock); 25 - static struct kmem_cache *iint_cache __read_mostly; 26 - 27 - int iint_initialized = 0; 28 - 29 - /* 30 - * __ima_iint_find - return the iint associated with an inode 31 - */ 32 - static struct ima_iint_cache *__ima_iint_find(struct inode *inode) 33 - { 34 - struct ima_iint_cache *iint; 35 - struct rb_node *n = ima_iint_tree.rb_node; 36 - 37 - assert_spin_locked(&ima_iint_lock); 38 - 39 - while (n) { 40 - iint = rb_entry(n, struct ima_iint_cache, rb_node); 41 - 42 - if (inode < iint->inode) 43 - n = n->rb_left; 44 - else if (inode > iint->inode) 45 - n = n->rb_right; 46 - else 47 - break; 48 - } 49 - if (!n) 50 - return NULL; 51 - 52 - return iint; 53 - } 54 - 55 - /* 56 - * ima_iint_find - return the iint associated with an inode 57 - */ 58 - struct ima_iint_cache *ima_iint_find(struct inode *inode) 59 - { 60 - struct ima_iint_cache *iint; 61 - 62 - if (!IS_IMA(inode)) 63 - return NULL; 64 - 65 - spin_lock(&ima_iint_lock); 66 - iint = __ima_iint_find(inode); 67 - spin_unlock(&ima_iint_lock); 68 - 69 - return iint; 70 - } 71 - 72 - static void iint_free(struct ima_iint_cache *iint) 73 - { 74 - iint->version = 0; 75 - iint->flags = 0UL; 76 - kmem_cache_free(iint_cache, iint); 77 - } 78 - 79 - /** 80 
- * ima_inode_alloc - allocate an iint associated with an inode 81 - * @inode: pointer to the inode 82 - */ 83 - int ima_inode_alloc(struct inode *inode) 84 - { 85 - struct rb_node **p; 86 - struct rb_node *new_node, *parent = NULL; 87 - struct ima_iint_cache *new_iint, *test_iint; 88 - int rc; 89 - 90 - new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS); 91 - if (!new_iint) 92 - return -ENOMEM; 93 - 94 - new_iint->inode = inode; 95 - new_node = &new_iint->rb_node; 96 - 97 - mutex_lock(&inode->i_mutex); /* i_flags */ 98 - spin_lock(&ima_iint_lock); 99 - 100 - p = &ima_iint_tree.rb_node; 101 - while (*p) { 102 - parent = *p; 103 - test_iint = rb_entry(parent, struct ima_iint_cache, rb_node); 104 - 105 - rc = -EEXIST; 106 - if (inode < test_iint->inode) 107 - p = &(*p)->rb_left; 108 - else if (inode > test_iint->inode) 109 - p = &(*p)->rb_right; 110 - else 111 - goto out_err; 112 - } 113 - 114 - inode->i_flags |= S_IMA; 115 - rb_link_node(new_node, parent, p); 116 - rb_insert_color(new_node, &ima_iint_tree); 117 - 118 - spin_unlock(&ima_iint_lock); 119 - mutex_unlock(&inode->i_mutex); /* i_flags */ 120 - 121 - return 0; 122 - out_err: 123 - spin_unlock(&ima_iint_lock); 124 - mutex_unlock(&inode->i_mutex); /* i_flags */ 125 - iint_free(new_iint); 126 - 127 - return rc; 128 - } 129 - 130 - /** 131 - * ima_inode_free - called on security_inode_free 132 - * @inode: pointer to the inode 133 - * 134 - * Free the integrity information(iint) associated with an inode. 
135 - */ 136 - void ima_inode_free(struct inode *inode) 137 - { 138 - struct ima_iint_cache *iint; 139 - 140 - if (!IS_IMA(inode)) 141 - return; 142 - 143 - spin_lock(&ima_iint_lock); 144 - iint = __ima_iint_find(inode); 145 - rb_erase(&iint->rb_node, &ima_iint_tree); 146 - spin_unlock(&ima_iint_lock); 147 - 148 - iint_free(iint); 149 - } 150 - 151 - static void init_once(void *foo) 152 - { 153 - struct ima_iint_cache *iint = foo; 154 - 155 - memset(iint, 0, sizeof *iint); 156 - iint->version = 0; 157 - iint->flags = 0UL; 158 - mutex_init(&iint->mutex); 159 - } 160 - 161 - static int __init ima_iintcache_init(void) 162 - { 163 - iint_cache = 164 - kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 165 - SLAB_PANIC, init_once); 166 - iint_initialized = 1; 167 - return 0; 168 - } 169 - security_initcall(ima_iintcache_init);
+7 -6
security/integrity/ima/ima_main.c
··· 22 22 #include <linux/mount.h> 23 23 #include <linux/mman.h> 24 24 #include <linux/slab.h> 25 + #include <linux/ima.h> 25 26 26 27 #include "ima.h" 27 28 ··· 83 82 "open_writers"); 84 83 } 85 84 86 - static void ima_check_last_writer(struct ima_iint_cache *iint, 85 + static void ima_check_last_writer(struct integrity_iint_cache *iint, 87 86 struct inode *inode, 88 87 struct file *file) 89 88 { ··· 106 105 void ima_file_free(struct file *file) 107 106 { 108 107 struct inode *inode = file->f_dentry->d_inode; 109 - struct ima_iint_cache *iint; 108 + struct integrity_iint_cache *iint; 110 109 111 110 if (!iint_initialized || !S_ISREG(inode->i_mode)) 112 111 return; 113 112 114 - iint = ima_iint_find(inode); 113 + iint = integrity_iint_find(inode); 115 114 if (!iint) 116 115 return; 117 116 ··· 122 121 int mask, int function) 123 122 { 124 123 struct inode *inode = file->f_dentry->d_inode; 125 - struct ima_iint_cache *iint; 124 + struct integrity_iint_cache *iint; 126 125 int rc = 0; 127 126 128 127 if (!ima_initialized || !S_ISREG(inode->i_mode)) ··· 132 131 if (rc != 0) 133 132 return rc; 134 133 retry: 135 - iint = ima_iint_find(inode); 134 + iint = integrity_iint_find(inode); 136 135 if (!iint) { 137 - rc = ima_inode_alloc(inode); 136 + rc = integrity_inode_alloc(inode); 138 137 if (!rc || rc == -EEXIST) 139 138 goto retry; 140 139 return rc;
+50
security/integrity/integrity.h
··· 1 + /* 2 + * Copyright (C) 2009-2010 IBM Corporation 3 + * 4 + * Authors: 5 + * Mimi Zohar <zohar@us.ibm.com> 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License as 9 + * published by the Free Software Foundation, version 2 of the 10 + * License. 11 + * 12 + */ 13 + 14 + #include <linux/types.h> 15 + #include <linux/integrity.h> 16 + #include <crypto/sha.h> 17 + 18 + /* iint cache flags */ 19 + #define IMA_MEASURED 0x01 20 + 21 + enum evm_ima_xattr_type { 22 + IMA_XATTR_DIGEST = 0x01, 23 + EVM_XATTR_HMAC, 24 + EVM_IMA_XATTR_DIGSIG, 25 + }; 26 + 27 + struct evm_ima_xattr_data { 28 + u8 type; 29 + u8 digest[SHA1_DIGEST_SIZE]; 30 + } __attribute__((packed)); 31 + 32 + /* integrity data associated with an inode */ 33 + struct integrity_iint_cache { 34 + struct rb_node rb_node; /* rooted in integrity_iint_tree */ 35 + struct inode *inode; /* back pointer to inode in question */ 36 + u64 version; /* track inode changes */ 37 + unsigned char flags; 38 + u8 digest[SHA1_DIGEST_SIZE]; 39 + struct mutex mutex; /* protects: version, flags, digest */ 40 + enum integrity_status evm_status; 41 + }; 42 + 43 + /* rbtree tree calls to lookup, insert, delete 44 + * integrity data associated with an inode. 45 + */ 46 + struct integrity_iint_cache *integrity_iint_insert(struct inode *inode); 47 + struct integrity_iint_cache *integrity_iint_find(struct inode *inode); 48 + 49 + /* set during initialization */ 50 + extern int iint_initialized;
+1 -1
security/keys/Makefile
··· 14 14 user_defined.o 15 15 16 16 obj-$(CONFIG_TRUSTED_KEYS) += trusted.o 17 - obj-$(CONFIG_ENCRYPTED_KEYS) += ecryptfs_format.o encrypted.o 17 + obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/ 18 18 obj-$(CONFIG_KEYS_COMPAT) += compat.o 19 19 obj-$(CONFIG_PROC_FS) += proc.o 20 20 obj-$(CONFIG_SYSCTL) += sysctl.o
security/keys/ecryptfs_format.c security/keys/encrypted-keys/ecryptfs_format.c
security/keys/ecryptfs_format.h security/keys/encrypted-keys/ecryptfs_format.h
+6
security/keys/encrypted-keys/Makefile
··· 1 + # 2 + # Makefile for encrypted keys 3 + # 4 + 5 + obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o 6 + obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o
+45
security/keys/encrypted-keys/masterkey_trusted.c
··· 1 + /* 2 + * Copyright (C) 2010 IBM Corporation 3 + * Copyright (C) 2010 Politecnico di Torino, Italy 4 + * TORSEC group -- http://security.polito.it 5 + * 6 + * Authors: 7 + * Mimi Zohar <zohar@us.ibm.com> 8 + * Roberto Sassu <roberto.sassu@polito.it> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation, version 2 of the License. 13 + * 14 + * See Documentation/security/keys-trusted-encrypted.txt 15 + */ 16 + 17 + #include <linux/uaccess.h> 18 + #include <linux/module.h> 19 + #include <linux/err.h> 20 + #include <keys/trusted-type.h> 21 + 22 + /* 23 + * request_trusted_key - request the trusted key 24 + * 25 + * Trusted keys are sealed to PCRs and other metadata. Although userspace 26 + * manages both trusted/encrypted key-types, like the encrypted key type 27 + * data, trusted key type data is not visible decrypted from userspace. 28 + */ 29 + struct key *request_trusted_key(const char *trusted_desc, 30 + u8 **master_key, size_t *master_keylen) 31 + { 32 + struct trusted_key_payload *tpayload; 33 + struct key *tkey; 34 + 35 + tkey = request_key(&key_type_trusted, trusted_desc, NULL); 36 + if (IS_ERR(tkey)) 37 + goto error; 38 + 39 + down_read(&tkey->sem); 40 + tpayload = rcu_dereference(tkey->payload.data); 41 + *master_key = tpayload->key; 42 + *master_keylen = tpayload->key_len; 43 + error: 44 + return tkey; 45 + }
+19 -30
security/keys/encrypted.c security/keys/encrypted-keys/encrypted.c
··· 299 299 } 300 300 301 301 /* 302 - * request_trusted_key - request the trusted key 303 - * 304 - * Trusted keys are sealed to PCRs and other metadata. Although userspace 305 - * manages both trusted/encrypted key-types, like the encrypted key type 306 - * data, trusted key type data is not visible decrypted from userspace. 307 - */ 308 - static struct key *request_trusted_key(const char *trusted_desc, 309 - u8 **master_key, size_t *master_keylen) 310 - { 311 - struct trusted_key_payload *tpayload; 312 - struct key *tkey; 313 - 314 - tkey = request_key(&key_type_trusted, trusted_desc, NULL); 315 - if (IS_ERR(tkey)) 316 - goto error; 317 - 318 - down_read(&tkey->sem); 319 - tpayload = rcu_dereference(tkey->payload.data); 320 - *master_key = tpayload->key; 321 - *master_keylen = tpayload->key_len; 322 - error: 323 - return tkey; 324 - } 325 - 326 - /* 327 302 * request_user_key - request the user key 328 303 * 329 304 * Use a user provided key to encrypt/decrypt an encrypted-key. ··· 444 469 goto out; 445 470 446 471 if (IS_ERR(mkey)) { 447 - pr_info("encrypted_key: key %s not found", 448 - epayload->master_desc); 472 + int ret = PTR_ERR(epayload); 473 + 474 + if (ret == -ENOTSUPP) 475 + pr_info("encrypted_key: key %s not supported", 476 + epayload->master_desc); 477 + else 478 + pr_info("encrypted_key: key %s not found", 479 + epayload->master_desc); 449 480 goto out; 450 481 } 451 482 ··· 667 686 return -EINVAL; 668 687 669 688 hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2; 670 - hex2bin(epayload->iv, hex_encoded_iv, ivsize); 671 - hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen); 689 + ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize); 690 + if (ret < 0) 691 + return -EINVAL; 692 + ret = hex2bin(epayload->encrypted_data, hex_encoded_data, 693 + encrypted_datalen); 694 + if (ret < 0) 695 + return -EINVAL; 672 696 673 697 hmac = epayload->format + epayload->datablob_len; 674 - hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 
2), HASH_SIZE); 698 + ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), 699 + HASH_SIZE); 700 + if (ret < 0) 701 + return -EINVAL; 675 702 676 703 mkey = request_master_key(epayload, &master_key, &master_keylen); 677 704 if (IS_ERR(mkey))
+11
security/keys/encrypted.h security/keys/encrypted-keys/encrypted.h
··· 2 2 #define __ENCRYPTED_KEY_H 3 3 4 4 #define ENCRYPTED_DEBUG 0 5 + #ifdef CONFIG_TRUSTED_KEYS 6 + extern struct key *request_trusted_key(const char *trusted_desc, 7 + u8 **master_key, size_t *master_keylen); 8 + #else 9 + static inline struct key *request_trusted_key(const char *trusted_desc, 10 + u8 **master_key, 11 + size_t *master_keylen) 12 + { 13 + return ERR_PTR(-EOPNOTSUPP); 14 + } 15 + #endif 5 16 6 17 #if ENCRYPTED_DEBUG 7 18 static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+275 -109
security/keys/gc.c
··· 1 1 /* Key garbage collector 2 2 * 3 - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. 3 + * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved. 4 4 * Written by David Howells (dhowells@redhat.com) 5 5 * 6 6 * This program is free software; you can redistribute it and/or ··· 10 10 */ 11 11 12 12 #include <linux/module.h> 13 + #include <linux/slab.h> 14 + #include <linux/security.h> 13 15 #include <keys/keyring-type.h> 14 16 #include "internal.h" 15 17 ··· 21 19 unsigned key_gc_delay = 5 * 60; 22 20 23 21 /* 24 - * Reaper 22 + * Reaper for unused keys. 23 + */ 24 + static void key_garbage_collector(struct work_struct *work); 25 + DECLARE_WORK(key_gc_work, key_garbage_collector); 26 + 27 + /* 28 + * Reaper for links from keyrings to dead keys. 25 29 */ 26 30 static void key_gc_timer_func(unsigned long); 27 - static void key_garbage_collector(struct work_struct *); 28 31 static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0); 29 - static DECLARE_WORK(key_gc_work, key_garbage_collector); 30 - static key_serial_t key_gc_cursor; /* the last key the gc considered */ 31 - static bool key_gc_again; 32 - static unsigned long key_gc_executing; 32 + 33 33 static time_t key_gc_next_run = LONG_MAX; 34 - static time_t key_gc_new_timer; 34 + static struct key_type *key_gc_dead_keytype; 35 + 36 + static unsigned long key_gc_flags; 37 + #define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */ 38 + #define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */ 39 + #define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */ 40 + 41 + 42 + /* 43 + * Any key whose type gets unregistered will be re-typed to this if it can't be 44 + * immediately unlinked. 45 + */ 46 + struct key_type key_type_dead = { 47 + .name = "dead", 48 + }; 35 49 36 50 /* 37 51 * Schedule a garbage collection run. 
··· 60 42 61 43 kenter("%ld", gc_at - now); 62 44 63 - if (gc_at <= now) { 64 - schedule_work(&key_gc_work); 45 + if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { 46 + kdebug("IMMEDIATE"); 47 + queue_work(system_nrt_wq, &key_gc_work); 65 48 } else if (gc_at < key_gc_next_run) { 49 + kdebug("DEFERRED"); 50 + key_gc_next_run = gc_at; 66 51 expires = jiffies + (gc_at - now) * HZ; 67 52 mod_timer(&key_gc_timer, expires); 68 53 } 69 54 } 70 55 71 56 /* 72 - * The garbage collector timer kicked off 57 + * Some key's cleanup time was met after it expired, so we need to get the 58 + * reaper to go through a cycle finding expired keys. 73 59 */ 74 60 static void key_gc_timer_func(unsigned long data) 75 61 { 76 62 kenter(""); 77 63 key_gc_next_run = LONG_MAX; 78 - schedule_work(&key_gc_work); 64 + set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags); 65 + queue_work(system_nrt_wq, &key_gc_work); 66 + } 67 + 68 + /* 69 + * wait_on_bit() sleep function for uninterruptible waiting 70 + */ 71 + static int key_gc_wait_bit(void *flags) 72 + { 73 + schedule(); 74 + return 0; 75 + } 76 + 77 + /* 78 + * Reap keys of dead type. 79 + * 80 + * We use three flags to make sure we see three complete cycles of the garbage 81 + * collector: the first to mark keys of that type as being dead, the second to 82 + * collect dead links and the third to clean up the dead keys. We have to be 83 + * careful as there may already be a cycle in progress. 84 + * 85 + * The caller must be holding key_types_sem. 
86 + */ 87 + void key_gc_keytype(struct key_type *ktype) 88 + { 89 + kenter("%s", ktype->name); 90 + 91 + key_gc_dead_keytype = ktype; 92 + set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); 93 + smp_mb(); 94 + set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags); 95 + 96 + kdebug("schedule"); 97 + queue_work(system_nrt_wq, &key_gc_work); 98 + 99 + kdebug("sleep"); 100 + wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit, 101 + TASK_UNINTERRUPTIBLE); 102 + 103 + key_gc_dead_keytype = NULL; 104 + kleave(""); 79 105 } 80 106 81 107 /* 82 108 * Garbage collect pointers from a keyring. 83 109 * 84 - * Return true if we altered the keyring. 110 + * Not called with any locks held. The keyring's key struct will not be 111 + * deallocated under us as only our caller may deallocate it. 85 112 */ 86 - static bool key_gc_keyring(struct key *keyring, time_t limit) 87 - __releases(key_serial_lock) 113 + static void key_gc_keyring(struct key *keyring, time_t limit) 88 114 { 89 115 struct keyring_list *klist; 90 116 struct key *key; ··· 155 93 unlock_dont_gc: 156 94 rcu_read_unlock(); 157 95 dont_gc: 158 - kleave(" = false"); 159 - return false; 96 + kleave(" [no gc]"); 97 + return; 160 98 161 99 do_gc: 162 100 rcu_read_unlock(); 163 - key_gc_cursor = keyring->serial; 164 - key_get(keyring); 165 - spin_unlock(&key_serial_lock); 101 + 166 102 keyring_gc(keyring, limit); 167 - key_put(keyring); 168 - kleave(" = true"); 169 - return true; 103 + kleave(" [gc]"); 170 104 } 171 105 172 106 /* 173 - * Garbage collector for keys. 
This involves scanning the keyrings for dead, 174 - * expired and revoked keys that have overstayed their welcome 107 + * Garbage collect an unreferenced, detached key 108 + */ 109 + static noinline void key_gc_unused_key(struct key *key) 110 + { 111 + key_check(key); 112 + 113 + security_key_free(key); 114 + 115 + /* deal with the user's key tracking and quota */ 116 + if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { 117 + spin_lock(&key->user->lock); 118 + key->user->qnkeys--; 119 + key->user->qnbytes -= key->quotalen; 120 + spin_unlock(&key->user->lock); 121 + } 122 + 123 + atomic_dec(&key->user->nkeys); 124 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 125 + atomic_dec(&key->user->nikeys); 126 + 127 + key_user_put(key->user); 128 + 129 + /* now throw away the key memory */ 130 + if (key->type->destroy) 131 + key->type->destroy(key); 132 + 133 + kfree(key->description); 134 + 135 + #ifdef KEY_DEBUGGING 136 + key->magic = KEY_DEBUG_MAGIC_X; 137 + #endif 138 + kmem_cache_free(key_jar, key); 139 + } 140 + 141 + /* 142 + * Garbage collector for unused keys. 143 + * 144 + * This is done in process context so that we don't have to disable interrupts 145 + * all over the place. key_put() schedules this rather than trying to do the 146 + * cleanup itself, which means key_put() doesn't have to sleep. 
175 147 */ 176 148 static void key_garbage_collector(struct work_struct *work) 177 149 { 178 - struct rb_node *rb; 179 - key_serial_t cursor; 180 - struct key *key, *xkey; 181 - time_t new_timer = LONG_MAX, limit, now; 150 + static u8 gc_state; /* Internal persistent state */ 151 + #define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */ 152 + #define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */ 153 + #define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */ 154 + #define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */ 155 + #define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */ 156 + #define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */ 157 + #define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */ 182 158 183 - now = current_kernel_time().tv_sec; 184 - kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now); 159 + struct rb_node *cursor; 160 + struct key *key; 161 + time_t new_timer, limit; 185 162 186 - if (test_and_set_bit(0, &key_gc_executing)) { 187 - key_schedule_gc(current_kernel_time().tv_sec + 1); 188 - kleave(" [busy; deferring]"); 189 - return; 190 - } 163 + kenter("[%lx,%x]", key_gc_flags, gc_state); 191 164 192 - limit = now; 165 + limit = current_kernel_time().tv_sec; 193 166 if (limit > key_gc_delay) 194 167 limit -= key_gc_delay; 195 168 else 196 169 limit = key_gc_delay; 197 170 171 + /* Work out what we're going to be doing in this pass */ 172 + gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2; 173 + gc_state <<= 1; 174 + if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags)) 175 + gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER; 176 + 177 + if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) 178 + gc_state |= KEY_GC_REAPING_DEAD_1; 179 + kdebug("new pass %x", gc_state); 180 + 181 + new_timer = LONG_MAX; 182 + 183 + /* As only this function is permitted to remove things from the key 184 + * serial tree, if cursor is non-NULL then it 
will always point to a 185 + * valid node in the tree - even if lock got dropped. 186 + */ 198 187 spin_lock(&key_serial_lock); 188 + cursor = rb_first(&key_serial_tree); 199 189 200 - if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) { 201 - spin_unlock(&key_serial_lock); 202 - clear_bit(0, &key_gc_executing); 203 - return; 204 - } 190 + continue_scanning: 191 + while (cursor) { 192 + key = rb_entry(cursor, struct key, serial_node); 193 + cursor = rb_next(cursor); 205 194 206 - cursor = key_gc_cursor; 207 - if (cursor < 0) 208 - cursor = 0; 209 - if (cursor > 0) 210 - new_timer = key_gc_new_timer; 211 - else 212 - key_gc_again = false; 195 + if (atomic_read(&key->usage) == 0) 196 + goto found_unreferenced_key; 213 197 214 - /* find the first key above the cursor */ 215 - key = NULL; 216 - rb = key_serial_tree.rb_node; 217 - while (rb) { 218 - xkey = rb_entry(rb, struct key, serial_node); 219 - if (cursor < xkey->serial) { 220 - key = xkey; 221 - rb = rb->rb_left; 222 - } else if (cursor > xkey->serial) { 223 - rb = rb->rb_right; 224 - } else { 225 - rb = rb_next(rb); 226 - if (!rb) 227 - goto reached_the_end; 228 - key = rb_entry(rb, struct key, serial_node); 229 - break; 230 - } 231 - } 232 - 233 - if (!key) 234 - goto reached_the_end; 235 - 236 - /* trawl through the keys looking for keyrings */ 237 - for (;;) { 238 - if (key->expiry > limit && key->expiry < new_timer) { 239 - kdebug("will expire %x in %ld", 240 - key_serial(key), key->expiry - limit); 241 - new_timer = key->expiry; 198 + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) { 199 + if (key->type == key_gc_dead_keytype) { 200 + gc_state |= KEY_GC_FOUND_DEAD_KEY; 201 + set_bit(KEY_FLAG_DEAD, &key->flags); 202 + key->perm = 0; 203 + goto skip_dead_key; 204 + } 242 205 } 243 206 244 - if (key->type == &key_type_keyring && 245 - key_gc_keyring(key, limit)) 246 - /* the gc had to release our lock so that the keyring 247 - * could be modified, so we have to get it again */ 248 - goto gc_released_our_lock; 207 
+ if (gc_state & KEY_GC_SET_TIMER) { 208 + if (key->expiry > limit && key->expiry < new_timer) { 209 + kdebug("will expire %x in %ld", 210 + key_serial(key), key->expiry - limit); 211 + new_timer = key->expiry; 212 + } 213 + } 249 214 250 - rb = rb_next(&key->serial_node); 251 - if (!rb) 252 - goto reached_the_end; 253 - key = rb_entry(rb, struct key, serial_node); 215 + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) 216 + if (key->type == key_gc_dead_keytype) 217 + gc_state |= KEY_GC_FOUND_DEAD_KEY; 218 + 219 + if ((gc_state & KEY_GC_REAPING_LINKS) || 220 + unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) { 221 + if (key->type == &key_type_keyring) 222 + goto found_keyring; 223 + } 224 + 225 + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) 226 + if (key->type == key_gc_dead_keytype) 227 + goto destroy_dead_key; 228 + 229 + skip_dead_key: 230 + if (spin_is_contended(&key_serial_lock) || need_resched()) 231 + goto contended; 254 232 } 255 233 256 - gc_released_our_lock: 257 - kdebug("gc_released_our_lock"); 258 - key_gc_new_timer = new_timer; 259 - key_gc_again = true; 260 - clear_bit(0, &key_gc_executing); 261 - schedule_work(&key_gc_work); 262 - kleave(" [continue]"); 263 - return; 264 - 265 - /* when we reach the end of the run, we set the timer for the next one */ 266 - reached_the_end: 267 - kdebug("reached_the_end"); 234 + contended: 268 235 spin_unlock(&key_serial_lock); 269 - key_gc_new_timer = new_timer; 270 - key_gc_cursor = 0; 271 - clear_bit(0, &key_gc_executing); 272 236 273 - if (key_gc_again) { 274 - /* there may have been a key that expired whilst we were 275 - * scanning, so if we discarded any links we should do another 276 - * scan */ 277 - new_timer = now + 1; 278 - key_schedule_gc(new_timer); 279 - } else if (new_timer < LONG_MAX) { 237 + maybe_resched: 238 + if (cursor) { 239 + cond_resched(); 240 + spin_lock(&key_serial_lock); 241 + goto continue_scanning; 242 + } 243 + 244 + /* We've completed the pass. 
Set the timer if we need to and queue a 245 + * new cycle if necessary. We keep executing cycles until we find one 246 + * where we didn't reap any keys. 247 + */ 248 + kdebug("pass complete"); 249 + 250 + if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) { 280 251 new_timer += key_gc_delay; 281 252 key_schedule_gc(new_timer); 282 253 } 283 - kleave(" [end]"); 254 + 255 + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) { 256 + /* Make sure everyone revalidates their keys if we marked a 257 + * bunch as being dead and make sure all keyring ex-payloads 258 + * are destroyed. 259 + */ 260 + kdebug("dead sync"); 261 + synchronize_rcu(); 262 + } 263 + 264 + if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 | 265 + KEY_GC_REAPING_DEAD_2))) { 266 + if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) { 267 + /* No remaining dead keys: short circuit the remaining 268 + * keytype reap cycles. 269 + */ 270 + kdebug("dead short"); 271 + gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2); 272 + gc_state |= KEY_GC_REAPING_DEAD_3; 273 + } else { 274 + gc_state |= KEY_GC_REAP_AGAIN; 275 + } 276 + } 277 + 278 + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) { 279 + kdebug("dead wake"); 280 + smp_mb(); 281 + clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); 282 + wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE); 283 + } 284 + 285 + if (gc_state & KEY_GC_REAP_AGAIN) 286 + queue_work(system_nrt_wq, &key_gc_work); 287 + kleave(" [end %x]", gc_state); 288 + return; 289 + 290 + /* We found an unreferenced key - once we've removed it from the tree, 291 + * we can safely drop the lock. 292 + */ 293 + found_unreferenced_key: 294 + kdebug("unrefd key %d", key->serial); 295 + rb_erase(&key->serial_node, &key_serial_tree); 296 + spin_unlock(&key_serial_lock); 297 + 298 + key_gc_unused_key(key); 299 + gc_state |= KEY_GC_REAP_AGAIN; 300 + goto maybe_resched; 301 + 302 + /* We found a keyring and we need to check the payload for links to 303 + * dead or expired keys. 
We don't flag another reap immediately as we 304 + * have to wait for the old payload to be destroyed by RCU before we 305 + * can reap the keys to which it refers. 306 + */ 307 + found_keyring: 308 + spin_unlock(&key_serial_lock); 309 + kdebug("scan keyring %d", key->serial); 310 + key_gc_keyring(key, limit); 311 + goto maybe_resched; 312 + 313 + /* We found a dead key that is still referenced. Reset its type and 314 + * destroy its payload with its semaphore held. 315 + */ 316 + destroy_dead_key: 317 + spin_unlock(&key_serial_lock); 318 + kdebug("destroy key %d", key->serial); 319 + down_write(&key->sem); 320 + key->type = &key_type_dead; 321 + if (key_gc_dead_keytype->destroy) 322 + key_gc_dead_keytype->destroy(key); 323 + memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); 324 + up_write(&key->sem); 325 + goto maybe_resched; 284 326 }
+4
security/keys/internal.h
··· 31 31 no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) 32 32 #endif 33 33 34 + extern struct key_type key_type_dead; 34 35 extern struct key_type key_type_user; 35 36 36 37 /*****************************************************************************/ ··· 76 75 #define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */ 77 76 78 77 78 + extern struct kmem_cache *key_jar; 79 79 extern struct rb_root key_serial_tree; 80 80 extern spinlock_t key_serial_lock; 81 81 extern struct mutex key_construction_mutex; ··· 148 146 149 147 extern long join_session_keyring(const char *name); 150 148 149 + extern struct work_struct key_gc_work; 151 150 extern unsigned key_gc_delay; 152 151 extern void keyring_gc(struct key *keyring, time_t limit); 153 152 extern void key_schedule_gc(time_t expiry_at); 153 + extern void key_gc_keytype(struct key_type *ktype); 154 154 155 155 extern int key_task_permission(const key_ref_t key_ref, 156 156 const struct cred *cred,
+5 -116
security/keys/key.c
··· 21 21 #include <linux/user_namespace.h> 22 22 #include "internal.h" 23 23 24 - static struct kmem_cache *key_jar; 24 + struct kmem_cache *key_jar; 25 25 struct rb_root key_serial_tree; /* tree of keys indexed by serial */ 26 26 DEFINE_SPINLOCK(key_serial_lock); 27 27 ··· 36 36 static LIST_HEAD(key_types_list); 37 37 static DECLARE_RWSEM(key_types_sem); 38 38 39 - static void key_cleanup(struct work_struct *work); 40 - static DECLARE_WORK(key_cleanup_task, key_cleanup); 41 - 42 39 /* We serialise key instantiation and link */ 43 40 DEFINE_MUTEX(key_construction_mutex); 44 - 45 - /* Any key who's type gets unegistered will be re-typed to this */ 46 - static struct key_type key_type_dead = { 47 - .name = "dead", 48 - }; 49 41 50 42 #ifdef KEY_DEBUGGING 51 43 void __key_check(const struct key *key) ··· 583 591 } 584 592 EXPORT_SYMBOL(key_reject_and_link); 585 593 586 - /* 587 - * Garbage collect keys in process context so that we don't have to disable 588 - * interrupts all over the place. 589 - * 590 - * key_put() schedules this rather than trying to do the cleanup itself, which 591 - * means key_put() doesn't have to sleep. 
592 - */ 593 - static void key_cleanup(struct work_struct *work) 594 - { 595 - struct rb_node *_n; 596 - struct key *key; 597 - 598 - go_again: 599 - /* look for a dead key in the tree */ 600 - spin_lock(&key_serial_lock); 601 - 602 - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { 603 - key = rb_entry(_n, struct key, serial_node); 604 - 605 - if (atomic_read(&key->usage) == 0) 606 - goto found_dead_key; 607 - } 608 - 609 - spin_unlock(&key_serial_lock); 610 - return; 611 - 612 - found_dead_key: 613 - /* we found a dead key - once we've removed it from the tree, we can 614 - * drop the lock */ 615 - rb_erase(&key->serial_node, &key_serial_tree); 616 - spin_unlock(&key_serial_lock); 617 - 618 - key_check(key); 619 - 620 - security_key_free(key); 621 - 622 - /* deal with the user's key tracking and quota */ 623 - if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { 624 - spin_lock(&key->user->lock); 625 - key->user->qnkeys--; 626 - key->user->qnbytes -= key->quotalen; 627 - spin_unlock(&key->user->lock); 628 - } 629 - 630 - atomic_dec(&key->user->nkeys); 631 - if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 632 - atomic_dec(&key->user->nikeys); 633 - 634 - key_user_put(key->user); 635 - 636 - /* now throw away the key memory */ 637 - if (key->type->destroy) 638 - key->type->destroy(key); 639 - 640 - kfree(key->description); 641 - 642 - #ifdef KEY_DEBUGGING 643 - key->magic = KEY_DEBUG_MAGIC_X; 644 - #endif 645 - kmem_cache_free(key_jar, key); 646 - 647 - /* there may, of course, be more than one key to destroy */ 648 - goto go_again; 649 - } 650 - 651 594 /** 652 595 * key_put - Discard a reference to a key. 653 596 * @key: The key to discard a reference from. 
··· 597 670 key_check(key); 598 671 599 672 if (atomic_dec_and_test(&key->usage)) 600 - schedule_work(&key_cleanup_task); 673 + queue_work(system_nrt_wq, &key_gc_work); 601 674 } 602 675 } 603 676 EXPORT_SYMBOL(key_put); ··· 975 1048 */ 976 1049 void unregister_key_type(struct key_type *ktype) 977 1050 { 978 - struct rb_node *_n; 979 - struct key *key; 980 - 981 1051 down_write(&key_types_sem); 982 - 983 - /* withdraw the key type */ 984 1052 list_del_init(&ktype->link); 985 - 986 - /* mark all the keys of this type dead */ 987 - spin_lock(&key_serial_lock); 988 - 989 - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { 990 - key = rb_entry(_n, struct key, serial_node); 991 - 992 - if (key->type == ktype) { 993 - key->type = &key_type_dead; 994 - set_bit(KEY_FLAG_DEAD, &key->flags); 995 - } 996 - } 997 - 998 - spin_unlock(&key_serial_lock); 999 - 1000 - /* make sure everyone revalidates their keys */ 1001 - synchronize_rcu(); 1002 - 1003 - /* we should now be able to destroy the payloads of all the keys of 1004 - * this type with impunity */ 1005 - spin_lock(&key_serial_lock); 1006 - 1007 - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { 1008 - key = rb_entry(_n, struct key, serial_node); 1009 - 1010 - if (key->type == ktype) { 1011 - if (ktype->destroy) 1012 - ktype->destroy(key); 1013 - memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); 1014 - } 1015 - } 1016 - 1017 - spin_unlock(&key_serial_lock); 1018 - up_write(&key_types_sem); 1019 - 1020 - key_schedule_gc(0); 1053 + downgrade_write(&key_types_sem); 1054 + key_gc_keytype(ktype); 1055 + up_read(&key_types_sem); 1021 1056 } 1022 1057 EXPORT_SYMBOL(unregister_key_type); 1023 1058
+1 -2
security/keys/keyring.c
··· 860 860 861 861 kenter("%d,%d,%p", keyring->serial, key->serial, nklist); 862 862 863 - klist = rcu_dereference_protected(keyring->payload.subscriptions, 864 - rwsem_is_locked(&keyring->sem)); 863 + klist = rcu_dereference_locked_keyring(keyring); 865 864 866 865 atomic_inc(&key->usage); 867 866
+13 -3
security/keys/process_keys.c
··· 270 270 if (!new) 271 271 return -ENOMEM; 272 272 273 - ret = install_session_keyring_to_cred(new, NULL); 273 + ret = install_session_keyring_to_cred(new, keyring); 274 274 if (ret < 0) { 275 275 abort_creds(new); 276 276 return ret; ··· 589 589 ret = install_user_keyrings(); 590 590 if (ret < 0) 591 591 goto error; 592 - ret = install_session_keyring( 593 - cred->user->session_keyring); 592 + if (lflags & KEY_LOOKUP_CREATE) 593 + ret = join_session_keyring(NULL); 594 + else 595 + ret = install_session_keyring( 596 + cred->user->session_keyring); 594 597 598 + if (ret < 0) 599 + goto error; 600 + goto reget_creds; 601 + } else if (cred->tgcred->session_keyring == 602 + cred->user->session_keyring && 603 + lflags & KEY_LOOKUP_CREATE) { 604 + ret = join_session_keyring(NULL); 595 605 if (ret < 0) 596 606 goto error; 597 607 goto reget_creds;
+15 -4
security/keys/trusted.c
··· 779 779 opt->pcrinfo_len = strlen(args[0].from) / 2; 780 780 if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) 781 781 return -EINVAL; 782 - hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len); 782 + res = hex2bin(opt->pcrinfo, args[0].from, 783 + opt->pcrinfo_len); 784 + if (res < 0) 785 + return -EINVAL; 783 786 break; 784 787 case Opt_keyhandle: 785 788 res = strict_strtoul(args[0].from, 16, &handle); ··· 794 791 case Opt_keyauth: 795 792 if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) 796 793 return -EINVAL; 797 - hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE); 794 + res = hex2bin(opt->keyauth, args[0].from, 795 + SHA1_DIGEST_SIZE); 796 + if (res < 0) 797 + return -EINVAL; 798 798 break; 799 799 case Opt_blobauth: 800 800 if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) 801 801 return -EINVAL; 802 - hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE); 802 + res = hex2bin(opt->blobauth, args[0].from, 803 + SHA1_DIGEST_SIZE); 804 + if (res < 0) 805 + return -EINVAL; 803 806 break; 804 807 case Opt_migratable: 805 808 if (*args[0].from == '0') ··· 869 860 p->blob_len = strlen(c) / 2; 870 861 if (p->blob_len > MAX_BLOB_SIZE) 871 862 return -EINVAL; 872 - hex2bin(p->blob, c, p->blob_len); 863 + ret = hex2bin(p->blob, c, p->blob_len); 864 + if (ret < 0) 865 + return -EINVAL; 873 866 ret = getoptions(datablob, p, o); 874 867 if (ret < 0) 875 868 return ret;
+65 -11
security/security.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/kernel.h> 18 18 #include <linux/security.h> 19 + #include <linux/integrity.h> 19 20 #include <linux/ima.h> 21 + #include <linux/evm.h> 22 + 23 + #define MAX_LSM_EVM_XATTR 2 20 24 21 25 /* Boot-time LSM user choice */ 22 26 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = 23 27 CONFIG_DEFAULT_SECURITY; 24 - 25 - /* things that live in capability.c */ 26 - extern void __init security_fixup_ops(struct security_operations *ops); 27 28 28 29 static struct security_operations *security_ops; 29 30 static struct security_operations default_security_ops = { ··· 335 334 336 335 void security_inode_free(struct inode *inode) 337 336 { 338 - ima_inode_free(inode); 337 + integrity_inode_free(inode); 339 338 security_ops->inode_free_security(inode); 340 339 } 341 340 342 341 int security_inode_init_security(struct inode *inode, struct inode *dir, 343 - const struct qstr *qstr, char **name, 344 - void **value, size_t *len) 342 + const struct qstr *qstr, 343 + const initxattrs initxattrs, void *fs_data) 344 + { 345 + struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1]; 346 + struct xattr *lsm_xattr, *evm_xattr, *xattr; 347 + int ret; 348 + 349 + if (unlikely(IS_PRIVATE(inode))) 350 + return 0; 351 + 352 + memset(new_xattrs, 0, sizeof new_xattrs); 353 + if (!initxattrs) 354 + return security_ops->inode_init_security(inode, dir, qstr, 355 + NULL, NULL, NULL); 356 + lsm_xattr = new_xattrs; 357 + ret = security_ops->inode_init_security(inode, dir, qstr, 358 + &lsm_xattr->name, 359 + &lsm_xattr->value, 360 + &lsm_xattr->value_len); 361 + if (ret) 362 + goto out; 363 + 364 + evm_xattr = lsm_xattr + 1; 365 + ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr); 366 + if (ret) 367 + goto out; 368 + ret = initxattrs(inode, new_xattrs, fs_data); 369 + out: 370 + for (xattr = new_xattrs; xattr->name != NULL; xattr++) { 371 + kfree(xattr->name); 372 + kfree(xattr->value); 373 + } 374 + return (ret == -EOPNOTSUPP) ? 
0 : ret; 375 + } 376 + EXPORT_SYMBOL(security_inode_init_security); 377 + 378 + int security_old_inode_init_security(struct inode *inode, struct inode *dir, 379 + const struct qstr *qstr, char **name, 380 + void **value, size_t *len) 345 381 { 346 382 if (unlikely(IS_PRIVATE(inode))) 347 - return -EOPNOTSUPP; 383 + return 0; 348 384 return security_ops->inode_init_security(inode, dir, qstr, name, value, 349 385 len); 350 386 } 351 - EXPORT_SYMBOL(security_inode_init_security); 387 + EXPORT_SYMBOL(security_old_inode_init_security); 352 388 353 389 #ifdef CONFIG_SECURITY_PATH 354 390 int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, ··· 561 523 562 524 int security_inode_setattr(struct dentry *dentry, struct iattr *attr) 563 525 { 526 + int ret; 527 + 564 528 if (unlikely(IS_PRIVATE(dentry->d_inode))) 565 529 return 0; 566 - return security_ops->inode_setattr(dentry, attr); 530 + ret = security_ops->inode_setattr(dentry, attr); 531 + if (ret) 532 + return ret; 533 + return evm_inode_setattr(dentry, attr); 567 534 } 568 535 EXPORT_SYMBOL_GPL(security_inode_setattr); 569 536 ··· 582 539 int security_inode_setxattr(struct dentry *dentry, const char *name, 583 540 const void *value, size_t size, int flags) 584 541 { 542 + int ret; 543 + 585 544 if (unlikely(IS_PRIVATE(dentry->d_inode))) 586 545 return 0; 587 - return security_ops->inode_setxattr(dentry, name, value, size, flags); 546 + ret = security_ops->inode_setxattr(dentry, name, value, size, flags); 547 + if (ret) 548 + return ret; 549 + return evm_inode_setxattr(dentry, name, value, size); 588 550 } 589 551 590 552 void security_inode_post_setxattr(struct dentry *dentry, const char *name, ··· 598 550 if (unlikely(IS_PRIVATE(dentry->d_inode))) 599 551 return; 600 552 security_ops->inode_post_setxattr(dentry, name, value, size, flags); 553 + evm_inode_post_setxattr(dentry, name, value, size); 601 554 } 602 555 603 556 int security_inode_getxattr(struct dentry *dentry, const char *name) ··· 
617 568 618 569 int security_inode_removexattr(struct dentry *dentry, const char *name) 619 570 { 571 + int ret; 572 + 620 573 if (unlikely(IS_PRIVATE(dentry->d_inode))) 621 574 return 0; 622 - return security_ops->inode_removexattr(dentry, name); 575 + ret = security_ops->inode_removexattr(dentry, name); 576 + if (ret) 577 + return ret; 578 + return evm_inode_removexattr(dentry, name); 623 579 } 624 580 625 581 int security_inode_need_killpriv(struct dentry *dentry)
+1
security/selinux/exports.c
··· 12 12 * as published by the Free Software Foundation. 13 13 */ 14 14 #include <linux/module.h> 15 + #include <linux/selinux.h> 15 16 16 17 #include "security.h" 17 18
+2 -11
security/selinux/hooks.c
··· 89 89 #include "xfrm.h" 90 90 #include "netlabel.h" 91 91 #include "audit.h" 92 + #include "avc_ss.h" 92 93 93 94 #define NUM_SEL_MNT_OPTS 5 94 95 95 - extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); 96 96 extern struct security_operations *security_ops; 97 97 98 98 /* SECMARK reference count */ 99 - atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); 99 + static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); 100 100 101 101 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP 102 102 int selinux_enforcing; ··· 278 278 sb->s_security = NULL; 279 279 kfree(sbsec); 280 280 } 281 - 282 - /* The security server must be initialized before 283 - any labeling or access decisions can be provided. */ 284 - extern int ss_initialized; 285 281 286 282 /* The file system's label must be initialized prior to use. */ 287 283 ··· 2092 2096 2093 2097 return (atsecure || cap_bprm_secureexec(bprm)); 2094 2098 } 2095 - 2096 - extern struct vfsmount *selinuxfs_mount; 2097 - extern struct dentry *selinux_null; 2098 2099 2099 2100 /* Derived from fs/exec.c:flush_old_files. */ 2100 2101 static inline void flush_unauthorized_files(const struct cred *cred, ··· 5796 5803 5797 5804 int selinux_disable(void) 5798 5805 { 5799 - extern void exit_sel_fs(void); 5800 - 5801 5806 if (ss_initialized) { 5802 5807 /* Not permitted after initial policy load. */ 5803 5808 return -EINVAL;
+6
security/selinux/include/avc_ss.h
··· 18 18 19 19 extern struct security_class_mapping secclass_map[]; 20 20 21 + /* 22 + * The security server must be initialized before 23 + * any labeling or access decisions can be provided. 24 + */ 25 + extern int ss_initialized; 26 + 21 27 #endif /* _SELINUX_AVC_SS_H_ */ 22 28
+8
security/selinux/include/security.h
··· 216 216 217 217 extern void selinux_status_update_setenforce(int enforcing); 218 218 extern void selinux_status_update_policyload(int seqno); 219 + extern void selinux_complete_init(void); 220 + extern int selinux_disable(void); 221 + extern void exit_sel_fs(void); 222 + extern struct dentry *selinux_null; 223 + extern struct vfsmount *selinuxfs_mount; 224 + extern void selnl_notify_setenforce(int val); 225 + extern void selnl_notify_policyload(u32 seqno); 226 + extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); 219 227 220 228 #endif /* _SELINUX_SECURITY_H_ */ 221 229
+2
security/selinux/netlink.c
··· 19 19 #include <linux/selinux_netlink.h> 20 20 #include <net/net_namespace.h> 21 21 22 + #include "security.h" 23 + 22 24 static struct sock *selnl; 23 25 24 26 static int selnl_msglen(int msgtype)
+1
security/selinux/nlmsgtab.c
··· 21 21 22 22 #include "flask.h" 23 23 #include "av_permissions.h" 24 + #include "security.h" 24 25 25 26 struct nlmsg_perm { 26 27 u16 nlmsg_type;
+1 -4
security/selinux/selinuxfs.c
··· 75 75 /* global data for policy capabilities */ 76 76 static struct dentry *policycap_dir; 77 77 78 - extern void selnl_notify_setenforce(int val); 79 - 80 78 /* Check whether a task is allowed to use a security operation. */ 81 79 static int task_has_security(struct task_struct *tsk, 82 80 u32 perms) ··· 276 278 char *page = NULL; 277 279 ssize_t length; 278 280 int new_value; 279 - extern int selinux_disable(void); 280 281 281 282 length = -ENOMEM; 282 283 if (count >= PAGE_SIZE) ··· 475 478 .page_mkwrite = sel_mmap_policy_fault, 476 479 }; 477 480 478 - int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) 481 + static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) 479 482 { 480 483 if (vma->vm_flags & VM_SHARED) { 481 484 /* do not allow mprotect to make mapping writable */
+1 -1
security/selinux/ss/conditional.c
··· 555 555 return 0; 556 556 } 557 557 558 - int cond_write_node(struct policydb *p, struct cond_node *node, 558 + static int cond_write_node(struct policydb *p, struct cond_node *node, 559 559 struct policy_file *fp) 560 560 { 561 561 struct cond_expr *cur_expr;
+1
security/selinux/ss/conditional.h
··· 13 13 #include "avtab.h" 14 14 #include "symtab.h" 15 15 #include "policydb.h" 16 + #include "../include/conditional.h" 16 17 17 18 #define COND_EXPR_MAXDEPTH 10 18 19
-2
security/selinux/ss/policydb.c
··· 1743 1743 return 0; 1744 1744 } 1745 1745 1746 - extern int ss_initialized; 1747 - 1748 1746 u16 string_to_security_class(struct policydb *p, const char *name) 1749 1747 { 1750 1748 struct class_datum *cladatum;
-3
security/selinux/ss/services.c
··· 70 70 #include "ebitmap.h" 71 71 #include "audit.h" 72 72 73 - extern void selnl_notify_policyload(u32 seqno); 74 - 75 73 int selinux_policycap_netpeer; 76 74 int selinux_policycap_openperm; 77 75 ··· 1788 1790 POLICYDB_CAPABILITY_OPENPERM); 1789 1791 } 1790 1792 1791 - extern void selinux_complete_init(void); 1792 1793 static int security_preserve_bools(struct policydb *p); 1793 1794 1794 1795 /**
+15 -9
security/smack/smack.h
··· 41 41 }; 42 42 43 43 struct socket_smack { 44 - char *smk_out; /* outbound label */ 45 - char *smk_in; /* inbound label */ 46 - char smk_packet[SMK_LABELLEN]; /* TCP peer label */ 44 + char *smk_out; /* outbound label */ 45 + char *smk_in; /* inbound label */ 46 + char *smk_packet; /* TCP peer label */ 47 47 }; 48 48 49 49 /* ··· 116 116 * If there is a cipso value associated with the label it 117 117 * gets stored here, too. This will most likely be rare as 118 118 * the cipso direct mapping in used internally. 119 + * 120 + * Keep the access rules for this subject label here so that 121 + * the entire set of rules does not need to be examined every 122 + * time. 119 123 */ 120 124 struct smack_known { 121 125 struct list_head list; 122 126 char smk_known[SMK_LABELLEN]; 123 127 u32 smk_secid; 124 128 struct smack_cipso *smk_cipso; 125 - spinlock_t smk_cipsolock; /* for changing cipso map */ 129 + spinlock_t smk_cipsolock; /* for changing cipso map */ 130 + struct list_head smk_rules; /* access rules */ 131 + struct mutex smk_rules_lock; /* lock for the rules */ 126 132 }; 127 133 128 134 /* ··· 156 150 157 151 /* 158 152 * smackfs magic number 159 - * smackfs macic number 160 153 */ 161 154 #define SMACK_MAGIC 0x43415d53 /* "SMAC" */ 162 155 ··· 181 176 #define MAY_NOT 0 182 177 183 178 /* 184 - * Number of access types used by Smack (rwxa) 179 + * Number of access types used by Smack (rwxat) 185 180 */ 186 - #define SMK_NUM_ACCESS_TYPE 4 181 + #define SMK_NUM_ACCESS_TYPE 5 187 182 188 183 /* 189 184 * Smack audit data; is empty if CONFIG_AUDIT not set ··· 206 201 int smk_access(char *, char *, int, struct smk_audit_info *); 207 202 int smk_curacc(char *, u32, struct smk_audit_info *); 208 203 int smack_to_cipso(const char *, struct smack_cipso *); 209 - void smack_from_cipso(u32, char *, char *); 204 + char *smack_from_cipso(u32, char *); 210 205 char *smack_from_secid(const u32); 206 + void smk_parse_smack(const char *string, int len, char *smack); 211 207 
char *smk_import(const char *, int); 212 208 struct smack_known *smk_import_entry(const char *, int); 209 + struct smack_known *smk_find_entry(const char *); 213 210 u32 smack_to_secid(const char *); 214 211 215 212 /* ··· 230 223 extern struct smack_known smack_known_web; 231 224 232 225 extern struct list_head smack_known_list; 233 - extern struct list_head smack_rule_list; 234 226 extern struct list_head smk_netlbladdr_list; 235 227 236 228 extern struct security_operations smack_ops;
+72 -62
security/smack/smack_access.c
··· 77 77 * entry is found returns -ENOENT. 78 78 * 79 79 * NOTE: 80 - * Even though Smack labels are usually shared on smack_list 81 - * labels that come in off the network can't be imported 82 - * and added to the list for locking reasons. 83 80 * 84 - * Therefore, it is necessary to check the contents of the labels, 85 - * not just the pointer values. Of course, in most cases the labels 86 - * will be on the list, so checking the pointers may be a worthwhile 87 - * optimization. 81 + * Earlier versions of this function allowed for labels that 82 + * were not on the label list. This was done to allow for 83 + * labels to come over the network that had never been seen 84 + * before on this host. Unless the receiving socket has the 85 + * star label this will always result in a failure check. The 86 + * star labeled socket case is now handled in the networking 87 + * hooks so there is no case where the label is not on the 88 + * label list. Checking to see if the address of two labels 89 + * is the same is now a reliable test. 90 + * 91 + * Do the object check first because that is more 92 + * likely to differ. 88 93 */ 89 94 int smk_access_entry(char *subject_label, char *object_label, 90 95 struct list_head *rule_list) ··· 98 93 struct smack_rule *srp; 99 94 100 95 list_for_each_entry_rcu(srp, rule_list, list) { 101 - if (srp->smk_subject == subject_label || 102 - strcmp(srp->smk_subject, subject_label) == 0) { 103 - if (srp->smk_object == object_label || 104 - strcmp(srp->smk_object, object_label) == 0) { 105 - may = srp->smk_access; 106 - break; 107 - } 96 + if (srp->smk_object == object_label && 97 + srp->smk_subject == subject_label) { 98 + may = srp->smk_access; 99 + break; 108 100 } 109 101 } 110 102 ··· 119 117 * access rule list and returns 0 if the access is permitted, 120 118 * non zero otherwise. 
121 119 * 122 - * Even though Smack labels are usually shared on smack_list 123 - * labels that come in off the network can't be imported 124 - * and added to the list for locking reasons. 125 - * 126 - * Therefore, it is necessary to check the contents of the labels, 127 - * not just the pointer values. Of course, in most cases the labels 128 - * will be on the list, so checking the pointers may be a worthwhile 129 - * optimization. 120 + * Smack labels are shared on smack_list 130 121 */ 131 122 int smk_access(char *subject_label, char *object_label, int request, 132 123 struct smk_audit_info *a) 133 124 { 125 + struct smack_known *skp; 134 126 int may = MAY_NOT; 135 127 int rc = 0; 136 128 ··· 133 137 * 134 138 * A star subject can't access any object. 135 139 */ 136 - if (subject_label == smack_known_star.smk_known || 137 - strcmp(subject_label, smack_known_star.smk_known) == 0) { 140 + if (subject_label == smack_known_star.smk_known) { 138 141 rc = -EACCES; 139 142 goto out_audit; 140 143 } ··· 143 148 * An internet subject can access any object. 144 149 */ 145 150 if (object_label == smack_known_web.smk_known || 146 - subject_label == smack_known_web.smk_known || 147 - strcmp(object_label, smack_known_web.smk_known) == 0 || 148 - strcmp(subject_label, smack_known_web.smk_known) == 0) 151 + subject_label == smack_known_web.smk_known) 149 152 goto out_audit; 150 153 /* 151 154 * A star object can be accessed by any subject. 152 155 */ 153 - if (object_label == smack_known_star.smk_known || 154 - strcmp(object_label, smack_known_star.smk_known) == 0) 156 + if (object_label == smack_known_star.smk_known) 155 157 goto out_audit; 156 158 /* 157 159 * An object can be accessed in any way by a subject 158 160 * with the same label. 159 161 */ 160 - if (subject_label == object_label || 161 - strcmp(subject_label, object_label) == 0) 162 + if (subject_label == object_label) 162 163 goto out_audit; 163 164 /* 164 165 * A hat subject can read any object. 
165 166 * A floor object can be read by any subject. 166 167 */ 167 168 if ((request & MAY_ANYREAD) == request) { 168 - if (object_label == smack_known_floor.smk_known || 169 - strcmp(object_label, smack_known_floor.smk_known) == 0) 169 + if (object_label == smack_known_floor.smk_known) 170 170 goto out_audit; 171 - if (subject_label == smack_known_hat.smk_known || 172 - strcmp(subject_label, smack_known_hat.smk_known) == 0) 171 + if (subject_label == smack_known_hat.smk_known) 173 172 goto out_audit; 174 173 } 175 174 /* ··· 173 184 * good. A negative response from smk_access_entry() 174 185 * indicates there is no entry for this pair. 175 186 */ 187 + skp = smk_find_entry(subject_label); 176 188 rcu_read_lock(); 177 - may = smk_access_entry(subject_label, object_label, &smack_rule_list); 189 + may = smk_access_entry(subject_label, object_label, &skp->smk_rules); 178 190 rcu_read_unlock(); 179 191 180 192 if (may > 0 && (request & may) == request) ··· 334 344 static DEFINE_MUTEX(smack_known_lock); 335 345 336 346 /** 337 - * smk_import_entry - import a label, return the list entry 347 + * smk_find_entry - find a label on the list, return the list entry 338 348 * @string: a text string that might be a Smack label 339 - * @len: the maximum size, or zero if it is NULL terminated. 340 349 * 341 350 * Returns a pointer to the entry in the label list that 342 - * matches the passed string, adding it if necessary. 351 + * matches the passed string. 
343 352 */ 344 - struct smack_known *smk_import_entry(const char *string, int len) 353 + struct smack_known *smk_find_entry(const char *string) 345 354 { 346 355 struct smack_known *skp; 347 - char smack[SMK_LABELLEN]; 356 + 357 + list_for_each_entry_rcu(skp, &smack_known_list, list) { 358 + if (strncmp(skp->smk_known, string, SMK_MAXLEN) == 0) 359 + return skp; 360 + } 361 + 362 + return NULL; 363 + } 364 + 365 + /** 366 + * smk_parse_smack - parse smack label from a text string 367 + * @string: a text string that might contain a Smack label 368 + * @len: the maximum size, or zero if it is NULL terminated. 369 + * @smack: parsed smack label, or NULL if parse error 370 + */ 371 + void smk_parse_smack(const char *string, int len, char *smack) 372 + { 348 373 int found; 349 374 int i; 350 375 ··· 377 372 } else 378 373 smack[i] = string[i]; 379 374 } 375 + } 380 376 377 + /** 378 + * smk_import_entry - import a label, return the list entry 379 + * @string: a text string that might be a Smack label 380 + * @len: the maximum size, or zero if it is NULL terminated. 381 + * 382 + * Returns a pointer to the entry in the label list that 383 + * matches the passed string, adding it if necessary. 
384 + */ 385 + struct smack_known *smk_import_entry(const char *string, int len) 386 + { 387 + struct smack_known *skp; 388 + char smack[SMK_LABELLEN]; 389 + 390 + smk_parse_smack(string, len, smack); 381 391 if (smack[0] == '\0') 382 392 return NULL; 383 393 384 394 mutex_lock(&smack_known_lock); 385 395 386 - found = 0; 387 - list_for_each_entry_rcu(skp, &smack_known_list, list) { 388 - if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) { 389 - found = 1; 390 - break; 391 - } 392 - } 396 + skp = smk_find_entry(smack); 393 397 394 - if (found == 0) { 398 + if (skp == NULL) { 395 399 skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL); 396 400 if (skp != NULL) { 397 401 strncpy(skp->smk_known, smack, SMK_MAXLEN); 398 402 skp->smk_secid = smack_next_secid++; 399 403 skp->smk_cipso = NULL; 404 + INIT_LIST_HEAD(&skp->smk_rules); 400 405 spin_lock_init(&skp->smk_cipsolock); 406 + mutex_init(&skp->smk_rules_lock); 401 407 /* 402 408 * Make sure that the entry is actually 403 409 * filled before putting it on the list. ··· 496 480 * smack_from_cipso - find the Smack label associated with a CIPSO option 497 481 * @level: Bell & LaPadula level from the network 498 482 * @cp: Bell & LaPadula categories from the network 499 - * @result: where to put the Smack value 500 483 * 501 484 * This is a simple lookup in the label table. 502 485 * 503 - * This is an odd duck as far as smack handling goes in that 504 - * it sends back a copy of the smack label rather than a pointer 505 - * to the master list. This is done because it is possible for 506 - * a foreign host to send a smack label that is new to this 507 - * machine and hence not on the list. That would not be an 508 - * issue except that adding an entry to the master list can't 509 - * be done at that point. 486 + * Return the matching label from the label list or NULL. 
510 487 */ 511 - void smack_from_cipso(u32 level, char *cp, char *result) 488 + char *smack_from_cipso(u32 level, char *cp) 512 489 { 513 490 struct smack_known *kp; 514 491 char *final = NULL; ··· 518 509 final = kp->smk_known; 519 510 520 511 spin_unlock_bh(&kp->smk_cipsolock); 512 + 513 + if (final != NULL) 514 + break; 521 515 } 522 516 rcu_read_unlock(); 523 - if (final == NULL) 524 - final = smack_known_huh.smk_known; 525 - strncpy(result, final, SMK_MAXLEN); 526 - return; 517 + 518 + return final; 527 519 } 528 520 529 521 /**
+180 -88
security/smack/smack_lsm.c
··· 5 5 * 6 6 * Authors: 7 7 * Casey Schaufler <casey@schaufler-ca.com> 8 - * Jarkko Sakkinen <ext-jarkko.2.sakkinen@nokia.com> 8 + * Jarkko Sakkinen <jarkko.sakkinen@intel.com> 9 9 * 10 10 * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> 11 11 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. 12 12 * Paul Moore <paul@paul-moore.com> 13 13 * Copyright (C) 2010 Nokia Corporation 14 + * Copyright (C) 2011 Intel Corporation. 14 15 * 15 16 * This program is free software; you can redistribute it and/or modify 16 17 * it under the terms of the GNU General Public License version 2, ··· 35 34 #include <linux/audit.h> 36 35 #include <linux/magic.h> 37 36 #include <linux/dcache.h> 37 + #include <linux/personality.h> 38 38 #include "smack.h" 39 39 40 40 #define task_security(task) (task_cred_xxx((task), security)) ··· 443 441 * BPRM hooks 444 442 */ 445 443 444 + /** 445 + * smack_bprm_set_creds - set creds for exec 446 + * @bprm: the exec information 447 + * 448 + * Returns 0 if it gets a blob, -ENOMEM otherwise 449 + */ 446 450 static int smack_bprm_set_creds(struct linux_binprm *bprm) 447 451 { 448 - struct task_smack *tsp = bprm->cred->security; 452 + struct inode *inode = bprm->file->f_path.dentry->d_inode; 453 + struct task_smack *bsp = bprm->cred->security; 449 454 struct inode_smack *isp; 450 - struct dentry *dp; 451 455 int rc; 452 456 453 457 rc = cap_bprm_set_creds(bprm); ··· 463 455 if (bprm->cred_prepared) 464 456 return 0; 465 457 466 - if (bprm->file == NULL || bprm->file->f_dentry == NULL) 458 + isp = inode->i_security; 459 + if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task) 467 460 return 0; 468 461 469 - dp = bprm->file->f_dentry; 462 + if (bprm->unsafe) 463 + return -EPERM; 470 464 471 - if (dp->d_inode == NULL) 472 - return 0; 473 - 474 - isp = dp->d_inode->i_security; 475 - 476 - if (isp->smk_task != NULL) 477 - tsp->smk_task = isp->smk_task; 465 + bsp->smk_task = isp->smk_task; 466 + bprm->per_clear |= 
PER_CLEAR_ON_SETID; 478 467 479 468 return 0; 469 + } 470 + 471 + /** 472 + * smack_bprm_committing_creds - Prepare to install the new credentials 473 + * from bprm. 474 + * 475 + * @bprm: binprm for exec 476 + */ 477 + static void smack_bprm_committing_creds(struct linux_binprm *bprm) 478 + { 479 + struct task_smack *bsp = bprm->cred->security; 480 + 481 + if (bsp->smk_task != bsp->smk_forked) 482 + current->pdeath_signal = 0; 483 + } 484 + 485 + /** 486 + * smack_bprm_secureexec - Return the decision to use secureexec. 487 + * @bprm: binprm for exec 488 + * 489 + * Returns 0 on success. 490 + */ 491 + static int smack_bprm_secureexec(struct linux_binprm *bprm) 492 + { 493 + struct task_smack *tsp = current_security(); 494 + int ret = cap_bprm_secureexec(bprm); 495 + 496 + if (!ret && (tsp->smk_task != tsp->smk_forked)) 497 + ret = 1; 498 + 499 + return ret; 480 500 } 481 501 482 502 /* ··· 552 516 const struct qstr *qstr, char **name, 553 517 void **value, size_t *len) 554 518 { 519 + struct smack_known *skp; 520 + char *csp = smk_of_current(); 555 521 char *isp = smk_of_inode(inode); 556 522 char *dsp = smk_of_inode(dir); 557 523 int may; ··· 565 527 } 566 528 567 529 if (value) { 530 + skp = smk_find_entry(csp); 568 531 rcu_read_lock(); 569 - may = smk_access_entry(smk_of_current(), dsp, &smack_rule_list); 532 + may = smk_access_entry(csp, dsp, &skp->smk_rules); 570 533 rcu_read_unlock(); 571 534 572 535 /* ··· 880 841 return; 881 842 } 882 843 883 - /* 844 + /** 884 845 * smack_inode_getxattr - Smack check on getxattr 885 846 * @dentry: the object 886 847 * @name: unused ··· 897 858 return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad); 898 859 } 899 860 900 - /* 861 + /** 901 862 * smack_inode_removexattr - Smack check on removexattr 902 863 * @dentry: the object 903 864 * @name: name of the attribute ··· 1127 1088 * @cmd: what action to check 1128 1089 * @arg: unused 1129 1090 * 1091 + * Generally these operations are harmless. 
1092 + * File locking operations present an obvious mechanism 1093 + * for passing information, so they require write access. 1094 + * 1130 1095 * Returns 0 if current has access, error code otherwise 1131 1096 */ 1132 1097 static int smack_file_fcntl(struct file *file, unsigned int cmd, 1133 1098 unsigned long arg) 1134 1099 { 1135 1100 struct smk_audit_info ad; 1136 - int rc; 1101 + int rc = 0; 1137 1102 1138 - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); 1139 - smk_ad_setfield_u_fs_path(&ad, file->f_path); 1140 1103 1141 1104 switch (cmd) { 1142 - case F_DUPFD: 1143 - case F_GETFD: 1144 - case F_GETFL: 1145 1105 case F_GETLK: 1146 - case F_GETOWN: 1147 - case F_GETSIG: 1148 - rc = smk_curacc(file->f_security, MAY_READ, &ad); 1149 - break; 1150 - case F_SETFD: 1151 - case F_SETFL: 1152 1106 case F_SETLK: 1153 1107 case F_SETLKW: 1154 1108 case F_SETOWN: 1155 1109 case F_SETSIG: 1110 + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); 1111 + smk_ad_setfield_u_fs_path(&ad, file->f_path); 1156 1112 rc = smk_curacc(file->f_security, MAY_WRITE, &ad); 1157 1113 break; 1158 1114 default: 1159 - rc = smk_curacc(file->f_security, MAY_READWRITE, &ad); 1115 + break; 1160 1116 } 1161 1117 1162 1118 return rc; ··· 1172 1138 unsigned long flags, unsigned long addr, 1173 1139 unsigned long addr_only) 1174 1140 { 1141 + struct smack_known *skp; 1175 1142 struct smack_rule *srp; 1176 1143 struct task_smack *tsp; 1177 1144 char *sp; ··· 1205 1170 1206 1171 tsp = current_security(); 1207 1172 sp = smk_of_current(); 1173 + skp = smk_find_entry(sp); 1208 1174 rc = 0; 1209 1175 1210 1176 rcu_read_lock(); ··· 1213 1177 * For each Smack rule associated with the subject 1214 1178 * label verify that the SMACK64MMAP also has access 1215 1179 * to that rule's object label. 1216 - * 1217 - * Because neither of the labels comes 1218 - * from the networking code it is sufficient 1219 - * to compare pointers. 
1220 1180 */ 1221 - list_for_each_entry_rcu(srp, &smack_rule_list, list) { 1222 - if (srp->smk_subject != sp) 1223 - continue; 1224 - 1181 + list_for_each_entry_rcu(srp, &skp->smk_rules, list) { 1225 1182 osmack = srp->smk_object; 1226 1183 /* 1227 1184 * Matching labels always allows access. ··· 1243 1214 * If there isn't one a SMACK64MMAP subject 1244 1215 * can't have as much access as current. 1245 1216 */ 1246 - mmay = smk_access_entry(msmack, osmack, &smack_rule_list); 1217 + skp = smk_find_entry(msmack); 1218 + mmay = smk_access_entry(msmack, osmack, &skp->smk_rules); 1247 1219 if (mmay == -ENOENT) { 1248 1220 rc = -EACCES; 1249 1221 break; ··· 1343 1313 may |= MAY_WRITE; 1344 1314 1345 1315 return smk_curacc(file->f_security, may, &ad); 1316 + } 1317 + 1318 + /** 1319 + * smack_dentry_open - Smack dentry open processing 1320 + * @file: the object 1321 + * @cred: unused 1322 + * 1323 + * Set the security blob in the file structure. 1324 + * 1325 + * Returns 0 1326 + */ 1327 + static int smack_dentry_open(struct file *file, const struct cred *cred) 1328 + { 1329 + struct inode_smack *isp = file->f_path.dentry->d_inode->i_security; 1330 + 1331 + file->f_security = isp->smk_inode; 1332 + 1333 + return 0; 1346 1334 } 1347 1335 1348 1336 /* ··· 1503 1455 /** 1504 1456 * smk_curacc_on_task - helper to log task related access 1505 1457 * @p: the task object 1506 - * @access : the access requested 1458 + * @access: the access requested 1459 + * @caller: name of the calling function for audit 1507 1460 * 1508 1461 * Return 0 if access is permitted 1509 1462 */ 1510 - static int smk_curacc_on_task(struct task_struct *p, int access) 1463 + static int smk_curacc_on_task(struct task_struct *p, int access, 1464 + const char *caller) 1511 1465 { 1512 1466 struct smk_audit_info ad; 1513 1467 1514 - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); 1468 + smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK); 1515 1469 smk_ad_setfield_u_tsk(&ad, p); 1516 1470 return 
smk_curacc(smk_of_task(task_security(p)), access, &ad); 1517 1471 } ··· 1527 1477 */ 1528 1478 static int smack_task_setpgid(struct task_struct *p, pid_t pgid) 1529 1479 { 1530 - return smk_curacc_on_task(p, MAY_WRITE); 1480 + return smk_curacc_on_task(p, MAY_WRITE, __func__); 1531 1481 } 1532 1482 1533 1483 /** ··· 1538 1488 */ 1539 1489 static int smack_task_getpgid(struct task_struct *p) 1540 1490 { 1541 - return smk_curacc_on_task(p, MAY_READ); 1491 + return smk_curacc_on_task(p, MAY_READ, __func__); 1542 1492 } 1543 1493 1544 1494 /** ··· 1549 1499 */ 1550 1500 static int smack_task_getsid(struct task_struct *p) 1551 1501 { 1552 - return smk_curacc_on_task(p, MAY_READ); 1502 + return smk_curacc_on_task(p, MAY_READ, __func__); 1553 1503 } 1554 1504 1555 1505 /** ··· 1577 1527 1578 1528 rc = cap_task_setnice(p, nice); 1579 1529 if (rc == 0) 1580 - rc = smk_curacc_on_task(p, MAY_WRITE); 1530 + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); 1581 1531 return rc; 1582 1532 } 1583 1533 ··· 1594 1544 1595 1545 rc = cap_task_setioprio(p, ioprio); 1596 1546 if (rc == 0) 1597 - rc = smk_curacc_on_task(p, MAY_WRITE); 1547 + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); 1598 1548 return rc; 1599 1549 } 1600 1550 ··· 1606 1556 */ 1607 1557 static int smack_task_getioprio(struct task_struct *p) 1608 1558 { 1609 - return smk_curacc_on_task(p, MAY_READ); 1559 + return smk_curacc_on_task(p, MAY_READ, __func__); 1610 1560 } 1611 1561 1612 1562 /** ··· 1623 1573 1624 1574 rc = cap_task_setscheduler(p); 1625 1575 if (rc == 0) 1626 - rc = smk_curacc_on_task(p, MAY_WRITE); 1576 + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); 1627 1577 return rc; 1628 1578 } 1629 1579 ··· 1635 1585 */ 1636 1586 static int smack_task_getscheduler(struct task_struct *p) 1637 1587 { 1638 - return smk_curacc_on_task(p, MAY_READ); 1588 + return smk_curacc_on_task(p, MAY_READ, __func__); 1639 1589 } 1640 1590 1641 1591 /** ··· 1646 1596 */ 1647 1597 static int smack_task_movememory(struct 
task_struct *p) 1648 1598 { 1649 - return smk_curacc_on_task(p, MAY_WRITE); 1599 + return smk_curacc_on_task(p, MAY_WRITE, __func__); 1650 1600 } 1651 1601 1652 1602 /** ··· 1761 1711 1762 1712 ssp->smk_in = csp; 1763 1713 ssp->smk_out = csp; 1764 - ssp->smk_packet[0] = '\0'; 1714 + ssp->smk_packet = NULL; 1765 1715 1766 1716 sk->sk_security = ssp; 1767 1717 ··· 2803 2753 { 2804 2754 struct socket_smack *ssp = sock->sk_security; 2805 2755 struct socket_smack *osp = other->sk_security; 2756 + struct socket_smack *nsp = newsk->sk_security; 2806 2757 struct smk_audit_info ad; 2807 2758 int rc = 0; 2808 2759 ··· 2812 2761 2813 2762 if (!capable(CAP_MAC_OVERRIDE)) 2814 2763 rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad); 2764 + 2765 + /* 2766 + * Cross reference the peer labels for SO_PEERSEC. 2767 + */ 2768 + if (rc == 0) { 2769 + nsp->smk_packet = ssp->smk_out; 2770 + ssp->smk_packet = osp->smk_out; 2771 + } 2815 2772 2816 2773 return rc; 2817 2774 } ··· 2872 2813 return smack_netlabel_send(sock->sk, sip); 2873 2814 } 2874 2815 2875 - 2876 2816 /** 2877 2817 * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack 2878 2818 * @sap: netlabel secattr 2879 - * @sip: where to put the result 2819 + * @ssp: socket security information 2880 2820 * 2881 - * Copies a smack label into sip 2821 + * Returns a pointer to a Smack label found on the label list. 2882 2822 */ 2883 - static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip) 2823 + static char *smack_from_secattr(struct netlbl_lsm_secattr *sap, 2824 + struct socket_smack *ssp) 2884 2825 { 2826 + struct smack_known *skp; 2885 2827 char smack[SMK_LABELLEN]; 2886 2828 char *sp; 2887 2829 int pcat; ··· 2912 2852 * we are already done. WeeHee. 2913 2853 */ 2914 2854 if (sap->attr.mls.lvl == smack_cipso_direct) { 2915 - memcpy(sip, smack, SMK_MAXLEN); 2916 - return; 2855 + /* 2856 + * The label sent is usually on the label list. 
2857 + * 2858 + * If it is not we may still want to allow the 2859 + * delivery. 2860 + * 2861 + * If the recipient is accepting all packets 2862 + * because it is using the star ("*") label 2863 + * for SMACK64IPIN provide the web ("@") label 2864 + * so that a directed response will succeed. 2865 + * This is not very correct from a MAC point 2866 + * of view, but gets around the problem that 2867 + * locking prevents adding the newly discovered 2868 + * label to the list. 2869 + * The case where the recipient is not using 2870 + * the star label should obviously fail. 2871 + * The easy way to do this is to provide the 2872 + * star label as the subject label. 2873 + */ 2874 + skp = smk_find_entry(smack); 2875 + if (skp != NULL) 2876 + return skp->smk_known; 2877 + if (ssp != NULL && 2878 + ssp->smk_in == smack_known_star.smk_known) 2879 + return smack_known_web.smk_known; 2880 + return smack_known_star.smk_known; 2917 2881 } 2918 2882 /* 2919 2883 * Look it up in the supplied table if it is not 2920 2884 * a direct mapping. 2921 2885 */ 2922 - smack_from_cipso(sap->attr.mls.lvl, smack, sip); 2923 - return; 2886 + sp = smack_from_cipso(sap->attr.mls.lvl, smack); 2887 + if (sp != NULL) 2888 + return sp; 2889 + if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known) 2890 + return smack_known_web.smk_known; 2891 + return smack_known_star.smk_known; 2924 2892 } 2925 2893 if ((sap->flags & NETLBL_SECATTR_SECID) != 0) { 2926 2894 /* ··· 2963 2875 * secid is from a fallback. 2964 2876 */ 2965 2877 BUG_ON(sp == NULL); 2966 - strncpy(sip, sp, SMK_MAXLEN); 2967 - return; 2878 + return sp; 2968 2879 } 2969 2880 /* 2970 2881 * Without guidance regarding the smack value 2971 2882 * for the packet fall back on the network 2972 2883 * ambient value. 
2973 2884 */ 2974 - strncpy(sip, smack_net_ambient, SMK_MAXLEN); 2975 - return; 2885 + return smack_net_ambient; 2976 2886 } 2977 2887 2978 2888 /** ··· 2984 2898 { 2985 2899 struct netlbl_lsm_secattr secattr; 2986 2900 struct socket_smack *ssp = sk->sk_security; 2987 - char smack[SMK_LABELLEN]; 2988 2901 char *csp; 2989 2902 int rc; 2990 2903 struct smk_audit_info ad; ··· 2996 2911 netlbl_secattr_init(&secattr); 2997 2912 2998 2913 rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr); 2999 - if (rc == 0) { 3000 - smack_from_secattr(&secattr, smack); 3001 - csp = smack; 3002 - } else 2914 + if (rc == 0) 2915 + csp = smack_from_secattr(&secattr, ssp); 2916 + else 3003 2917 csp = smack_net_ambient; 3004 2918 3005 2919 netlbl_secattr_destroy(&secattr); ··· 3035 2951 int __user *optlen, unsigned len) 3036 2952 { 3037 2953 struct socket_smack *ssp; 3038 - int slen; 2954 + char *rcp = ""; 2955 + int slen = 1; 3039 2956 int rc = 0; 3040 2957 3041 2958 ssp = sock->sk->sk_security; 3042 - slen = strlen(ssp->smk_packet) + 1; 2959 + if (ssp->smk_packet != NULL) { 2960 + rcp = ssp->smk_packet; 2961 + slen = strlen(rcp) + 1; 2962 + } 3043 2963 3044 2964 if (slen > len) 3045 2965 rc = -ERANGE; 3046 - else if (copy_to_user(optval, ssp->smk_packet, slen) != 0) 2966 + else if (copy_to_user(optval, rcp, slen) != 0) 3047 2967 rc = -EFAULT; 3048 2968 3049 2969 if (put_user(slen, optlen) != 0) ··· 3070 2982 3071 2983 { 3072 2984 struct netlbl_lsm_secattr secattr; 3073 - struct socket_smack *sp; 3074 - char smack[SMK_LABELLEN]; 2985 + struct socket_smack *ssp = NULL; 2986 + char *sp; 3075 2987 int family = PF_UNSPEC; 3076 2988 u32 s = 0; /* 0 is the invalid secid */ 3077 2989 int rc; ··· 3086 2998 family = sock->sk->sk_family; 3087 2999 3088 3000 if (family == PF_UNIX) { 3089 - sp = sock->sk->sk_security; 3090 - s = smack_to_secid(sp->smk_out); 3001 + ssp = sock->sk->sk_security; 3002 + s = smack_to_secid(ssp->smk_out); 3091 3003 } else if (family == PF_INET || family == PF_INET6) { 
3092 3004 /* 3093 3005 * Translate what netlabel gave us. 3094 3006 */ 3007 + if (sock != NULL && sock->sk != NULL) 3008 + ssp = sock->sk->sk_security; 3095 3009 netlbl_secattr_init(&secattr); 3096 3010 rc = netlbl_skbuff_getattr(skb, family, &secattr); 3097 3011 if (rc == 0) { 3098 - smack_from_secattr(&secattr, smack); 3099 - s = smack_to_secid(smack); 3012 + sp = smack_from_secattr(&secattr, ssp); 3013 + s = smack_to_secid(sp); 3100 3014 } 3101 3015 netlbl_secattr_destroy(&secattr); 3102 3016 } ··· 3146 3056 struct netlbl_lsm_secattr secattr; 3147 3057 struct sockaddr_in addr; 3148 3058 struct iphdr *hdr; 3149 - char smack[SMK_LABELLEN]; 3059 + char *sp; 3150 3060 int rc; 3151 3061 struct smk_audit_info ad; 3152 3062 ··· 3157 3067 netlbl_secattr_init(&secattr); 3158 3068 rc = netlbl_skbuff_getattr(skb, family, &secattr); 3159 3069 if (rc == 0) 3160 - smack_from_secattr(&secattr, smack); 3070 + sp = smack_from_secattr(&secattr, ssp); 3161 3071 else 3162 - strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN); 3072 + sp = smack_known_huh.smk_known; 3163 3073 netlbl_secattr_destroy(&secattr); 3164 3074 3165 3075 #ifdef CONFIG_AUDIT ··· 3172 3082 * Receiving a packet requires that the other end be able to write 3173 3083 * here. Read access is not required. 3174 3084 */ 3175 - rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad); 3085 + rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad); 3176 3086 if (rc != 0) 3177 3087 return rc; 3178 3088 ··· 3180 3090 * Save the peer's label in the request_sock so we can later setup 3181 3091 * smk_packet in the child socket so that SO_PEERCRED can report it. 
3182 3092 */ 3183 - req->peer_secid = smack_to_secid(smack); 3093 + req->peer_secid = smack_to_secid(sp); 3184 3094 3185 3095 /* 3186 3096 * We need to decide if we want to label the incoming connection here ··· 3193 3103 if (smack_host_label(&addr) == NULL) { 3194 3104 rcu_read_unlock(); 3195 3105 netlbl_secattr_init(&secattr); 3196 - smack_to_secattr(smack, &secattr); 3106 + smack_to_secattr(sp, &secattr); 3197 3107 rc = netlbl_req_setattr(req, &secattr); 3198 3108 netlbl_secattr_destroy(&secattr); 3199 3109 } else { ··· 3215 3125 const struct request_sock *req) 3216 3126 { 3217 3127 struct socket_smack *ssp = sk->sk_security; 3218 - char *smack; 3219 3128 3220 - if (req->peer_secid != 0) { 3221 - smack = smack_from_secid(req->peer_secid); 3222 - strncpy(ssp->smk_packet, smack, SMK_MAXLEN); 3223 - } else 3224 - ssp->smk_packet[0] = '\0'; 3129 + if (req->peer_secid != 0) 3130 + ssp->smk_packet = smack_from_secid(req->peer_secid); 3131 + else 3132 + ssp->smk_packet = NULL; 3225 3133 } 3226 3134 3227 3135 /* ··· 3497 3409 .sb_umount = smack_sb_umount, 3498 3410 3499 3411 .bprm_set_creds = smack_bprm_set_creds, 3412 + .bprm_committing_creds = smack_bprm_committing_creds, 3413 + .bprm_secureexec = smack_bprm_secureexec, 3500 3414 3501 3415 .inode_alloc_security = smack_inode_alloc_security, 3502 3416 .inode_free_security = smack_inode_free_security, ··· 3529 3439 .file_set_fowner = smack_file_set_fowner, 3530 3440 .file_send_sigiotask = smack_file_send_sigiotask, 3531 3441 .file_receive = smack_file_receive, 3442 + 3443 + .dentry_open = smack_dentry_open, 3532 3444 3533 3445 .cred_alloc_blank = smack_cred_alloc_blank, 3534 3446 .cred_free = smack_cred_free,
+204 -73
security/smack/smackfs.c
··· 44 44 SMK_ONLYCAP = 9, /* the only "capable" label */ 45 45 SMK_LOGGING = 10, /* logging */ 46 46 SMK_LOAD_SELF = 11, /* task specific rules */ 47 + SMK_ACCESSES = 12, /* access policy */ 47 48 }; 48 49 49 50 /* ··· 86 85 */ 87 86 88 87 LIST_HEAD(smk_netlbladdr_list); 88 + 89 + /* 90 + * Rule lists are maintained for each label. 91 + * This master list is just for reading /smack/load. 92 + */ 93 + struct smack_master_list { 94 + struct list_head list; 95 + struct smack_rule *smk_rule; 96 + }; 97 + 89 98 LIST_HEAD(smack_rule_list); 90 99 91 100 static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT; ··· 103 92 const char *smack_cipso_option = SMACK_CIPSO_OPTION; 104 93 105 94 106 - #define SEQ_READ_FINISHED 1 95 + #define SEQ_READ_FINISHED ((loff_t)-1) 107 96 108 97 /* 109 98 * Values for parsing cipso rules ··· 170 159 171 160 mutex_lock(rule_lock); 172 161 162 + /* 163 + * Because the object label is less likely to match 164 + * than the subject label check it first 165 + */ 173 166 list_for_each_entry_rcu(sp, rule_list, list) { 174 - if (sp->smk_subject == srp->smk_subject && 175 - sp->smk_object == srp->smk_object) { 167 + if (sp->smk_object == srp->smk_object && 168 + sp->smk_subject == srp->smk_subject) { 176 169 found = 1; 177 170 sp->smk_access = srp->smk_access; 178 171 break; ··· 188 173 mutex_unlock(rule_lock); 189 174 190 175 return found; 176 + } 177 + 178 + /** 179 + * smk_parse_rule - parse Smack rule from load string 180 + * @data: string to be parsed whose size is SMK_LOADLEN 181 + * @rule: Smack rule 182 + * @import: if non-zero, import labels 183 + */ 184 + static int smk_parse_rule(const char *data, struct smack_rule *rule, int import) 185 + { 186 + char smack[SMK_LABELLEN]; 187 + struct smack_known *skp; 188 + 189 + if (import) { 190 + rule->smk_subject = smk_import(data, 0); 191 + if (rule->smk_subject == NULL) 192 + return -1; 193 + 194 + rule->smk_object = smk_import(data + SMK_LABELLEN, 0); 195 + if (rule->smk_object == NULL) 196 + 
return -1; 197 + } else { 198 + smk_parse_smack(data, 0, smack); 199 + skp = smk_find_entry(smack); 200 + if (skp == NULL) 201 + return -1; 202 + rule->smk_subject = skp->smk_known; 203 + 204 + smk_parse_smack(data + SMK_LABELLEN, 0, smack); 205 + skp = smk_find_entry(smack); 206 + if (skp == NULL) 207 + return -1; 208 + rule->smk_object = skp->smk_known; 209 + } 210 + 211 + rule->smk_access = 0; 212 + 213 + switch (data[SMK_LABELLEN + SMK_LABELLEN]) { 214 + case '-': 215 + break; 216 + case 'r': 217 + case 'R': 218 + rule->smk_access |= MAY_READ; 219 + break; 220 + default: 221 + return -1; 222 + } 223 + 224 + switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) { 225 + case '-': 226 + break; 227 + case 'w': 228 + case 'W': 229 + rule->smk_access |= MAY_WRITE; 230 + break; 231 + default: 232 + return -1; 233 + } 234 + 235 + switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) { 236 + case '-': 237 + break; 238 + case 'x': 239 + case 'X': 240 + rule->smk_access |= MAY_EXEC; 241 + break; 242 + default: 243 + return -1; 244 + } 245 + 246 + switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) { 247 + case '-': 248 + break; 249 + case 'a': 250 + case 'A': 251 + rule->smk_access |= MAY_APPEND; 252 + break; 253 + default: 254 + return -1; 255 + } 256 + 257 + switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) { 258 + case '-': 259 + break; 260 + case 't': 261 + case 'T': 262 + rule->smk_access |= MAY_TRANSMUTE; 263 + break; 264 + default: 265 + return -1; 266 + } 267 + 268 + return 0; 191 269 } 192 270 193 271 /** ··· 305 197 struct list_head *rule_list, 306 198 struct mutex *rule_lock) 307 199 { 200 + struct smack_master_list *smlp; 201 + struct smack_known *skp; 308 202 struct smack_rule *rule; 309 203 char *data; 310 204 int rc = -EINVAL; 205 + int load = 0; 311 206 312 207 /* 313 208 * No partial writes. 
··· 345 234 goto out; 346 235 } 347 236 348 - rule->smk_subject = smk_import(data, 0); 349 - if (rule->smk_subject == NULL) 237 + if (smk_parse_rule(data, rule, 1)) 350 238 goto out_free_rule; 351 239 352 - rule->smk_object = smk_import(data + SMK_LABELLEN, 0); 353 - if (rule->smk_object == NULL) 354 - goto out_free_rule; 355 - 356 - rule->smk_access = 0; 357 - 358 - switch (data[SMK_LABELLEN + SMK_LABELLEN]) { 359 - case '-': 360 - break; 361 - case 'r': 362 - case 'R': 363 - rule->smk_access |= MAY_READ; 364 - break; 365 - default: 366 - goto out_free_rule; 367 - } 368 - 369 - switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) { 370 - case '-': 371 - break; 372 - case 'w': 373 - case 'W': 374 - rule->smk_access |= MAY_WRITE; 375 - break; 376 - default: 377 - goto out_free_rule; 378 - } 379 - 380 - switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) { 381 - case '-': 382 - break; 383 - case 'x': 384 - case 'X': 385 - rule->smk_access |= MAY_EXEC; 386 - break; 387 - default: 388 - goto out_free_rule; 389 - } 390 - 391 - switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) { 392 - case '-': 393 - break; 394 - case 'a': 395 - case 'A': 396 - rule->smk_access |= MAY_APPEND; 397 - break; 398 - default: 399 - goto out_free_rule; 400 - } 401 - 402 - switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) { 403 - case '-': 404 - break; 405 - case 't': 406 - case 'T': 407 - rule->smk_access |= MAY_TRANSMUTE; 408 - break; 409 - default: 410 - goto out_free_rule; 240 + if (rule_list == NULL) { 241 + load = 1; 242 + skp = smk_find_entry(rule->smk_subject); 243 + rule_list = &skp->smk_rules; 244 + rule_lock = &skp->smk_rules_lock; 411 245 } 412 246 413 247 rc = count; ··· 360 304 * smk_set_access returns true if there was already a rule 361 305 * for the subject/object pair, and false if it was new. 
362 306 */ 363 - if (!smk_set_access(rule, rule_list, rule_lock)) 307 + if (!smk_set_access(rule, rule_list, rule_lock)) { 308 + smlp = kzalloc(sizeof(*smlp), GFP_KERNEL); 309 + if (smlp != NULL) { 310 + smlp->smk_rule = rule; 311 + list_add_rcu(&smlp->list, &smack_rule_list); 312 + } else 313 + rc = -ENOMEM; 364 314 goto out; 315 + } 365 316 366 317 out_free_rule: 367 318 kfree(rule); ··· 384 321 385 322 static void *load_seq_start(struct seq_file *s, loff_t *pos) 386 323 { 387 - if (*pos == SEQ_READ_FINISHED) 324 + struct list_head *list; 325 + 326 + /* 327 + * This is 0 the first time through. 328 + */ 329 + if (s->index == 0) 330 + s->private = &smack_rule_list; 331 + 332 + if (s->private == NULL) 388 333 return NULL; 389 - if (list_empty(&smack_rule_list)) 334 + 335 + list = s->private; 336 + if (list_empty(list)) 390 337 return NULL; 391 - return smack_rule_list.next; 338 + 339 + if (s->index == 0) 340 + return list->next; 341 + return list; 392 342 } 393 343 394 344 static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos) ··· 409 333 struct list_head *list = v; 410 334 411 335 if (list_is_last(list, &smack_rule_list)) { 412 - *pos = SEQ_READ_FINISHED; 336 + s->private = NULL; 413 337 return NULL; 414 338 } 339 + s->private = list->next; 415 340 return list->next; 416 341 } 417 342 418 343 static int load_seq_show(struct seq_file *s, void *v) 419 344 { 420 345 struct list_head *list = v; 421 - struct smack_rule *srp = 422 - list_entry(list, struct smack_rule, list); 346 + struct smack_master_list *smlp = 347 + list_entry(list, struct smack_master_list, list); 348 + struct smack_rule *srp = smlp->smk_rule; 423 349 424 350 seq_printf(s, "%s %s", (char *)srp->smk_subject, 425 351 (char *)srp->smk_object); ··· 490 412 if (!capable(CAP_MAC_ADMIN)) 491 413 return -EPERM; 492 414 493 - return smk_write_load_list(file, buf, count, ppos, &smack_rule_list, 494 - &smack_list_lock); 415 + return smk_write_load_list(file, buf, count, ppos, NULL, NULL); 495 416 
} 496 417 497 418 static const struct file_operations smk_load_ops = { ··· 1502 1425 .write = smk_write_load_self, 1503 1426 .release = seq_release, 1504 1427 }; 1428 + 1429 + /** 1430 + * smk_write_access - handle access check transaction 1431 + * @file: file pointer 1432 + * @buf: data from user space 1433 + * @count: bytes sent 1434 + * @ppos: where to start - must be 0 1435 + */ 1436 + static ssize_t smk_write_access(struct file *file, const char __user *buf, 1437 + size_t count, loff_t *ppos) 1438 + { 1439 + struct smack_rule rule; 1440 + char *data; 1441 + int res; 1442 + 1443 + data = simple_transaction_get(file, buf, count); 1444 + if (IS_ERR(data)) 1445 + return PTR_ERR(data); 1446 + 1447 + if (count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0)) 1448 + return -EINVAL; 1449 + 1450 + res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access, 1451 + NULL); 1452 + data[0] = res == 0 ? '1' : '0'; 1453 + data[1] = '\0'; 1454 + 1455 + simple_transaction_set(file, 2); 1456 + return SMK_LOADLEN; 1457 + } 1458 + 1459 + static const struct file_operations smk_access_ops = { 1460 + .write = smk_write_access, 1461 + .read = simple_transaction_read, 1462 + .release = simple_transaction_release, 1463 + .llseek = generic_file_llseek, 1464 + }; 1465 + 1505 1466 /** 1506 1467 * smk_fill_super - fill the /smackfs superblock 1507 1468 * @sb: the empty superblock ··· 1574 1459 "logging", &smk_logging_ops, S_IRUGO|S_IWUSR}, 1575 1460 [SMK_LOAD_SELF] = { 1576 1461 "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO}, 1462 + [SMK_ACCESSES] = { 1463 + "access", &smk_access_ops, S_IRUGO|S_IWUGO}, 1577 1464 /* last one */ 1578 1465 {""} 1579 1466 }; ··· 1650 1533 1651 1534 smk_cipso_doi(); 1652 1535 smk_unlbl_ambient(NULL); 1536 + 1537 + mutex_init(&smack_known_floor.smk_rules_lock); 1538 + mutex_init(&smack_known_hat.smk_rules_lock); 1539 + mutex_init(&smack_known_huh.smk_rules_lock); 1540 + mutex_init(&smack_known_invalid.smk_rules_lock); 1541 + 
mutex_init(&smack_known_star.smk_rules_lock); 1542 + mutex_init(&smack_known_web.smk_rules_lock); 1543 + 1544 + INIT_LIST_HEAD(&smack_known_floor.smk_rules); 1545 + INIT_LIST_HEAD(&smack_known_hat.smk_rules); 1546 + INIT_LIST_HEAD(&smack_known_huh.smk_rules); 1547 + INIT_LIST_HEAD(&smack_known_invalid.smk_rules); 1548 + INIT_LIST_HEAD(&smack_known_star.smk_rules); 1549 + INIT_LIST_HEAD(&smack_known_web.smk_rules); 1653 1550 1654 1551 return err; 1655 1552 }
+2
security/tomoyo/Kconfig
··· 1 1 config SECURITY_TOMOYO 2 2 bool "TOMOYO Linux Support" 3 3 depends on SECURITY 4 + depends on NET 4 5 select SECURITYFS 5 6 select SECURITY_PATH 7 + select SECURITY_NETWORK 6 8 default n 7 9 help 8 10 This selects TOMOYO Linux, pathname-based access control.
+2 -2
security/tomoyo/Makefile
··· 1 - obj-y = audit.o common.o condition.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o 1 + obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o 2 2 3 3 $(obj)/policy/profile.conf: 4 4 @mkdir -p $(obj)/policy/ ··· 27 27 @touch $@ 28 28 29 29 $(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf 30 - @echo Generating built-in policy for TOMOYO 2.4.x. 30 + @echo Generating built-in policy for TOMOYO 2.5.x. 31 31 @echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp 32 32 @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp 33 33 @echo "\"\";" >> $@.tmp
+6 -1
security/tomoyo/audit.c
··· 313 313 */ 314 314 static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns, 315 315 const u8 profile, const u8 index, 316 + const struct tomoyo_acl_info *matched_acl, 316 317 const bool is_granted) 317 318 { 318 319 u8 mode; ··· 325 324 p = tomoyo_profile(ns, profile); 326 325 if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG]) 327 326 return false; 327 + if (is_granted && matched_acl && matched_acl->cond && 328 + matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO) 329 + return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES; 328 330 mode = p->config[index]; 329 331 if (mode == TOMOYO_CONFIG_USE_DEFAULT) 330 332 mode = p->config[category]; ··· 354 350 char *buf; 355 351 struct tomoyo_log *entry; 356 352 bool quota_exceeded = false; 357 - if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, r->granted)) 353 + if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, 354 + r->matched_acl, r->granted)) 358 355 goto out; 359 356 buf = tomoyo_init_log(r, len, fmt, args); 360 357 if (!buf)
+185 -43
security/tomoyo/common.c
··· 20 20 /* String table for /sys/kernel/security/tomoyo/profile */ 21 21 const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX 22 22 + TOMOYO_MAX_MAC_CATEGORY_INDEX] = { 23 + /* CONFIG::file group */ 23 24 [TOMOYO_MAC_FILE_EXECUTE] = "execute", 24 25 [TOMOYO_MAC_FILE_OPEN] = "open", 25 26 [TOMOYO_MAC_FILE_CREATE] = "create", ··· 44 43 [TOMOYO_MAC_FILE_MOUNT] = "mount", 45 44 [TOMOYO_MAC_FILE_UMOUNT] = "unmount", 46 45 [TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root", 46 + /* CONFIG::network group */ 47 + [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind", 48 + [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen", 49 + [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect", 50 + [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind", 51 + [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send", 52 + [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind", 53 + [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send", 54 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind", 55 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen", 56 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect", 57 + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind", 58 + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send", 59 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind", 60 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen", 61 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect", 62 + /* CONFIG::misc group */ 63 + [TOMOYO_MAC_ENVIRON] = "env", 64 + /* CONFIG group */ 47 65 [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", 66 + [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network", 67 + [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc", 48 68 }; 49 69 50 70 /* String table for conditions. */ ··· 152 130 [TOMOYO_TYPE_UMOUNT] = "unmount", 153 131 }; 154 132 133 + /* String table for socket's operation. 
*/ 134 + const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = { 135 + [TOMOYO_NETWORK_BIND] = "bind", 136 + [TOMOYO_NETWORK_LISTEN] = "listen", 137 + [TOMOYO_NETWORK_CONNECT] = "connect", 138 + [TOMOYO_NETWORK_SEND] = "send", 139 + }; 140 + 155 141 /* String table for categories. */ 156 142 static const char * const tomoyo_category_keywords 157 143 [TOMOYO_MAX_MAC_CATEGORY_INDEX] = { 158 - [TOMOYO_MAC_CATEGORY_FILE] = "file", 144 + [TOMOYO_MAC_CATEGORY_FILE] = "file", 145 + [TOMOYO_MAC_CATEGORY_NETWORK] = "network", 146 + [TOMOYO_MAC_CATEGORY_MISC] = "misc", 159 147 }; 160 148 161 149 /* Permit policy management by non-root user? */ ··· 262 230 WARN_ON(1); 263 231 } 264 232 233 + static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, 234 + ...) __printf(2, 3); 235 + 265 236 /** 266 237 * tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure. 267 238 * 268 239 * @head: Pointer to "struct tomoyo_io_buffer". 269 240 * @fmt: The printf()'s format string, followed by parameters. 270 241 */ 271 - void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) 242 + static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, 243 + ...) 
272 244 { 273 245 va_list args; 274 246 size_t len; ··· 349 313 INIT_LIST_HEAD(&ns->group_list[idx]); 350 314 for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++) 351 315 INIT_LIST_HEAD(&ns->policy_list[idx]); 352 - ns->profile_version = 20100903; 316 + ns->profile_version = 20110903; 353 317 tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list); 354 318 list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list); 355 319 } ··· 502 466 TOMOYO_CONFIG_WANT_REJECT_LOG; 503 467 memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT, 504 468 sizeof(ptr->config)); 505 - ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024; 506 - ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048; 469 + ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 470 + CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG; 471 + ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 472 + CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY; 507 473 mb(); /* Avoid out-of-order execution. */ 508 474 ns->profile_ptr[profile] = ptr; 509 475 entry = NULL; ··· 989 951 (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) { 990 952 struct task_struct *p; 991 953 rcu_read_lock(); 992 - read_lock(&tasklist_lock); 993 954 if (global_pid) 994 955 p = find_task_by_pid_ns(pid, &init_pid_ns); 995 956 else 996 957 p = find_task_by_vpid(pid); 997 958 if (p) 998 959 domain = tomoyo_real_domain(p); 999 - read_unlock(&tasklist_lock); 1000 960 rcu_read_unlock(); 1001 961 } else if (!strncmp(data, "domain=", 7)) { 1002 962 if (tomoyo_domain_def(data + 7)) ··· 1015 979 if (domain && domain->is_deleted) 1016 980 tomoyo_io_printf(head, "# This is a deleted domain.\n"); 1017 981 return true; 982 + } 983 + 984 + /** 985 + * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry. 986 + * 987 + * @a: Pointer to "struct tomoyo_acl_info". 988 + * @b: Pointer to "struct tomoyo_acl_info". 989 + * 990 + * Returns true if @a == @b, false otherwise. 
991 + */ 992 + static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a, 993 + const struct tomoyo_acl_info *b) 994 + { 995 + const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head); 996 + const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head); 997 + return p1->domainname == p2->domainname; 998 + } 999 + 1000 + /** 1001 + * tomoyo_write_task - Update task related list. 1002 + * 1003 + * @param: Pointer to "struct tomoyo_acl_param". 1004 + * 1005 + * Returns 0 on success, negative value otherwise. 1006 + * 1007 + * Caller holds tomoyo_read_lock(). 1008 + */ 1009 + static int tomoyo_write_task(struct tomoyo_acl_param *param) 1010 + { 1011 + int error = -EINVAL; 1012 + if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) { 1013 + struct tomoyo_task_acl e = { 1014 + .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL, 1015 + .domainname = tomoyo_get_domainname(param), 1016 + }; 1017 + if (e.domainname) 1018 + error = tomoyo_update_domain(&e.head, sizeof(e), param, 1019 + tomoyo_same_task_acl, 1020 + NULL); 1021 + tomoyo_put_name(e.domainname); 1022 + } 1023 + return error; 1018 1024 } 1019 1025 1020 1026 /** ··· 1117 1039 static const struct { 1118 1040 const char *keyword; 1119 1041 int (*write) (struct tomoyo_acl_param *); 1120 - } tomoyo_callback[1] = { 1042 + } tomoyo_callback[5] = { 1121 1043 { "file ", tomoyo_write_file }, 1044 + { "network inet ", tomoyo_write_inet_network }, 1045 + { "network unix ", tomoyo_write_unix_network }, 1046 + { "misc ", tomoyo_write_misc }, 1047 + { "task ", tomoyo_write_task }, 1122 1048 }; 1123 1049 u8 i; 1124 - for (i = 0; i < 1; i++) { 1050 + 1051 + for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) { 1125 1052 if (!tomoyo_str_starts(&param.data, 1126 1053 tomoyo_callback[i].keyword)) 1127 1054 continue; ··· 1210 1127 case 0: 1211 1128 head->r.cond_index = 0; 1212 1129 head->r.cond_step++; 1130 + if (cond->transit) { 1131 + tomoyo_set_space(head); 1132 + tomoyo_set_string(head, 
cond->transit->name); 1133 + } 1213 1134 /* fall through */ 1214 1135 case 1: 1215 1136 { ··· 1326 1239 head->r.cond_step++; 1327 1240 /* fall through */ 1328 1241 case 3: 1242 + if (cond->grant_log != TOMOYO_GRANTLOG_AUTO) 1243 + tomoyo_io_printf(head, " grant_log=%s", 1244 + tomoyo_yesno(cond->grant_log == 1245 + TOMOYO_GRANTLOG_YES)); 1329 1246 tomoyo_set_lf(head); 1330 1247 return true; 1331 1248 } ··· 1397 1306 if (first) 1398 1307 return true; 1399 1308 tomoyo_print_name_union(head, &ptr->name); 1309 + } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) { 1310 + struct tomoyo_task_acl *ptr = 1311 + container_of(acl, typeof(*ptr), head); 1312 + tomoyo_set_group(head, "task "); 1313 + tomoyo_set_string(head, "manual_domain_transition "); 1314 + tomoyo_set_string(head, ptr->domainname->name); 1400 1315 } else if (head->r.print_transition_related_only) { 1401 1316 return true; 1402 1317 } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) { ··· 1467 1370 tomoyo_print_number_union(head, &ptr->mode); 1468 1371 tomoyo_print_number_union(head, &ptr->major); 1469 1372 tomoyo_print_number_union(head, &ptr->minor); 1373 + } else if (acl_type == TOMOYO_TYPE_INET_ACL) { 1374 + struct tomoyo_inet_acl *ptr = 1375 + container_of(acl, typeof(*ptr), head); 1376 + const u8 perm = ptr->perm; 1377 + 1378 + for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) { 1379 + if (!(perm & (1 << bit))) 1380 + continue; 1381 + if (first) { 1382 + tomoyo_set_group(head, "network inet "); 1383 + tomoyo_set_string(head, tomoyo_proto_keyword 1384 + [ptr->protocol]); 1385 + tomoyo_set_space(head); 1386 + first = false; 1387 + } else { 1388 + tomoyo_set_slash(head); 1389 + } 1390 + tomoyo_set_string(head, tomoyo_socket_keyword[bit]); 1391 + } 1392 + if (first) 1393 + return true; 1394 + tomoyo_set_space(head); 1395 + if (ptr->address.group) { 1396 + tomoyo_set_string(head, "@"); 1397 + tomoyo_set_string(head, ptr->address.group->group_name 1398 + ->name); 1399 + } else { 1400 + char buf[128]; 1401 + 
tomoyo_print_ip(buf, sizeof(buf), &ptr->address); 1402 + tomoyo_io_printf(head, "%s", buf); 1403 + } 1404 + tomoyo_print_number_union(head, &ptr->port); 1405 + } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) { 1406 + struct tomoyo_unix_acl *ptr = 1407 + container_of(acl, typeof(*ptr), head); 1408 + const u8 perm = ptr->perm; 1409 + 1410 + for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) { 1411 + if (!(perm & (1 << bit))) 1412 + continue; 1413 + if (first) { 1414 + tomoyo_set_group(head, "network unix "); 1415 + tomoyo_set_string(head, tomoyo_proto_keyword 1416 + [ptr->protocol]); 1417 + tomoyo_set_space(head); 1418 + first = false; 1419 + } else { 1420 + tomoyo_set_slash(head); 1421 + } 1422 + tomoyo_set_string(head, tomoyo_socket_keyword[bit]); 1423 + } 1424 + if (first) 1425 + return true; 1426 + tomoyo_print_name_union(head, &ptr->name); 1470 1427 } else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) { 1471 1428 struct tomoyo_mount_acl *ptr = 1472 1429 container_of(acl, typeof(*ptr), head); ··· 1529 1378 tomoyo_print_name_union(head, &ptr->dir_name); 1530 1379 tomoyo_print_name_union(head, &ptr->fs_type); 1531 1380 tomoyo_print_number_union(head, &ptr->flags); 1381 + } else if (acl_type == TOMOYO_TYPE_ENV_ACL) { 1382 + struct tomoyo_env_acl *ptr = 1383 + container_of(acl, typeof(*ptr), head); 1384 + 1385 + tomoyo_set_group(head, "misc env "); 1386 + tomoyo_set_string(head, ptr->env->name); 1532 1387 } 1533 1388 if (acl->cond) { 1534 1389 head->r.print_cond_part = true; ··· 1667 1510 global_pid = true; 1668 1511 pid = (unsigned int) simple_strtoul(buf, NULL, 10); 1669 1512 rcu_read_lock(); 1670 - read_lock(&tasklist_lock); 1671 1513 if (global_pid) 1672 1514 p = find_task_by_pid_ns(pid, &init_pid_ns); 1673 1515 else 1674 1516 p = find_task_by_vpid(pid); 1675 1517 if (p) 1676 1518 domain = tomoyo_real_domain(p); 1677 - read_unlock(&tasklist_lock); 1678 1519 rcu_read_unlock(); 1679 1520 if (!domain) 1680 1521 return; ··· 1692 1537 1693 1538 /* String table for 
grouping keywords. */ 1694 1539 static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { 1695 - [TOMOYO_PATH_GROUP] = "path_group ", 1696 - [TOMOYO_NUMBER_GROUP] = "number_group ", 1540 + [TOMOYO_PATH_GROUP] = "path_group ", 1541 + [TOMOYO_NUMBER_GROUP] = "number_group ", 1542 + [TOMOYO_ADDRESS_GROUP] = "address_group ", 1697 1543 }; 1698 1544 1699 1545 /** ··· 1736 1580 } 1737 1581 1738 1582 /** 1739 - * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group" list. 1583 + * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list. 1740 1584 * 1741 1585 * @head: Pointer to "struct tomoyo_io_buffer". 1742 1586 * @idx: Index number. ··· 1773 1617 (ptr, 1774 1618 struct tomoyo_number_group, 1775 1619 head)->number); 1620 + } else if (idx == TOMOYO_ADDRESS_GROUP) { 1621 + char buffer[128]; 1622 + 1623 + struct tomoyo_address_group *member = 1624 + container_of(ptr, typeof(*member), 1625 + head); 1626 + tomoyo_print_ip(buffer, sizeof(buffer), 1627 + &member->address); 1628 + tomoyo_io_printf(head, " %s", buffer); 1776 1629 } 1777 1630 tomoyo_set_lf(head); 1778 1631 } ··· 2231 2066 static void tomoyo_read_version(struct tomoyo_io_buffer *head) 2232 2067 { 2233 2068 if (!head->r.eof) { 2234 - tomoyo_io_printf(head, "2.4.0"); 2235 - head->r.eof = true; 2236 - } 2237 - } 2238 - 2239 - /** 2240 - * tomoyo_read_self_domain - Get the current process's domainname. 2241 - * 2242 - * @head: Pointer to "struct tomoyo_io_buffer". 2243 - * 2244 - * Returns the current process's domainname. 2245 - */ 2246 - static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head) 2247 - { 2248 - if (!head->r.eof) { 2249 - /* 2250 - * tomoyo_domain()->domainname != NULL 2251 - * because every process belongs to a domain and 2252 - * the domain's name cannot be NULL. 
2253 - */ 2254 - tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name); 2069 + tomoyo_io_printf(head, "2.5.0"); 2255 2070 head->r.eof = true; 2256 2071 } 2257 2072 } ··· 2365 2220 /* /sys/kernel/security/tomoyo/audit */ 2366 2221 head->poll = tomoyo_poll_log; 2367 2222 head->read = tomoyo_read_log; 2368 - break; 2369 - case TOMOYO_SELFDOMAIN: 2370 - /* /sys/kernel/security/tomoyo/self_domain */ 2371 - head->read = tomoyo_read_self_domain; 2372 2223 break; 2373 2224 case TOMOYO_PROCESS_STATUS: 2374 2225 /* /sys/kernel/security/tomoyo/.process_status */ ··· 2594 2453 return -EFAULT; 2595 2454 if (mutex_lock_interruptible(&head->io_sem)) 2596 2455 return -EINTR; 2456 + head->read_user_buf_avail = 0; 2597 2457 idx = tomoyo_read_lock(); 2598 2458 /* Read a line and dispatch it to the policy handler. */ 2599 2459 while (avail_len > 0) { ··· 2704 2562 struct tomoyo_domain_info *domain; 2705 2563 const int idx = tomoyo_read_lock(); 2706 2564 tomoyo_policy_loaded = true; 2707 - printk(KERN_INFO "TOMOYO: 2.4.0\n"); 2565 + printk(KERN_INFO "TOMOYO: 2.5.0\n"); 2708 2566 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { 2709 2567 const u8 profile = domain->profile; 2710 2568 const struct tomoyo_policy_namespace *ns = domain->ns; 2711 - if (ns->profile_version != 20100903) 2569 + if (ns->profile_version != 20110903) 2712 2570 printk(KERN_ERR 2713 2571 "Profile version %u is not supported.\n", 2714 2572 ns->profile_version); ··· 2719 2577 else 2720 2578 continue; 2721 2579 printk(KERN_ERR 2722 - "Userland tools for TOMOYO 2.4 must be installed and " 2580 + "Userland tools for TOMOYO 2.5 must be installed and " 2723 2581 "policy must be initialized.\n"); 2724 - printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.4/ " 2582 + printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ " 2725 2583 "for more information.\n"); 2726 2584 panic("STOP!"); 2727 2585 }
+178 -11
security/tomoyo/common.h
··· 3 3 * 4 4 * Header file for TOMOYO. 5 5 * 6 - * Copyright (C) 2005-2010 NTT DATA CORPORATION 6 + * Copyright (C) 2005-2011 NTT DATA CORPORATION 7 7 */ 8 8 9 9 #ifndef _SECURITY_TOMOYO_COMMON_H ··· 23 23 #include <linux/poll.h> 24 24 #include <linux/binfmts.h> 25 25 #include <linux/highmem.h> 26 + #include <linux/net.h> 27 + #include <linux/inet.h> 28 + #include <linux/in.h> 29 + #include <linux/in6.h> 30 + #include <linux/un.h> 31 + #include <net/sock.h> 32 + #include <net/af_unix.h> 33 + #include <net/ip.h> 34 + #include <net/ipv6.h> 35 + #include <net/udp.h> 26 36 27 37 /********** Constants definitions. **********/ 28 38 ··· 44 34 #define TOMOYO_HASH_BITS 8 45 35 #define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS) 46 36 37 + /* 38 + * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET. 39 + * Therefore, we don't need SOCK_MAX. 40 + */ 41 + #define TOMOYO_SOCK_MAX 6 42 + 47 43 #define TOMOYO_EXEC_TMPSIZE 4096 44 + 45 + /* Garbage collector is trying to kfree() this element. */ 46 + #define TOMOYO_GC_IN_PROGRESS -1 48 47 49 48 /* Profile number is an integer between 0 and 255. */ 50 49 #define TOMOYO_MAX_PROFILES 256 ··· 155 136 /* Index numbers for entry type. */ 156 137 enum tomoyo_policy_id { 157 138 TOMOYO_ID_GROUP, 139 + TOMOYO_ID_ADDRESS_GROUP, 158 140 TOMOYO_ID_PATH_GROUP, 159 141 TOMOYO_ID_NUMBER_GROUP, 160 142 TOMOYO_ID_TRANSITION_CONTROL, ··· 182 162 TOMOYO_MAX_DOMAIN_INFO_FLAGS 183 163 }; 184 164 165 + /* Index numbers for audit type. */ 166 + enum tomoyo_grant_log { 167 + /* Follow profile's configuration. */ 168 + TOMOYO_GRANTLOG_AUTO, 169 + /* Do not generate grant log. */ 170 + TOMOYO_GRANTLOG_NO, 171 + /* Generate grant_log. */ 172 + TOMOYO_GRANTLOG_YES, 173 + }; 174 + 185 175 /* Index numbers for group entries. 
*/ 186 176 enum tomoyo_group_id { 187 177 TOMOYO_PATH_GROUP, 188 178 TOMOYO_NUMBER_GROUP, 179 + TOMOYO_ADDRESS_GROUP, 189 180 TOMOYO_MAX_GROUP 190 181 }; 191 182 ··· 227 196 TOMOYO_TYPE_PATH_NUMBER_ACL, 228 197 TOMOYO_TYPE_MKDEV_ACL, 229 198 TOMOYO_TYPE_MOUNT_ACL, 199 + TOMOYO_TYPE_INET_ACL, 200 + TOMOYO_TYPE_UNIX_ACL, 201 + TOMOYO_TYPE_ENV_ACL, 202 + TOMOYO_TYPE_MANUAL_TASK_ACL, 230 203 }; 231 204 232 205 /* Index numbers for access controls with one pathname. */ ··· 263 228 TOMOYO_MAX_MKDEV_OPERATION 264 229 }; 265 230 231 + /* Index numbers for socket operations. */ 232 + enum tomoyo_network_acl_index { 233 + TOMOYO_NETWORK_BIND, /* bind() operation. */ 234 + TOMOYO_NETWORK_LISTEN, /* listen() operation. */ 235 + TOMOYO_NETWORK_CONNECT, /* connect() operation. */ 236 + TOMOYO_NETWORK_SEND, /* send() operation. */ 237 + TOMOYO_MAX_NETWORK_OPERATION 238 + }; 239 + 266 240 /* Index numbers for access controls with two pathnames. */ 267 241 enum tomoyo_path2_acl_index { 268 242 TOMOYO_TYPE_LINK, ··· 299 255 TOMOYO_EXCEPTIONPOLICY, 300 256 TOMOYO_PROCESS_STATUS, 301 257 TOMOYO_STAT, 302 - TOMOYO_SELFDOMAIN, 303 258 TOMOYO_AUDIT, 304 259 TOMOYO_VERSION, 305 260 TOMOYO_PROFILE, ··· 343 300 TOMOYO_MAC_FILE_MOUNT, 344 301 TOMOYO_MAC_FILE_UMOUNT, 345 302 TOMOYO_MAC_FILE_PIVOT_ROOT, 303 + TOMOYO_MAC_NETWORK_INET_STREAM_BIND, 304 + TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN, 305 + TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT, 306 + TOMOYO_MAC_NETWORK_INET_DGRAM_BIND, 307 + TOMOYO_MAC_NETWORK_INET_DGRAM_SEND, 308 + TOMOYO_MAC_NETWORK_INET_RAW_BIND, 309 + TOMOYO_MAC_NETWORK_INET_RAW_SEND, 310 + TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND, 311 + TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN, 312 + TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT, 313 + TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND, 314 + TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND, 315 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND, 316 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN, 317 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT, 318 + TOMOYO_MAC_ENVIRON, 346 319 
TOMOYO_MAX_MAC_INDEX 347 320 }; 348 321 349 322 /* Index numbers for category of functionality. */ 350 323 enum tomoyo_mac_category_index { 351 324 TOMOYO_MAC_CATEGORY_FILE, 325 + TOMOYO_MAC_CATEGORY_NETWORK, 326 + TOMOYO_MAC_CATEGORY_MISC, 352 327 TOMOYO_MAX_MAC_CATEGORY_INDEX 353 328 }; 354 329 ··· 401 340 /* Common header for holding ACL entries. */ 402 341 struct tomoyo_acl_head { 403 342 struct list_head list; 404 - bool is_deleted; 343 + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ 405 344 } __packed; 406 345 407 346 /* Common header for shared entries. */ ··· 458 397 u8 operation; 459 398 } path_number; 460 399 struct { 400 + const struct tomoyo_path_info *name; 401 + } environ; 402 + struct { 403 + const __be32 *address; 404 + u16 port; 405 + /* One of values smaller than TOMOYO_SOCK_MAX. */ 406 + u8 protocol; 407 + /* One of values in "enum tomoyo_network_acl_index". */ 408 + u8 operation; 409 + bool is_ipv6; 410 + } inet_network; 411 + struct { 412 + const struct tomoyo_path_info *address; 413 + /* One of values smaller than TOMOYO_SOCK_MAX. */ 414 + u8 protocol; 415 + /* One of values in "enum tomoyo_network_acl_index". */ 416 + u8 operation; 417 + } unix_network; 418 + struct { 461 419 const struct tomoyo_path_info *type; 462 420 const struct tomoyo_path_info *dir; 463 421 const struct tomoyo_path_info *dev; 464 422 unsigned long flags; 465 423 int need_dev; 466 424 } mount; 425 + struct { 426 + const struct tomoyo_path_info *domainname; 427 + } task; 467 428 } param; 429 + struct tomoyo_acl_info *matched_acl; 468 430 u8 param_type; 469 431 bool granted; 470 432 u8 retry; ··· 526 442 u8 value_type[2]; 527 443 }; 528 444 529 - /* Structure for "path_group"/"number_group" directive. */ 445 + /* Structure for holding an IP address. */ 446 + struct tomoyo_ipaddr_union { 447 + struct in6_addr ip[2]; /* Big endian. */ 448 + struct tomoyo_group *group; /* Pointer to address group. */ 449 + bool is_ipv6; /* Valid only if @group == NULL. 
*/ 450 + }; 451 + 452 + /* Structure for "path_group"/"number_group"/"address_group" directive. */ 530 453 struct tomoyo_group { 531 454 struct tomoyo_shared_acl_head head; 532 455 const struct tomoyo_path_info *group_name; ··· 550 459 struct tomoyo_number_group { 551 460 struct tomoyo_acl_head head; 552 461 struct tomoyo_number_union number; 462 + }; 463 + 464 + /* Structure for "address_group" directive. */ 465 + struct tomoyo_address_group { 466 + struct tomoyo_acl_head head; 467 + /* Structure for holding an IP address. */ 468 + struct tomoyo_ipaddr_union address; 553 469 }; 554 470 555 471 /* Subset of "struct stat". Used by conditional ACL and audit logs. */ ··· 618 520 struct tomoyo_request_info r; 619 521 struct tomoyo_obj_info obj; 620 522 struct linux_binprm *bprm; 523 + const struct tomoyo_path_info *transition; 621 524 /* For dumping argv[] and envp[]. */ 622 525 struct tomoyo_page_dump dump; 623 526 /* For temporary use. */ ··· 653 554 u16 names_count; /* Number of "struct tomoyo_name_union names". */ 654 555 u16 argc; /* Number of "struct tomoyo_argv". */ 655 556 u16 envc; /* Number of "struct tomoyo_envp". */ 557 + u8 grant_log; /* One of values in "enum tomoyo_grant_log". */ 558 + const struct tomoyo_path_info *transit; /* Maybe NULL. */ 656 559 /* 657 560 * struct tomoyo_condition_element condition[condc]; 658 561 * struct tomoyo_number_union values[numbers_count]; ··· 668 567 struct tomoyo_acl_info { 669 568 struct list_head list; 670 569 struct tomoyo_condition *cond; /* Maybe NULL. */ 671 - bool is_deleted; 570 + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ 672 571 u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */ 673 572 } __packed; 674 573 ··· 685 584 bool is_deleted; /* Delete flag. */ 686 585 bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; 687 586 atomic_t users; /* Number of referring credentials. */ 587 + }; 588 + 589 + /* 590 + * Structure for "task manual_domain_transition" directive. 
591 + */ 592 + struct tomoyo_task_acl { 593 + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */ 594 + /* Pointer to domainname. */ 595 + const struct tomoyo_path_info *domainname; 688 596 }; 689 597 690 598 /* ··· 746 636 struct tomoyo_name_union dir_name; 747 637 struct tomoyo_name_union fs_type; 748 638 struct tomoyo_number_union flags; 639 + }; 640 + 641 + /* Structure for "misc env" directive in domain policy. */ 642 + struct tomoyo_env_acl { 643 + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */ 644 + const struct tomoyo_path_info *env; /* environment variable */ 645 + }; 646 + 647 + /* Structure for "network inet" directive. */ 648 + struct tomoyo_inet_acl { 649 + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */ 650 + u8 protocol; 651 + u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */ 652 + struct tomoyo_ipaddr_union address; 653 + struct tomoyo_number_union port; 654 + }; 655 + 656 + /* Structure for "network unix" directive. */ 657 + struct tomoyo_unix_acl { 658 + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */ 659 + u8 protocol; 660 + u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */ 661 + struct tomoyo_name_union name; 749 662 }; 750 663 751 664 /* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */ ··· 906 773 struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS]; 907 774 /* List for connecting to tomoyo_namespace_list list. */ 908 775 struct list_head namespace_list; 909 - /* Profile version. Currently only 20100903 is defined. */ 776 + /* Profile version. Currently only 20110903 is defined. */ 910 777 unsigned int profile_version; 911 778 /* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */ 912 779 const char *name; ··· 914 781 915 782 /********** Function prototypes. 
**********/ 916 783 784 + bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, 785 + const struct tomoyo_group *group); 917 786 bool tomoyo_compare_number_union(const unsigned long value, 918 787 const struct tomoyo_number_union *ptr); 919 788 bool tomoyo_condition(struct tomoyo_request_info *r, ··· 931 796 bool tomoyo_number_matches_group(const unsigned long min, 932 797 const unsigned long max, 933 798 const struct tomoyo_group *group); 799 + bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param, 800 + struct tomoyo_ipaddr_union *ptr); 934 801 bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, 935 802 struct tomoyo_name_union *ptr); 936 803 bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, ··· 942 805 bool tomoyo_permstr(const char *string, const char *keyword); 943 806 bool tomoyo_str_starts(char **src, const char *find); 944 807 char *tomoyo_encode(const char *str); 808 + char *tomoyo_encode2(const char *str, int str_len); 945 809 char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, 946 810 va_list args); 947 811 char *tomoyo_read_token(struct tomoyo_acl_param *param); ··· 952 814 const char *tomoyo_yesno(const unsigned int value); 953 815 const struct tomoyo_path_info *tomoyo_compare_name_union 954 816 (const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr); 817 + const struct tomoyo_path_info *tomoyo_get_domainname 818 + (struct tomoyo_acl_param *param); 955 819 const struct tomoyo_path_info *tomoyo_get_name(const char *name); 956 820 const struct tomoyo_path_info *tomoyo_path_matches_group 957 821 (const struct tomoyo_path_info *pathname, const struct tomoyo_group *group); 958 822 int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, 959 823 struct path *path, const int flag); 960 824 int tomoyo_close_control(struct tomoyo_io_buffer *head); 825 + int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env); 826 + int 
tomoyo_execute_permission(struct tomoyo_request_info *r, 827 + const struct tomoyo_path_info *filename); 961 828 int tomoyo_find_next_domain(struct linux_binprm *bprm); 962 829 int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, 963 830 const u8 index); ··· 981 838 unsigned long number); 982 839 int tomoyo_path_perm(const u8 operation, struct path *path, 983 840 const char *target); 984 - int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, 985 - const struct tomoyo_path_info *filename); 986 841 int tomoyo_poll_control(struct file *file, poll_table *wait); 987 842 int tomoyo_poll_log(struct file *file, poll_table *wait); 843 + int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr, 844 + int addr_len); 845 + int tomoyo_socket_connect_permission(struct socket *sock, 846 + struct sockaddr *addr, int addr_len); 847 + int tomoyo_socket_listen_permission(struct socket *sock); 848 + int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg, 849 + int size); 988 850 int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
989 851 __printf(2, 3); 990 852 int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, ··· 1008 860 int tomoyo_write_aggregator(struct tomoyo_acl_param *param); 1009 861 int tomoyo_write_file(struct tomoyo_acl_param *param); 1010 862 int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type); 863 + int tomoyo_write_misc(struct tomoyo_acl_param *param); 864 + int tomoyo_write_inet_network(struct tomoyo_acl_param *param); 1011 865 int tomoyo_write_transition_control(struct tomoyo_acl_param *param, 1012 866 const u8 type); 867 + int tomoyo_write_unix_network(struct tomoyo_acl_param *param); 1013 868 ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, 1014 869 const int buffer_len); 1015 870 ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, ··· 1042 891 void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); 1043 892 void tomoyo_get_attributes(struct tomoyo_obj_info *obj); 1044 893 void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns); 1045 - void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) 
1046 - __printf(2, 3); 1047 894 void tomoyo_load_policy(const char *filename); 1048 - void tomoyo_memory_free(void *ptr); 1049 895 void tomoyo_normalize_line(unsigned char *buffer); 1050 896 void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); 897 + void tomoyo_print_ip(char *buf, const unsigned int size, 898 + const struct tomoyo_ipaddr_union *ptr); 1051 899 void tomoyo_print_ulong(char *buffer, const int buffer_len, 1052 900 const unsigned long value, const u8 type); 1053 901 void tomoyo_put_name_union(struct tomoyo_name_union *ptr); ··· 1069 919 + TOMOYO_MAX_MAC_CATEGORY_INDEX]; 1070 920 extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; 1071 921 extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; 922 + extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX]; 923 + extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION]; 1072 924 extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX]; 1073 925 extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; 1074 926 extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; ··· 1247 1095 return a->values[0] == b->values[0] && a->values[1] == b->values[1] && 1248 1096 a->group == b->group && a->value_type[0] == b->value_type[0] && 1249 1097 a->value_type[1] == b->value_type[1]; 1098 + } 1099 + 1100 + /** 1101 + * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry. 1102 + * 1103 + * @a: Pointer to "struct tomoyo_ipaddr_union". 1104 + * @b: Pointer to "struct tomoyo_ipaddr_union". 1105 + * 1106 + * Returns true if @a == @b, false otherwise. 1107 + */ 1108 + static inline bool tomoyo_same_ipaddr_union 1109 + (const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b) 1110 + { 1111 + return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group && 1112 + a->is_ipv6 == b->is_ipv6; 1250 1113 } 1251 1114 1252 1115 /**
+65 -6
security/tomoyo/condition.c
··· 348 348 a->numbers_count == b->numbers_count && 349 349 a->names_count == b->names_count && 350 350 a->argc == b->argc && a->envc == b->envc && 351 + a->grant_log == b->grant_log && a->transit == b->transit && 351 352 !memcmp(a + 1, b + 1, a->size - sizeof(*a)); 352 353 } 353 354 ··· 400 399 found = true; 401 400 goto out; 402 401 } 403 - list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) { 404 - if (!tomoyo_same_condition(ptr, entry)) 402 + list_for_each_entry(ptr, &tomoyo_condition_list, head.list) { 403 + if (!tomoyo_same_condition(ptr, entry) || 404 + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) 405 405 continue; 406 406 /* Same entry found. Share this entry. */ 407 407 atomic_inc(&ptr->head.users); ··· 412 410 if (!found) { 413 411 if (tomoyo_memory_ok(entry)) { 414 412 atomic_set(&entry->head.users, 1); 415 - list_add_rcu(&entry->head.list, 416 - &tomoyo_condition_list); 413 + list_add(&entry->head.list, &tomoyo_condition_list); 417 414 } else { 418 415 found = true; 419 416 ptr = NULL; ··· 426 425 entry = ptr; 427 426 } 428 427 return entry; 428 + } 429 + 430 + /** 431 + * tomoyo_get_transit_preference - Parse domain transition preference for execve(). 432 + * 433 + * @param: Pointer to "struct tomoyo_acl_param". 434 + * @e: Pointer to "struct tomoyo_condition". 435 + * 436 + * Returns the condition string part. 
437 + */ 438 + static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param, 439 + struct tomoyo_condition *e) 440 + { 441 + char * const pos = param->data; 442 + bool flag; 443 + if (*pos == '<') { 444 + e->transit = tomoyo_get_domainname(param); 445 + goto done; 446 + } 447 + { 448 + char *cp = strchr(pos, ' '); 449 + if (cp) 450 + *cp = '\0'; 451 + flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") || 452 + !strcmp(pos, "initialize") || !strcmp(pos, "reset") || 453 + !strcmp(pos, "child") || !strcmp(pos, "parent"); 454 + if (cp) 455 + *cp = ' '; 456 + } 457 + if (!flag) 458 + return pos; 459 + e->transit = tomoyo_get_name(tomoyo_read_token(param)); 460 + done: 461 + if (e->transit) 462 + return param->data; 463 + /* 464 + * Return a bad read-only condition string that will let 465 + * tomoyo_get_condition() return NULL. 466 + */ 467 + return "/"; 429 468 } 430 469 431 470 /** ··· 484 443 struct tomoyo_argv *argv = NULL; 485 444 struct tomoyo_envp *envp = NULL; 486 445 struct tomoyo_condition e = { }; 487 - char * const start_of_string = param->data; 446 + char * const start_of_string = 447 + tomoyo_get_transit_preference(param, &e); 488 448 char * const end_of_string = start_of_string + strlen(start_of_string); 489 449 char *pos; 490 450 rerun: ··· 528 486 goto out; 529 487 dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word, 530 488 is_not ? "!" 
: "", right_word); 489 + if (!strcmp(left_word, "grant_log")) { 490 + if (entry) { 491 + if (is_not || 492 + entry->grant_log != TOMOYO_GRANTLOG_AUTO) 493 + goto out; 494 + else if (!strcmp(right_word, "yes")) 495 + entry->grant_log = TOMOYO_GRANTLOG_YES; 496 + else if (!strcmp(right_word, "no")) 497 + entry->grant_log = TOMOYO_GRANTLOG_NO; 498 + else 499 + goto out; 500 + } 501 + continue; 502 + } 531 503 if (!strncmp(left_word, "exec.argv[", 10)) { 532 504 if (!argv) { 533 505 e.argc++; ··· 649 593 + e.envc * sizeof(struct tomoyo_envp); 650 594 entry = kzalloc(e.size, GFP_NOFS); 651 595 if (!entry) 652 - return NULL; 596 + goto out2; 653 597 *entry = e; 598 + e.transit = NULL; 654 599 condp = (struct tomoyo_condition_element *) (entry + 1); 655 600 numbers_p = (struct tomoyo_number_union *) (condp + e.condc); 656 601 names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count); ··· 678 621 tomoyo_del_condition(&entry->head.list); 679 622 kfree(entry); 680 623 } 624 + out2: 625 + tomoyo_put_name(e.transit); 681 626 return NULL; 682 627 } 683 628
+172 -37
security/tomoyo/domain.c
··· 39 39 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 40 40 return -ENOMEM; 41 41 list_for_each_entry_rcu(entry, list, list) { 42 + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) 43 + continue; 42 44 if (!check_duplicate(entry, new_entry)) 43 45 continue; 44 46 entry->is_deleted = param->is_delete; ··· 104 102 new_entry->cond = tomoyo_get_condition(param); 105 103 if (!new_entry->cond) 106 104 return -EINVAL; 105 + /* 106 + * Domain transition preference is allowed for only 107 + * "file execute" entries. 108 + */ 109 + if (new_entry->cond->transit && 110 + !(new_entry->type == TOMOYO_TYPE_PATH_ACL && 111 + container_of(new_entry, struct tomoyo_path_acl, head) 112 + ->perm == 1 << TOMOYO_TYPE_EXECUTE)) 113 + goto out; 107 114 } 108 115 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 109 116 goto out; 110 117 list_for_each_entry_rcu(entry, list, list) { 118 + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) 119 + continue; 111 120 if (!tomoyo_same_acl_head(entry, new_entry) || 112 121 !check_duplicate(entry, new_entry)) 113 122 continue; ··· 170 157 continue; 171 158 if (!tomoyo_condition(r, ptr->cond)) 172 159 continue; 160 + r->matched_acl = ptr; 173 161 r->granted = true; 174 162 return; 175 163 } ··· 515 501 * that domain. Do not perform domain transition if 516 502 * profile for that domain is not yet created. 517 503 */ 518 - if (!entry->ns->profile_ptr[entry->profile]) 504 + if (tomoyo_policy_loaded && 505 + !entry->ns->profile_ptr[entry->profile]) 519 506 return NULL; 520 507 } 521 508 return entry; ··· 572 557 tomoyo_write_log(&r, "use_profile %u\n", 573 558 entry->profile); 574 559 tomoyo_write_log(&r, "use_group %u\n", entry->group); 560 + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); 575 561 } 576 562 } 577 563 return entry; 564 + } 565 + 566 + /** 567 + * tomoyo_environ - Check permission for environment variable names. 568 + * 569 + * @ee: Pointer to "struct tomoyo_execve". 570 + * 571 + * Returns 0 on success, negative value otherwise. 
572 + */ 573 + static int tomoyo_environ(struct tomoyo_execve *ee) 574 + { 575 + struct tomoyo_request_info *r = &ee->r; 576 + struct linux_binprm *bprm = ee->bprm; 577 + /* env_page.data is allocated by tomoyo_dump_page(). */ 578 + struct tomoyo_page_dump env_page = { }; 579 + char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */ 580 + int arg_len = 0; 581 + unsigned long pos = bprm->p; 582 + int offset = pos % PAGE_SIZE; 583 + int argv_count = bprm->argc; 584 + int envp_count = bprm->envc; 585 + int error = -ENOMEM; 586 + 587 + ee->r.type = TOMOYO_MAC_ENVIRON; 588 + ee->r.profile = r->domain->profile; 589 + ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile, 590 + TOMOYO_MAC_ENVIRON); 591 + if (!r->mode || !envp_count) 592 + return 0; 593 + arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS); 594 + if (!arg_ptr) 595 + goto out; 596 + while (error == -ENOMEM) { 597 + if (!tomoyo_dump_page(bprm, pos, &env_page)) 598 + goto out; 599 + pos += PAGE_SIZE - offset; 600 + /* Read. */ 601 + while (argv_count && offset < PAGE_SIZE) { 602 + if (!env_page.data[offset++]) 603 + argv_count--; 604 + } 605 + if (argv_count) { 606 + offset = 0; 607 + continue; 608 + } 609 + while (offset < PAGE_SIZE) { 610 + const unsigned char c = env_page.data[offset++]; 611 + 612 + if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { 613 + if (c == '=') { 614 + arg_ptr[arg_len++] = '\0'; 615 + } else if (c == '\\') { 616 + arg_ptr[arg_len++] = '\\'; 617 + arg_ptr[arg_len++] = '\\'; 618 + } else if (c > ' ' && c < 127) { 619 + arg_ptr[arg_len++] = c; 620 + } else { 621 + arg_ptr[arg_len++] = '\\'; 622 + arg_ptr[arg_len++] = (c >> 6) + '0'; 623 + arg_ptr[arg_len++] 624 + = ((c >> 3) & 7) + '0'; 625 + arg_ptr[arg_len++] = (c & 7) + '0'; 626 + } 627 + } else { 628 + arg_ptr[arg_len] = '\0'; 629 + } 630 + if (c) 631 + continue; 632 + if (tomoyo_env_perm(r, arg_ptr)) { 633 + error = -EPERM; 634 + break; 635 + } 636 + if (!--envp_count) { 637 + error = 0; 638 + break; 639 + } 640 + arg_len = 0; 641 + 
} 642 + offset = 0; 643 + } 644 + out: 645 + if (r->mode != TOMOYO_CONFIG_ENFORCING) 646 + error = 0; 647 + kfree(env_page.data); 648 + kfree(arg_ptr); 649 + return error; 578 650 } 579 651 580 652 /** ··· 679 577 struct tomoyo_domain_info *domain = NULL; 680 578 const char *original_name = bprm->filename; 681 579 int retval = -ENOMEM; 682 - bool need_kfree = false; 683 580 bool reject_on_transition_failure = false; 684 - struct tomoyo_path_info rn = { }; /* real name */ 581 + const struct tomoyo_path_info *candidate; 582 + struct tomoyo_path_info exename; 685 583 struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS); 584 + 686 585 if (!ee) 687 586 return -ENOMEM; 688 587 ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS); ··· 697 594 ee->bprm = bprm; 698 595 ee->r.obj = &ee->obj; 699 596 ee->obj.path1 = bprm->file->f_path; 700 - retry: 701 - if (need_kfree) { 702 - kfree(rn.name); 703 - need_kfree = false; 704 - } 705 597 /* Get symlink's pathname of program. */ 706 598 retval = -ENOENT; 707 - rn.name = tomoyo_realpath_nofollow(original_name); 708 - if (!rn.name) 599 + exename.name = tomoyo_realpath_nofollow(original_name); 600 + if (!exename.name) 709 601 goto out; 710 - tomoyo_fill_path_info(&rn); 711 - need_kfree = true; 712 - 602 + tomoyo_fill_path_info(&exename); 603 + retry: 713 604 /* Check 'aggregator' directive. */ 714 605 { 715 606 struct tomoyo_aggregator *ptr; 716 607 struct list_head *list = 717 608 &old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR]; 718 609 /* Check 'aggregator' directive. */ 610 + candidate = &exename; 719 611 list_for_each_entry_rcu(ptr, list, head.list) { 720 612 if (ptr->head.is_deleted || 721 - !tomoyo_path_matches_pattern(&rn, 613 + !tomoyo_path_matches_pattern(&exename, 722 614 ptr->original_name)) 723 615 continue; 724 - kfree(rn.name); 725 - need_kfree = false; 726 - /* This is OK because it is read only. 
*/ 727 - rn = *ptr->aggregated_name; 616 + candidate = ptr->aggregated_name; 728 617 break; 729 618 } 730 619 } 731 620 732 621 /* Check execute permission. */ 733 - retval = tomoyo_path_permission(&ee->r, TOMOYO_TYPE_EXECUTE, &rn); 622 + retval = tomoyo_execute_permission(&ee->r, candidate); 734 623 if (retval == TOMOYO_RETRY_REQUEST) 735 624 goto retry; 736 625 if (retval < 0) ··· 733 638 * wildcard) rather than the pathname passed to execve() 734 639 * (which never contains wildcard). 735 640 */ 736 - if (ee->r.param.path.matched_path) { 737 - if (need_kfree) 738 - kfree(rn.name); 739 - need_kfree = false; 740 - /* This is OK because it is read only. */ 741 - rn = *ee->r.param.path.matched_path; 742 - } 641 + if (ee->r.param.path.matched_path) 642 + candidate = ee->r.param.path.matched_path; 743 643 744 - /* Calculate domain to transit to. */ 644 + /* 645 + * Check for domain transition preference if "file execute" matched. 646 + * If preference is given, make do_execve() fail if domain transition 647 + * has failed, for domain transition preference should be used with 648 + * destination domain defined. 
649 + */ 650 + if (ee->transition) { 651 + const char *domainname = ee->transition->name; 652 + reject_on_transition_failure = true; 653 + if (!strcmp(domainname, "keep")) 654 + goto force_keep_domain; 655 + if (!strcmp(domainname, "child")) 656 + goto force_child_domain; 657 + if (!strcmp(domainname, "reset")) 658 + goto force_reset_domain; 659 + if (!strcmp(domainname, "initialize")) 660 + goto force_initialize_domain; 661 + if (!strcmp(domainname, "parent")) { 662 + char *cp; 663 + strncpy(ee->tmp, old_domain->domainname->name, 664 + TOMOYO_EXEC_TMPSIZE - 1); 665 + cp = strrchr(ee->tmp, ' '); 666 + if (cp) 667 + *cp = '\0'; 668 + } else if (*domainname == '<') 669 + strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1); 670 + else 671 + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", 672 + old_domain->domainname->name, domainname); 673 + goto force_jump_domain; 674 + } 675 + /* 676 + * No domain transition preference specified. 677 + * Calculate domain to transit to. 678 + */ 745 679 switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname, 746 - &rn)) { 680 + candidate)) { 747 681 case TOMOYO_TRANSITION_CONTROL_RESET: 682 + force_reset_domain: 748 683 /* Transit to the root of specified namespace. */ 749 - snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", rn.name); 684 + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", 685 + candidate->name); 750 686 /* 751 687 * Make do_execve() fail if domain transition across namespaces 752 688 * has failed. ··· 785 659 reject_on_transition_failure = true; 786 660 break; 787 661 case TOMOYO_TRANSITION_CONTROL_INITIALIZE: 662 + force_initialize_domain: 788 663 /* Transit to the child of current namespace's root. */ 789 664 snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", 790 - old_domain->ns->name, rn.name); 665 + old_domain->ns->name, candidate->name); 791 666 break; 792 667 case TOMOYO_TRANSITION_CONTROL_KEEP: 668 + force_keep_domain: 793 669 /* Keep current domain. 
*/ 794 670 domain = old_domain; 795 671 break; ··· 805 677 * before /sbin/init. 806 678 */ 807 679 domain = old_domain; 808 - } else { 809 - /* Normal domain transition. */ 810 - snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", 811 - old_domain->domainname->name, rn.name); 680 + break; 812 681 } 682 + force_child_domain: 683 + /* Normal domain transition. */ 684 + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", 685 + old_domain->domainname->name, candidate->name); 813 686 break; 814 687 } 688 + force_jump_domain: 815 689 if (!domain) 816 690 domain = tomoyo_assign_domain(ee->tmp, true); 817 691 if (domain) ··· 841 711 /* Update reference count on "struct tomoyo_domain_info". */ 842 712 atomic_inc(&domain->users); 843 713 bprm->cred->security = domain; 844 - if (need_kfree) 845 - kfree(rn.name); 714 + kfree(exename.name); 715 + if (!retval) { 716 + ee->r.domain = domain; 717 + retval = tomoyo_environ(ee); 718 + } 846 719 kfree(ee->tmp); 847 720 kfree(ee->dump.data); 848 721 kfree(ee); ··· 865 732 struct tomoyo_page_dump *dump) 866 733 { 867 734 struct page *page; 868 - /* dump->data is released by tomoyo_finish_execve(). */ 735 + 736 + /* dump->data is released by tomoyo_find_next_domain(). */ 869 737 if (!dump->data) { 870 738 dump->data = kzalloc(PAGE_SIZE, GFP_NOFS); 871 739 if (!dump->data) ··· 887 753 * So do I. 888 754 */ 889 755 char *kaddr = kmap_atomic(page, KM_USER0); 756 + 890 757 dump->page = page; 891 758 memcpy(dump->data + offset, kaddr + offset, 892 759 PAGE_SIZE - offset);
+122
security/tomoyo/environ.c
··· 1 + /* 2 + * security/tomoyo/environ.c 3 + * 4 + * Copyright (C) 2005-2011 NTT DATA CORPORATION 5 + */ 6 + 7 + #include "common.h" 8 + 9 + /** 10 + * tomoyo_check_env_acl - Check permission for environment variable's name. 11 + * 12 + * @r: Pointer to "struct tomoyo_request_info". 13 + * @ptr: Pointer to "struct tomoyo_acl_info". 14 + * 15 + * Returns true if granted, false otherwise. 16 + */ 17 + static bool tomoyo_check_env_acl(struct tomoyo_request_info *r, 18 + const struct tomoyo_acl_info *ptr) 19 + { 20 + const struct tomoyo_env_acl *acl = 21 + container_of(ptr, typeof(*acl), head); 22 + 23 + return tomoyo_path_matches_pattern(r->param.environ.name, acl->env); 24 + } 25 + 26 + /** 27 + * tomoyo_audit_env_log - Audit environment variable name log. 28 + * 29 + * @r: Pointer to "struct tomoyo_request_info". 30 + * 31 + * Returns 0 on success, negative value otherwise. 32 + */ 33 + static int tomoyo_audit_env_log(struct tomoyo_request_info *r) 34 + { 35 + return tomoyo_supervisor(r, "misc env %s\n", 36 + r->param.environ.name->name); 37 + } 38 + 39 + /** 40 + * tomoyo_env_perm - Check permission for environment variable's name. 41 + * 42 + * @r: Pointer to "struct tomoyo_request_info". 43 + * @env: The name of environment variable. 44 + * 45 + * Returns 0 on success, negative value otherwise. 46 + * 47 + * Caller holds tomoyo_read_lock(). 48 + */ 49 + int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env) 50 + { 51 + struct tomoyo_path_info environ; 52 + int error; 53 + 54 + if (!env || !*env) 55 + return 0; 56 + environ.name = env; 57 + tomoyo_fill_path_info(&environ); 58 + r->param_type = TOMOYO_TYPE_ENV_ACL; 59 + r->param.environ.name = &environ; 60 + do { 61 + tomoyo_check_acl(r, tomoyo_check_env_acl); 62 + error = tomoyo_audit_env_log(r); 63 + } while (error == TOMOYO_RETRY_REQUEST); 64 + return error; 65 + } 66 + 67 + /** 68 + * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry. 
69 + * 70 + * @a: Pointer to "struct tomoyo_acl_info". 71 + * @b: Pointer to "struct tomoyo_acl_info". 72 + * 73 + * Returns true if @a == @b, false otherwise. 74 + */ 75 + static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a, 76 + const struct tomoyo_acl_info *b) 77 + { 78 + const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); 79 + const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); 80 + 81 + return p1->env == p2->env; 82 + } 83 + 84 + /** 85 + * tomoyo_write_env - Write "struct tomoyo_env_acl" list. 86 + * 87 + * @param: Pointer to "struct tomoyo_acl_param". 88 + * 89 + * Returns 0 on success, negative value otherwise. 90 + * 91 + * Caller holds tomoyo_read_lock(). 92 + */ 93 + static int tomoyo_write_env(struct tomoyo_acl_param *param) 94 + { 95 + struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; 96 + int error = -ENOMEM; 97 + const char *data = tomoyo_read_token(param); 98 + 99 + if (!tomoyo_correct_word(data) || strchr(data, '=')) 100 + return -EINVAL; 101 + e.env = tomoyo_get_name(data); 102 + if (!e.env) 103 + return error; 104 + error = tomoyo_update_domain(&e.head, sizeof(e), param, 105 + tomoyo_same_env_acl, NULL); 106 + tomoyo_put_name(e.env); 107 + return error; 108 + } 109 + 110 + /** 111 + * tomoyo_write_misc - Update environment variable list. 112 + * 113 + * @param: Pointer to "struct tomoyo_acl_param". 114 + * 115 + * Returns 0 on success, negative value otherwise. 116 + */ 117 + int tomoyo_write_misc(struct tomoyo_acl_param *param) 118 + { 119 + if (tomoyo_str_starts(&param->data, "env ")) 120 + return tomoyo_write_env(param); 121 + return -EINVAL; 122 + }
+34 -8
security/tomoyo/file.c
··· 555 555 * 556 556 * Caller holds tomoyo_read_lock(). 557 557 */ 558 - int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, 559 - const struct tomoyo_path_info *filename) 558 + static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, 559 + const struct tomoyo_path_info *filename) 560 560 { 561 561 int error; 562 562 ··· 570 570 do { 571 571 tomoyo_check_acl(r, tomoyo_check_path_acl); 572 572 error = tomoyo_audit_path_log(r); 573 - /* 574 - * Do not retry for execute request, for alias may have 575 - * changed. 576 - */ 577 - } while (error == TOMOYO_RETRY_REQUEST && 578 - operation != TOMOYO_TYPE_EXECUTE); 573 + } while (error == TOMOYO_RETRY_REQUEST); 579 574 return error; 575 + } 576 + 577 + /** 578 + * tomoyo_execute_permission - Check permission for execute operation. 579 + * 580 + * @r: Pointer to "struct tomoyo_request_info". 581 + * @filename: Filename to check. 582 + * 583 + * Returns 0 on success, negative value otherwise. 584 + * 585 + * Caller holds tomoyo_read_lock(). 586 + */ 587 + int tomoyo_execute_permission(struct tomoyo_request_info *r, 588 + const struct tomoyo_path_info *filename) 589 + { 590 + /* 591 + * Unlike other permission checks, this check is done regardless of 592 + * profile mode settings in order to check for domain transition 593 + * preference. 594 + */ 595 + r->type = TOMOYO_MAC_FILE_EXECUTE; 596 + r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); 597 + r->param_type = TOMOYO_TYPE_PATH_ACL; 598 + r->param.path.filename = filename; 599 + r->param.path.operation = TOMOYO_TYPE_EXECUTE; 600 + tomoyo_check_acl(r, tomoyo_check_path_acl); 601 + r->ee->transition = r->matched_acl && r->matched_acl->cond ? 602 + r->matched_acl->cond->transit : NULL; 603 + if (r->mode != TOMOYO_CONFIG_DISABLED) 604 + return tomoyo_audit_path_log(r); 605 + return 0; 580 606 } 581 607 582 608 /**
+235 -305
security/tomoyo/gc.c
··· 8 8 #include <linux/kthread.h> 9 9 #include <linux/slab.h> 10 10 11 + /** 12 + * tomoyo_memory_free - Free memory for elements. 13 + * 14 + * @ptr: Pointer to allocated memory. 15 + * 16 + * Returns nothing. 17 + * 18 + * Caller holds tomoyo_policy_lock mutex. 19 + */ 20 + static inline void tomoyo_memory_free(void *ptr) 21 + { 22 + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr); 23 + kfree(ptr); 24 + } 25 + 11 26 /* The list for "struct tomoyo_io_buffer". */ 12 27 static LIST_HEAD(tomoyo_io_buffer_list); 13 28 /* Lock for protecting tomoyo_io_buffer_list. */ 14 29 static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock); 15 - 16 - /* Size of an element. */ 17 - static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = { 18 - [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group), 19 - [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group), 20 - [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group), 21 - [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator), 22 - [TOMOYO_ID_TRANSITION_CONTROL] = 23 - sizeof(struct tomoyo_transition_control), 24 - [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager), 25 - /* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */ 26 - /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */ 27 - /* [TOMOYO_ID_ACL] = 28 - tomoyo_acl_size["struct tomoyo_acl_info"->type], */ 29 - [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info), 30 - }; 31 - 32 - /* Size of a domain ACL element. */ 33 - static const u8 tomoyo_acl_size[] = { 34 - [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl), 35 - [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl), 36 - [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl), 37 - [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl), 38 - [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl), 39 - }; 40 30 41 31 /** 42 32 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not. 
··· 45 55 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { 46 56 head->users++; 47 57 spin_unlock(&tomoyo_io_buffer_list_lock); 48 - if (mutex_lock_interruptible(&head->io_sem)) { 49 - in_use = true; 50 - goto out; 51 - } 58 + mutex_lock(&head->io_sem); 52 59 if (head->r.domain == element || head->r.group == element || 53 60 head->r.acl == element || &head->w.domain->list == element) 54 61 in_use = true; 55 62 mutex_unlock(&head->io_sem); 56 - out: 57 63 spin_lock(&tomoyo_io_buffer_list_lock); 58 64 head->users--; 59 65 if (in_use) ··· 63 77 * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not. 64 78 * 65 79 * @string: String to check. 66 - * @size: Memory allocated for @string . 67 80 * 68 81 * Returns true if @string is used by /sys/kernel/security/tomoyo/ users, 69 82 * false otherwise. 70 83 */ 71 - static bool tomoyo_name_used_by_io_buffer(const char *string, 72 - const size_t size) 84 + static bool tomoyo_name_used_by_io_buffer(const char *string) 73 85 { 74 86 struct tomoyo_io_buffer *head; 87 + const size_t size = strlen(string) + 1; 75 88 bool in_use = false; 76 89 77 90 spin_lock(&tomoyo_io_buffer_list_lock); ··· 78 93 int i; 79 94 head->users++; 80 95 spin_unlock(&tomoyo_io_buffer_list_lock); 81 - if (mutex_lock_interruptible(&head->io_sem)) { 82 - in_use = true; 83 - goto out; 84 - } 96 + mutex_lock(&head->io_sem); 85 97 for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) { 86 98 const char *w = head->r.w[i]; 87 99 if (w < string || w > string + size) ··· 87 105 break; 88 106 } 89 107 mutex_unlock(&head->io_sem); 90 - out: 91 108 spin_lock(&tomoyo_io_buffer_list_lock); 92 109 head->users--; 93 110 if (in_use) ··· 96 115 return in_use; 97 116 } 98 117 99 - /* Structure for garbage collection. */ 100 - struct tomoyo_gc { 101 - struct list_head list; 102 - enum tomoyo_policy_id type; 103 - size_t size; 104 - struct list_head *element; 105 - }; 106 - /* List of entries to be deleted. 
*/ 107 - static LIST_HEAD(tomoyo_gc_list); 108 - /* Length of tomoyo_gc_list. */ 109 - static int tomoyo_gc_list_len; 110 - 111 - /** 112 - * tomoyo_add_to_gc - Add an entry to to be deleted list. 113 - * 114 - * @type: One of values in "enum tomoyo_policy_id". 115 - * @element: Pointer to "struct list_head". 116 - * 117 - * Returns true on success, false otherwise. 118 - * 119 - * Caller holds tomoyo_policy_lock mutex. 120 - * 121 - * Adding an entry needs kmalloc(). Thus, if we try to add thousands of 122 - * entries at once, it will take too long time. Thus, do not add more than 128 123 - * entries per a scan. But to be able to handle worst case where all entries 124 - * are in-use, we accept one more entry per a scan. 125 - * 126 - * If we use singly linked list using "struct list_head"->prev (which is 127 - * LIST_POISON2), we can avoid kmalloc(). 128 - */ 129 - static bool tomoyo_add_to_gc(const int type, struct list_head *element) 130 - { 131 - struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 132 - if (!entry) 133 - return false; 134 - entry->type = type; 135 - if (type == TOMOYO_ID_ACL) 136 - entry->size = tomoyo_acl_size[ 137 - container_of(element, 138 - typeof(struct tomoyo_acl_info), 139 - list)->type]; 140 - else if (type == TOMOYO_ID_NAME) 141 - entry->size = strlen(container_of(element, 142 - typeof(struct tomoyo_name), 143 - head.list)->entry.name) + 1; 144 - else if (type == TOMOYO_ID_CONDITION) 145 - entry->size = 146 - container_of(element, typeof(struct tomoyo_condition), 147 - head.list)->size; 148 - else 149 - entry->size = tomoyo_element_size[type]; 150 - entry->element = element; 151 - list_add(&entry->list, &tomoyo_gc_list); 152 - list_del_rcu(element); 153 - return tomoyo_gc_list_len++ < 128; 154 - } 155 - 156 - /** 157 - * tomoyo_element_linked_by_gc - Validate next element of an entry. 158 - * 159 - * @element: Pointer to an element. 160 - * @size: Size of @element in byte. 
161 - * 162 - * Returns true if @element is linked by other elements in the garbage 163 - * collector's queue, false otherwise. 164 - */ 165 - static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size) 166 - { 167 - struct tomoyo_gc *p; 168 - list_for_each_entry(p, &tomoyo_gc_list, list) { 169 - const u8 *ptr = (const u8 *) p->element->next; 170 - if (ptr < element || element + size < ptr) 171 - continue; 172 - return true; 173 - } 174 - return false; 175 - } 176 - 177 118 /** 178 119 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control". 179 120 * ··· 103 200 * 104 201 * Returns nothing. 105 202 */ 106 - static void tomoyo_del_transition_control(struct list_head *element) 203 + static inline void tomoyo_del_transition_control(struct list_head *element) 107 204 { 108 205 struct tomoyo_transition_control *ptr = 109 206 container_of(element, typeof(*ptr), head.list); ··· 118 215 * 119 216 * Returns nothing. 120 217 */ 121 - static void tomoyo_del_aggregator(struct list_head *element) 218 + static inline void tomoyo_del_aggregator(struct list_head *element) 122 219 { 123 220 struct tomoyo_aggregator *ptr = 124 221 container_of(element, typeof(*ptr), head.list); ··· 133 230 * 134 231 * Returns nothing. 
135 232 */ 136 - static void tomoyo_del_manager(struct list_head *element) 233 + static inline void tomoyo_del_manager(struct list_head *element) 137 234 { 138 235 struct tomoyo_manager *ptr = 139 236 container_of(element, typeof(*ptr), head.list); ··· 196 293 tomoyo_put_number_union(&entry->flags); 197 294 } 198 295 break; 296 + case TOMOYO_TYPE_ENV_ACL: 297 + { 298 + struct tomoyo_env_acl *entry = 299 + container_of(acl, typeof(*entry), head); 300 + 301 + tomoyo_put_name(entry->env); 302 + } 303 + break; 304 + case TOMOYO_TYPE_INET_ACL: 305 + { 306 + struct tomoyo_inet_acl *entry = 307 + container_of(acl, typeof(*entry), head); 308 + 309 + tomoyo_put_group(entry->address.group); 310 + tomoyo_put_number_union(&entry->port); 311 + } 312 + break; 313 + case TOMOYO_TYPE_UNIX_ACL: 314 + { 315 + struct tomoyo_unix_acl *entry = 316 + container_of(acl, typeof(*entry), head); 317 + 318 + tomoyo_put_name_union(&entry->name); 319 + } 320 + break; 321 + case TOMOYO_TYPE_MANUAL_TASK_ACL: 322 + { 323 + struct tomoyo_task_acl *entry = 324 + container_of(acl, typeof(*entry), head); 325 + tomoyo_put_name(entry->domainname); 326 + } 327 + break; 199 328 } 200 329 } 201 330 ··· 236 301 * 237 302 * @element: Pointer to "struct list_head". 238 303 * 239 - * Returns true if deleted, false otherwise. 304 + * Returns nothing. 305 + * 306 + * Caller holds tomoyo_policy_lock mutex. 240 307 */ 241 - static bool tomoyo_del_domain(struct list_head *element) 308 + static inline void tomoyo_del_domain(struct list_head *element) 242 309 { 243 310 struct tomoyo_domain_info *domain = 244 311 container_of(element, typeof(*domain), list); 245 312 struct tomoyo_acl_info *acl; 246 313 struct tomoyo_acl_info *tmp; 247 314 /* 248 - * Since we don't protect whole execve() operation using SRCU, 249 - * we need to recheck domain->users at this point. 250 - * 251 - * (1) Reader starts SRCU section upon execve(). 252 - * (2) Reader traverses tomoyo_domain_list and finds this domain. 
253 - * (3) Writer marks this domain as deleted. 254 - * (4) Garbage collector removes this domain from tomoyo_domain_list 255 - * because this domain is marked as deleted and used by nobody. 256 - * (5) Reader saves reference to this domain into 257 - * "struct linux_binprm"->cred->security . 258 - * (6) Reader finishes SRCU section, although execve() operation has 259 - * not finished yet. 260 - * (7) Garbage collector waits for SRCU synchronization. 261 - * (8) Garbage collector kfree() this domain because this domain is 262 - * used by nobody. 263 - * (9) Reader finishes execve() operation and restores this domain from 264 - * "struct linux_binprm"->cred->security. 265 - * 266 - * By updating domain->users at (5), we can solve this race problem 267 - * by rechecking domain->users at (8). 315 + * Since this domain is referenced from neither 316 + * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete 317 + * elements without checking for is_deleted flag. 268 318 */ 269 - if (atomic_read(&domain->users)) 270 - return false; 271 319 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { 272 320 tomoyo_del_acl(&acl->list); 273 321 tomoyo_memory_free(acl); 274 322 } 275 323 tomoyo_put_name(domain->domainname); 276 - return true; 277 324 } 278 325 279 326 /** ··· 304 387 * 305 388 * Returns nothing. 306 389 */ 307 - static void tomoyo_del_name(struct list_head *element) 390 + static inline void tomoyo_del_name(struct list_head *element) 308 391 { 309 - const struct tomoyo_name *ptr = 310 - container_of(element, typeof(*ptr), head.list); 392 + /* Nothing to do. */ 311 393 } 312 394 313 395 /** ··· 316 400 * 317 401 * Returns nothing. 318 402 */ 319 - static void tomoyo_del_path_group(struct list_head *element) 403 + static inline void tomoyo_del_path_group(struct list_head *element) 320 404 { 321 405 struct tomoyo_path_group *member = 322 406 container_of(element, typeof(*member), head.list); ··· 330 414 * 331 415 * Returns nothing. 
332 416 */ 333 - static void tomoyo_del_group(struct list_head *element) 417 + static inline void tomoyo_del_group(struct list_head *element) 334 418 { 335 419 struct tomoyo_group *group = 336 420 container_of(element, typeof(*group), head.list); 337 421 tomoyo_put_name(group->group_name); 422 + } 423 + 424 + /** 425 + * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group". 426 + * 427 + * @element: Pointer to "struct list_head". 428 + * 429 + * Returns nothing. 430 + */ 431 + static inline void tomoyo_del_address_group(struct list_head *element) 432 + { 433 + /* Nothing to do. */ 338 434 } 339 435 340 436 /** ··· 356 428 * 357 429 * Returns nothing. 358 430 */ 359 - static void tomoyo_del_number_group(struct list_head *element) 431 + static inline void tomoyo_del_number_group(struct list_head *element) 360 432 { 361 - struct tomoyo_number_group *member = 362 - container_of(element, typeof(*member), head.list); 433 + /* Nothing to do. */ 434 + } 435 + 436 + /** 437 + * tomoyo_try_to_gc - Try to kfree() an entry. 438 + * 439 + * @type: One of values in "enum tomoyo_policy_id". 440 + * @element: Pointer to "struct list_head". 441 + * 442 + * Returns nothing. 443 + * 444 + * Caller holds tomoyo_policy_lock mutex. 445 + */ 446 + static void tomoyo_try_to_gc(const enum tomoyo_policy_id type, 447 + struct list_head *element) 448 + { 449 + /* 450 + * __list_del_entry() guarantees that the list element became no longer 451 + * reachable from the list which the element was originally on (e.g. 452 + * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the 453 + * list element became no longer referenced by syscall users. 454 + */ 455 + __list_del_entry(element); 456 + mutex_unlock(&tomoyo_policy_lock); 457 + synchronize_srcu(&tomoyo_ss); 458 + /* 459 + * However, there are two users which may still be using the list 460 + * element. We need to defer until both users forget this element. 
461 + * 462 + * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} 463 + * and "struct tomoyo_io_buffer"->w.domain forget this element. 464 + */ 465 + if (tomoyo_struct_used_by_io_buffer(element)) 466 + goto reinject; 467 + switch (type) { 468 + case TOMOYO_ID_TRANSITION_CONTROL: 469 + tomoyo_del_transition_control(element); 470 + break; 471 + case TOMOYO_ID_MANAGER: 472 + tomoyo_del_manager(element); 473 + break; 474 + case TOMOYO_ID_AGGREGATOR: 475 + tomoyo_del_aggregator(element); 476 + break; 477 + case TOMOYO_ID_GROUP: 478 + tomoyo_del_group(element); 479 + break; 480 + case TOMOYO_ID_PATH_GROUP: 481 + tomoyo_del_path_group(element); 482 + break; 483 + case TOMOYO_ID_ADDRESS_GROUP: 484 + tomoyo_del_address_group(element); 485 + break; 486 + case TOMOYO_ID_NUMBER_GROUP: 487 + tomoyo_del_number_group(element); 488 + break; 489 + case TOMOYO_ID_CONDITION: 490 + tomoyo_del_condition(element); 491 + break; 492 + case TOMOYO_ID_NAME: 493 + /* 494 + * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[] 495 + * forget this element. 496 + */ 497 + if (tomoyo_name_used_by_io_buffer 498 + (container_of(element, typeof(struct tomoyo_name), 499 + head.list)->entry.name)) 500 + goto reinject; 501 + tomoyo_del_name(element); 502 + break; 503 + case TOMOYO_ID_ACL: 504 + tomoyo_del_acl(element); 505 + break; 506 + case TOMOYO_ID_DOMAIN: 507 + /* 508 + * Don't kfree() until all "struct cred"->security forget this 509 + * element. 
510 + */ 511 + if (atomic_read(&container_of 512 + (element, typeof(struct tomoyo_domain_info), 513 + list)->users)) 514 + goto reinject; 515 + break; 516 + case TOMOYO_MAX_POLICY: 517 + break; 518 + } 519 + mutex_lock(&tomoyo_policy_lock); 520 + if (type == TOMOYO_ID_DOMAIN) 521 + tomoyo_del_domain(element); 522 + tomoyo_memory_free(element); 523 + return; 524 + reinject: 525 + /* 526 + * We can safely reinject this element here bacause 527 + * (1) Appending list elements and removing list elements are protected 528 + * by tomoyo_policy_lock mutex. 529 + * (2) Only this function removes list elements and this function is 530 + * exclusively executed by tomoyo_gc_mutex mutex. 531 + * are true. 532 + */ 533 + mutex_lock(&tomoyo_policy_lock); 534 + list_add_rcu(element, element->prev); 363 535 } 364 536 365 537 /** ··· 468 440 * @id: One of values in "enum tomoyo_policy_id". 469 441 * @member_list: Pointer to "struct list_head". 470 442 * 471 - * Returns true if some elements are deleted, false otherwise. 443 + * Returns nothing. 472 444 */ 473 - static bool tomoyo_collect_member(const enum tomoyo_policy_id id, 445 + static void tomoyo_collect_member(const enum tomoyo_policy_id id, 474 446 struct list_head *member_list) 475 447 { 476 448 struct tomoyo_acl_head *member; 477 - list_for_each_entry(member, member_list, list) { 449 + struct tomoyo_acl_head *tmp; 450 + list_for_each_entry_safe(member, tmp, member_list, list) { 478 451 if (!member->is_deleted) 479 452 continue; 480 - if (!tomoyo_add_to_gc(id, &member->list)) 481 - return false; 453 + member->is_deleted = TOMOYO_GC_IN_PROGRESS; 454 + tomoyo_try_to_gc(id, &member->list); 482 455 } 483 - return true; 484 456 } 485 457 486 458 /** ··· 488 460 * 489 461 * @list: Pointer to "struct list_head". 490 462 * 491 - * Returns true if some elements are deleted, false otherwise. 463 + * Returns nothing. 
492 464 */ 493 - static bool tomoyo_collect_acl(struct list_head *list) 465 + static void tomoyo_collect_acl(struct list_head *list) 494 466 { 495 467 struct tomoyo_acl_info *acl; 496 - list_for_each_entry(acl, list, list) { 468 + struct tomoyo_acl_info *tmp; 469 + list_for_each_entry_safe(acl, tmp, list, list) { 497 470 if (!acl->is_deleted) 498 471 continue; 499 - if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list)) 500 - return false; 472 + acl->is_deleted = TOMOYO_GC_IN_PROGRESS; 473 + tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list); 501 474 } 502 - return true; 503 475 } 504 476 505 477 /** 506 - * tomoyo_collect_entry - Scan lists for deleted elements. 478 + * tomoyo_collect_entry - Try to kfree() deleted elements. 507 479 * 508 480 * Returns nothing. 509 481 */ ··· 512 484 int i; 513 485 enum tomoyo_policy_id id; 514 486 struct tomoyo_policy_namespace *ns; 515 - int idx; 516 - if (mutex_lock_interruptible(&tomoyo_policy_lock)) 517 - return; 518 - idx = tomoyo_read_lock(); 487 + mutex_lock(&tomoyo_policy_lock); 519 488 { 520 489 struct tomoyo_domain_info *domain; 521 - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { 522 - if (!tomoyo_collect_acl(&domain->acl_info_list)) 523 - goto unlock; 490 + struct tomoyo_domain_info *tmp; 491 + list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, 492 + list) { 493 + tomoyo_collect_acl(&domain->acl_info_list); 524 494 if (!domain->is_deleted || atomic_read(&domain->users)) 525 495 continue; 526 - /* 527 - * Nobody is referring this domain. But somebody may 528 - * refer this domain after successful execve(). 529 - * We recheck domain->users after SRCU synchronization. 
530 - */ 531 - if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list)) 532 - goto unlock; 496 + tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list); 533 497 } 534 498 } 535 - list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) { 499 + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { 536 500 for (id = 0; id < TOMOYO_MAX_POLICY; id++) 537 - if (!tomoyo_collect_member(id, &ns->policy_list[id])) 538 - goto unlock; 501 + tomoyo_collect_member(id, &ns->policy_list[id]); 539 502 for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) 540 - if (!tomoyo_collect_acl(&ns->acl_group[i])) 541 - goto unlock; 503 + tomoyo_collect_acl(&ns->acl_group[i]); 504 + } 505 + { 506 + struct tomoyo_shared_acl_head *ptr; 507 + struct tomoyo_shared_acl_head *tmp; 508 + list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list, 509 + list) { 510 + if (atomic_read(&ptr->users) > 0) 511 + continue; 512 + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); 513 + tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list); 514 + } 515 + } 516 + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { 542 517 for (i = 0; i < TOMOYO_MAX_GROUP; i++) { 543 518 struct list_head *list = &ns->group_list[i]; 544 519 struct tomoyo_group *group; 520 + struct tomoyo_group *tmp; 545 521 switch (i) { 546 522 case 0: 547 523 id = TOMOYO_ID_PATH_GROUP; 548 524 break; 549 - default: 525 + case 1: 550 526 id = TOMOYO_ID_NUMBER_GROUP; 551 527 break; 528 + default: 529 + id = TOMOYO_ID_ADDRESS_GROUP; 530 + break; 552 531 } 553 - list_for_each_entry(group, list, head.list) { 554 - if (!tomoyo_collect_member 555 - (id, &group->member_list)) 556 - goto unlock; 532 + list_for_each_entry_safe(group, tmp, list, head.list) { 533 + tomoyo_collect_member(id, &group->member_list); 557 534 if (!list_empty(&group->member_list) || 558 - atomic_read(&group->head.users)) 535 + atomic_read(&group->head.users) > 0) 559 536 continue; 560 - if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, 561 - &group->head.list)) 562 - goto 
unlock; 537 + atomic_set(&group->head.users, 538 + TOMOYO_GC_IN_PROGRESS); 539 + tomoyo_try_to_gc(TOMOYO_ID_GROUP, 540 + &group->head.list); 563 541 } 564 542 } 565 543 } 566 - id = TOMOYO_ID_CONDITION; 567 - for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) { 568 - struct list_head *list = !i ? 569 - &tomoyo_condition_list : &tomoyo_name_list[i - 1]; 544 + for (i = 0; i < TOMOYO_MAX_HASH; i++) { 545 + struct list_head *list = &tomoyo_name_list[i]; 570 546 struct tomoyo_shared_acl_head *ptr; 571 - list_for_each_entry(ptr, list, list) { 572 - if (atomic_read(&ptr->users)) 547 + struct tomoyo_shared_acl_head *tmp; 548 + list_for_each_entry_safe(ptr, tmp, list, list) { 549 + if (atomic_read(&ptr->users) > 0) 573 550 continue; 574 - if (!tomoyo_add_to_gc(id, &ptr->list)) 575 - goto unlock; 551 + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); 552 + tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list); 576 553 } 577 - id = TOMOYO_ID_NAME; 578 554 } 579 - unlock: 580 - tomoyo_read_unlock(idx); 581 555 mutex_unlock(&tomoyo_policy_lock); 582 - } 583 - 584 - /** 585 - * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list. 586 - * 587 - * Returns true if some entries were kfree()d, false otherwise. 588 - */ 589 - static bool tomoyo_kfree_entry(void) 590 - { 591 - struct tomoyo_gc *p; 592 - struct tomoyo_gc *tmp; 593 - bool result = false; 594 - 595 - list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) { 596 - struct list_head *element = p->element; 597 - 598 - /* 599 - * list_del_rcu() in tomoyo_add_to_gc() guarantees that the 600 - * list element became no longer reachable from the list which 601 - * the element was originally on (e.g. tomoyo_domain_list). 602 - * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees 603 - * that the list element became no longer referenced by syscall 604 - * users. 605 - * 606 - * However, there are three users which may still be using the 607 - * list element. We need to defer until all of these users 608 - * forget the list element. 
609 - * 610 - * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain, 611 - * group,acl} and "struct tomoyo_io_buffer"->w.domain forget 612 - * the list element. 613 - */ 614 - if (tomoyo_struct_used_by_io_buffer(element)) 615 - continue; 616 - /* 617 - * Secondly, defer until all other elements in the 618 - * tomoyo_gc_list list forget the list element. 619 - */ 620 - if (tomoyo_element_linked_by_gc((const u8 *) element, p->size)) 621 - continue; 622 - switch (p->type) { 623 - case TOMOYO_ID_TRANSITION_CONTROL: 624 - tomoyo_del_transition_control(element); 625 - break; 626 - case TOMOYO_ID_AGGREGATOR: 627 - tomoyo_del_aggregator(element); 628 - break; 629 - case TOMOYO_ID_MANAGER: 630 - tomoyo_del_manager(element); 631 - break; 632 - case TOMOYO_ID_CONDITION: 633 - tomoyo_del_condition(element); 634 - break; 635 - case TOMOYO_ID_NAME: 636 - /* 637 - * Thirdly, defer until all "struct tomoyo_io_buffer" 638 - * ->r.w[] forget the list element. 639 - */ 640 - if (tomoyo_name_used_by_io_buffer( 641 - container_of(element, typeof(struct tomoyo_name), 642 - head.list)->entry.name, p->size)) 643 - continue; 644 - tomoyo_del_name(element); 645 - break; 646 - case TOMOYO_ID_ACL: 647 - tomoyo_del_acl(element); 648 - break; 649 - case TOMOYO_ID_DOMAIN: 650 - if (!tomoyo_del_domain(element)) 651 - continue; 652 - break; 653 - case TOMOYO_ID_PATH_GROUP: 654 - tomoyo_del_path_group(element); 655 - break; 656 - case TOMOYO_ID_GROUP: 657 - tomoyo_del_group(element); 658 - break; 659 - case TOMOYO_ID_NUMBER_GROUP: 660 - tomoyo_del_number_group(element); 661 - break; 662 - case TOMOYO_MAX_POLICY: 663 - break; 664 - } 665 - tomoyo_memory_free(element); 666 - list_del(&p->list); 667 - kfree(p); 668 - tomoyo_gc_list_len--; 669 - result = true; 670 - } 671 - return result; 672 556 } 673 557 674 558 /** 675 559 * tomoyo_gc_thread - Garbage collector thread function. 676 560 * 677 561 * @unused: Unused. 
678 - * 679 - * In case OOM-killer choose this thread for termination, we create this thread 680 - * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was 681 - * close()d. 682 562 * 683 563 * Returns 0. 684 564 */ ··· 596 660 static DEFINE_MUTEX(tomoyo_gc_mutex); 597 661 if (!mutex_trylock(&tomoyo_gc_mutex)) 598 662 goto out; 599 - daemonize("GC for TOMOYO"); 600 - do { 601 - tomoyo_collect_entry(); 602 - if (list_empty(&tomoyo_gc_list)) 603 - break; 604 - synchronize_srcu(&tomoyo_ss); 605 - } while (tomoyo_kfree_entry()); 663 + tomoyo_collect_entry(); 606 664 { 607 665 struct tomoyo_io_buffer *head; 608 666 struct tomoyo_io_buffer *tmp;
+60 -1
security/tomoyo/group.c
··· 42 42 } 43 43 44 44 /** 45 - * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list. 45 + * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry. 46 + * 47 + * @a: Pointer to "struct tomoyo_acl_head". 48 + * @b: Pointer to "struct tomoyo_acl_head". 49 + * 50 + * Returns true if @a == @b, false otherwise. 51 + */ 52 + static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a, 53 + const struct tomoyo_acl_head *b) 54 + { 55 + const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1), 56 + head); 57 + const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2), 58 + head); 59 + 60 + return tomoyo_same_ipaddr_union(&p1->address, &p2->address); 61 + } 62 + 63 + /** 64 + * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list. 46 65 * 47 66 * @param: Pointer to "struct tomoyo_acl_param". 48 67 * @type: Type of this group. ··· 96 77 * tomoyo_put_number_union() is not needed because 97 78 * param->data[0] != '@'. 98 79 */ 80 + } else { 81 + struct tomoyo_address_group e = { }; 82 + 83 + if (param->data[0] == '@' || 84 + !tomoyo_parse_ipaddr_union(param, &e.address)) 85 + goto out; 86 + error = tomoyo_update_policy(&e.head, sizeof(e), param, 87 + tomoyo_same_address_group); 99 88 } 100 89 out: 101 90 tomoyo_put_group(group); ··· 158 131 continue; 159 132 if (min > member->number.values[1] || 160 133 max < member->number.values[0]) 134 + continue; 135 + matched = true; 136 + break; 137 + } 138 + return matched; 139 + } 140 + 141 + /** 142 + * tomoyo_address_matches_group - Check whether the given address matches members of the given address group. 143 + * 144 + * @is_ipv6: True if @address is an IPv6 address. 145 + * @address: An IPv4 or IPv6 address. 146 + * @group: Pointer to "struct tomoyo_address_group". 147 + * 148 + * Returns true if @address matches addresses in @group group, false otherwise. 
149 + * 150 + * Caller holds tomoyo_read_lock(). 151 + */ 152 + bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, 153 + const struct tomoyo_group *group) 154 + { 155 + struct tomoyo_address_group *member; 156 + bool matched = false; 157 + const u8 size = is_ipv6 ? 16 : 4; 158 + 159 + list_for_each_entry_rcu(member, &group->member_list, head.list) { 160 + if (member->head.is_deleted) 161 + continue; 162 + if (member->address.is_ipv6 != is_ipv6) 163 + continue; 164 + if (memcmp(&member->address.ip[0], address, size) > 0 || 165 + memcmp(address, &member->address.ip[1], size) > 0) 161 166 continue; 162 167 matched = true; 163 168 break;
+12 -27
security/tomoyo/memory.c
··· 27 27 panic("MAC Initialization failed.\n"); 28 28 } 29 29 30 - /* Lock for protecting tomoyo_memory_used. */ 31 - static DEFINE_SPINLOCK(tomoyo_policy_memory_lock); 32 30 /* Memoy currently used by policy/audit log/query. */ 33 31 unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; 34 32 /* Memory quota for "policy"/"audit log"/"query". */ ··· 40 42 * Returns true on success, false otherwise. 41 43 * 42 44 * Returns true if @ptr is not NULL and quota not exceeded, false otherwise. 45 + * 46 + * Caller holds tomoyo_policy_lock mutex. 43 47 */ 44 48 bool tomoyo_memory_ok(void *ptr) 45 49 { 46 50 if (ptr) { 47 51 const size_t s = ksize(ptr); 48 - bool result; 49 - spin_lock(&tomoyo_policy_memory_lock); 50 52 tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s; 51 - result = !tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] || 52 - tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <= 53 - tomoyo_memory_quota[TOMOYO_MEMORY_POLICY]; 54 - if (!result) 55 - tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; 56 - spin_unlock(&tomoyo_policy_memory_lock); 57 - if (result) 53 + if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] || 54 + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <= 55 + tomoyo_memory_quota[TOMOYO_MEMORY_POLICY]) 58 56 return true; 57 + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; 59 58 } 60 59 tomoyo_warn_oom(__func__); 61 60 return false; ··· 66 71 * 67 72 * Returns pointer to allocated memory on success, NULL otherwise. 68 73 * @data is zero-cleared on success. 74 + * 75 + * Caller holds tomoyo_policy_lock mutex. 69 76 */ 70 77 void *tomoyo_commit_ok(void *data, const unsigned int size) 71 78 { ··· 79 82 } 80 83 kfree(ptr); 81 84 return NULL; 82 - } 83 - 84 - /** 85 - * tomoyo_memory_free - Free memory for elements. 86 - * 87 - * @ptr: Pointer to allocated memory. 
88 - */ 89 - void tomoyo_memory_free(void *ptr) 90 - { 91 - size_t s = ksize(ptr); 92 - spin_lock(&tomoyo_policy_memory_lock); 93 - tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; 94 - spin_unlock(&tomoyo_policy_memory_lock); 95 - kfree(ptr); 96 85 } 97 86 98 87 /** ··· 106 123 goto out; 107 124 list = &param->ns->group_list[idx]; 108 125 list_for_each_entry(group, list, head.list) { 109 - if (e.group_name != group->group_name) 126 + if (e.group_name != group->group_name || 127 + atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) 110 128 continue; 111 129 atomic_inc(&group->head.users); 112 130 found = true; ··· 159 175 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 160 176 return NULL; 161 177 list_for_each_entry(ptr, head, head.list) { 162 - if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name)) 178 + if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || 179 + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) 163 180 continue; 164 181 atomic_inc(&ptr->head.users); 165 182 goto out;
+771
security/tomoyo/network.c
··· 1 + /* 2 + * security/tomoyo/network.c 3 + * 4 + * Copyright (C) 2005-2011 NTT DATA CORPORATION 5 + */ 6 + 7 + #include "common.h" 8 + #include <linux/slab.h> 9 + 10 + /* Structure for holding inet domain socket's address. */ 11 + struct tomoyo_inet_addr_info { 12 + __be16 port; /* In network byte order. */ 13 + const __be32 *address; /* In network byte order. */ 14 + bool is_ipv6; 15 + }; 16 + 17 + /* Structure for holding unix domain socket's address. */ 18 + struct tomoyo_unix_addr_info { 19 + u8 *addr; /* This may not be '\0' terminated string. */ 20 + unsigned int addr_len; 21 + }; 22 + 23 + /* Structure for holding socket address. */ 24 + struct tomoyo_addr_info { 25 + u8 protocol; 26 + u8 operation; 27 + struct tomoyo_inet_addr_info inet; 28 + struct tomoyo_unix_addr_info unix0; 29 + }; 30 + 31 + /* String table for socket's protocols. */ 32 + const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = { 33 + [SOCK_STREAM] = "stream", 34 + [SOCK_DGRAM] = "dgram", 35 + [SOCK_RAW] = "raw", 36 + [SOCK_SEQPACKET] = "seqpacket", 37 + [0] = " ", /* Dummy for avoiding NULL pointer dereference. */ 38 + [4] = " ", /* Dummy for avoiding NULL pointer dereference. */ 39 + }; 40 + 41 + /** 42 + * tomoyo_parse_ipaddr_union - Parse an IP address. 43 + * 44 + * @param: Pointer to "struct tomoyo_acl_param". 45 + * @ptr: Pointer to "struct tomoyo_ipaddr_union". 46 + * 47 + * Returns true on success, false otherwise. 
48 + */ 49 + bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param, 50 + struct tomoyo_ipaddr_union *ptr) 51 + { 52 + u8 * const min = ptr->ip[0].in6_u.u6_addr8; 53 + u8 * const max = ptr->ip[1].in6_u.u6_addr8; 54 + char *address = tomoyo_read_token(param); 55 + const char *end; 56 + 57 + if (!strchr(address, ':') && 58 + in4_pton(address, -1, min, '-', &end) > 0) { 59 + ptr->is_ipv6 = false; 60 + if (!*end) 61 + ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0]; 62 + else if (*end++ != '-' || 63 + in4_pton(end, -1, max, '\0', &end) <= 0 || *end) 64 + return false; 65 + return true; 66 + } 67 + if (in6_pton(address, -1, min, '-', &end) > 0) { 68 + ptr->is_ipv6 = true; 69 + if (!*end) 70 + memmove(max, min, sizeof(u16) * 8); 71 + else if (*end++ != '-' || 72 + in6_pton(end, -1, max, '\0', &end) <= 0 || *end) 73 + return false; 74 + return true; 75 + } 76 + return false; 77 + } 78 + 79 + /** 80 + * tomoyo_print_ipv4 - Print an IPv4 address. 81 + * 82 + * @buffer: Buffer to write to. 83 + * @buffer_len: Size of @buffer. 84 + * @min_ip: Pointer to __be32. 85 + * @max_ip: Pointer to __be32. 86 + * 87 + * Returns nothing. 88 + */ 89 + static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len, 90 + const __be32 *min_ip, const __be32 *max_ip) 91 + { 92 + snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip, 93 + *min_ip == *max_ip ? '\0' : '-', max_ip); 94 + } 95 + 96 + /** 97 + * tomoyo_print_ipv6 - Print an IPv6 address. 98 + * 99 + * @buffer: Buffer to write to. 100 + * @buffer_len: Size of @buffer. 101 + * @min_ip: Pointer to "struct in6_addr". 102 + * @max_ip: Pointer to "struct in6_addr". 103 + * 104 + * Returns nothing. 105 + */ 106 + static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len, 107 + const struct in6_addr *min_ip, 108 + const struct in6_addr *max_ip) 109 + { 110 + snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip, 111 + !memcmp(min_ip, max_ip, 16) ? 
'\0' : '-', max_ip); 112 + } 113 + 114 + /** 115 + * tomoyo_print_ip - Print an IP address. 116 + * 117 + * @buf: Buffer to write to. 118 + * @size: Size of @buf. 119 + * @ptr: Pointer to "struct ipaddr_union". 120 + * 121 + * Returns nothing. 122 + */ 123 + void tomoyo_print_ip(char *buf, const unsigned int size, 124 + const struct tomoyo_ipaddr_union *ptr) 125 + { 126 + if (ptr->is_ipv6) 127 + tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]); 128 + else 129 + tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0], 130 + &ptr->ip[1].s6_addr32[0]); 131 + } 132 + 133 + /* 134 + * Mapping table from "enum tomoyo_network_acl_index" to 135 + * "enum tomoyo_mac_index" for inet domain socket. 136 + */ 137 + static const u8 tomoyo_inet2mac 138 + [TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = { 139 + [SOCK_STREAM] = { 140 + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND, 141 + [TOMOYO_NETWORK_LISTEN] = 142 + TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN, 143 + [TOMOYO_NETWORK_CONNECT] = 144 + TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT, 145 + }, 146 + [SOCK_DGRAM] = { 147 + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND, 148 + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND, 149 + }, 150 + [SOCK_RAW] = { 151 + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND, 152 + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND, 153 + }, 154 + }; 155 + 156 + /* 157 + * Mapping table from "enum tomoyo_network_acl_index" to 158 + * "enum tomoyo_mac_index" for unix domain socket. 
159 + */ 160 + static const u8 tomoyo_unix2mac 161 + [TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = { 162 + [SOCK_STREAM] = { 163 + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND, 164 + [TOMOYO_NETWORK_LISTEN] = 165 + TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN, 166 + [TOMOYO_NETWORK_CONNECT] = 167 + TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT, 168 + }, 169 + [SOCK_DGRAM] = { 170 + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND, 171 + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND, 172 + }, 173 + [SOCK_SEQPACKET] = { 174 + [TOMOYO_NETWORK_BIND] = 175 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND, 176 + [TOMOYO_NETWORK_LISTEN] = 177 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN, 178 + [TOMOYO_NETWORK_CONNECT] = 179 + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT, 180 + }, 181 + }; 182 + 183 + /** 184 + * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry. 185 + * 186 + * @a: Pointer to "struct tomoyo_acl_info". 187 + * @b: Pointer to "struct tomoyo_acl_info". 188 + * 189 + * Returns true if @a == @b except permission bits, false otherwise. 190 + */ 191 + static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a, 192 + const struct tomoyo_acl_info *b) 193 + { 194 + const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head); 195 + const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head); 196 + 197 + return p1->protocol == p2->protocol && 198 + tomoyo_same_ipaddr_union(&p1->address, &p2->address) && 199 + tomoyo_same_number_union(&p1->port, &p2->port); 200 + } 201 + 202 + /** 203 + * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry. 204 + * 205 + * @a: Pointer to "struct tomoyo_acl_info". 206 + * @b: Pointer to "struct tomoyo_acl_info". 207 + * 208 + * Returns true if @a == @b except permission bits, false otherwise. 
209 + */ 210 + static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a, 211 + const struct tomoyo_acl_info *b) 212 + { 213 + const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head); 214 + const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head); 215 + 216 + return p1->protocol == p2->protocol && 217 + tomoyo_same_name_union(&p1->name, &p2->name); 218 + } 219 + 220 + /** 221 + * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry. 222 + * 223 + * @a: Pointer to "struct tomoyo_acl_info". 224 + * @b: Pointer to "struct tomoyo_acl_info". 225 + * @is_delete: True for @a &= ~@b, false for @a |= @b. 226 + * 227 + * Returns true if @a is empty, false otherwise. 228 + */ 229 + static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a, 230 + struct tomoyo_acl_info *b, 231 + const bool is_delete) 232 + { 233 + u8 * const a_perm = 234 + &container_of(a, struct tomoyo_inet_acl, head)->perm; 235 + u8 perm = *a_perm; 236 + const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm; 237 + 238 + if (is_delete) 239 + perm &= ~b_perm; 240 + else 241 + perm |= b_perm; 242 + *a_perm = perm; 243 + return !perm; 244 + } 245 + 246 + /** 247 + * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry. 248 + * 249 + * @a: Pointer to "struct tomoyo_acl_info". 250 + * @b: Pointer to "struct tomoyo_acl_info". 251 + * @is_delete: True for @a &= ~@b, false for @a |= @b. 252 + * 253 + * Returns true if @a is empty, false otherwise. 
254 + */ 255 + static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a, 256 + struct tomoyo_acl_info *b, 257 + const bool is_delete) 258 + { 259 + u8 * const a_perm = 260 + &container_of(a, struct tomoyo_unix_acl, head)->perm; 261 + u8 perm = *a_perm; 262 + const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm; 263 + 264 + if (is_delete) 265 + perm &= ~b_perm; 266 + else 267 + perm |= b_perm; 268 + *a_perm = perm; 269 + return !perm; 270 + } 271 + 272 + /** 273 + * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list. 274 + * 275 + * @param: Pointer to "struct tomoyo_acl_param". 276 + * 277 + * Returns 0 on success, negative value otherwise. 278 + * 279 + * Caller holds tomoyo_read_lock(). 280 + */ 281 + int tomoyo_write_inet_network(struct tomoyo_acl_param *param) 282 + { 283 + struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL }; 284 + int error = -EINVAL; 285 + u8 type; 286 + const char *protocol = tomoyo_read_token(param); 287 + const char *operation = tomoyo_read_token(param); 288 + 289 + for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++) 290 + if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol])) 291 + break; 292 + for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++) 293 + if (tomoyo_permstr(operation, tomoyo_socket_keyword[type])) 294 + e.perm |= 1 << type; 295 + if (e.protocol == TOMOYO_SOCK_MAX || !e.perm) 296 + return -EINVAL; 297 + if (param->data[0] == '@') { 298 + param->data++; 299 + e.address.group = 300 + tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP); 301 + if (!e.address.group) 302 + return -ENOMEM; 303 + } else { 304 + if (!tomoyo_parse_ipaddr_union(param, &e.address)) 305 + goto out; 306 + } 307 + if (!tomoyo_parse_number_union(param, &e.port) || 308 + e.port.values[1] > 65535) 309 + goto out; 310 + error = tomoyo_update_domain(&e.head, sizeof(e), param, 311 + tomoyo_same_inet_acl, 312 + tomoyo_merge_inet_acl); 313 + out: 314 + tomoyo_put_group(e.address.group); 315 + 
tomoyo_put_number_union(&e.port); 316 + return error; 317 + } 318 + 319 + /** 320 + * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list. 321 + * 322 + * @param: Pointer to "struct tomoyo_acl_param". 323 + * 324 + * Returns 0 on success, negative value otherwise. 325 + */ 326 + int tomoyo_write_unix_network(struct tomoyo_acl_param *param) 327 + { 328 + struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL }; 329 + int error; 330 + u8 type; 331 + const char *protocol = tomoyo_read_token(param); 332 + const char *operation = tomoyo_read_token(param); 333 + 334 + for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++) 335 + if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol])) 336 + break; 337 + for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++) 338 + if (tomoyo_permstr(operation, tomoyo_socket_keyword[type])) 339 + e.perm |= 1 << type; 340 + if (e.protocol == TOMOYO_SOCK_MAX || !e.perm) 341 + return -EINVAL; 342 + if (!tomoyo_parse_name_union(param, &e.name)) 343 + return -EINVAL; 344 + error = tomoyo_update_domain(&e.head, sizeof(e), param, 345 + tomoyo_same_unix_acl, 346 + tomoyo_merge_unix_acl); 347 + tomoyo_put_name_union(&e.name); 348 + return error; 349 + } 350 + 351 + /** 352 + * tomoyo_audit_net_log - Audit network log. 353 + * 354 + * @r: Pointer to "struct tomoyo_request_info". 355 + * @family: Name of socket family ("inet" or "unix"). 356 + * @protocol: Name of protocol in @family. 357 + * @operation: Name of socket operation. 358 + * @address: Name of address. 359 + * 360 + * Returns 0 on success, negative value otherwise. 
361 + */ 362 + static int tomoyo_audit_net_log(struct tomoyo_request_info *r, 363 + const char *family, const u8 protocol, 364 + const u8 operation, const char *address) 365 + { 366 + return tomoyo_supervisor(r, "network %s %s %s %s\n", family, 367 + tomoyo_proto_keyword[protocol], 368 + tomoyo_socket_keyword[operation], address); 369 + } 370 + 371 + /** 372 + * tomoyo_audit_inet_log - Audit INET network log. 373 + * 374 + * @r: Pointer to "struct tomoyo_request_info". 375 + * 376 + * Returns 0 on success, negative value otherwise. 377 + */ 378 + static int tomoyo_audit_inet_log(struct tomoyo_request_info *r) 379 + { 380 + char buf[128]; 381 + int len; 382 + const __be32 *address = r->param.inet_network.address; 383 + 384 + if (r->param.inet_network.is_ipv6) 385 + tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *) 386 + address, (const struct in6_addr *) address); 387 + else 388 + tomoyo_print_ipv4(buf, sizeof(buf), address, address); 389 + len = strlen(buf); 390 + snprintf(buf + len, sizeof(buf) - len, " %u", 391 + r->param.inet_network.port); 392 + return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol, 393 + r->param.inet_network.operation, buf); 394 + } 395 + 396 + /** 397 + * tomoyo_audit_unix_log - Audit UNIX network log. 398 + * 399 + * @r: Pointer to "struct tomoyo_request_info". 400 + * 401 + * Returns 0 on success, negative value otherwise. 402 + */ 403 + static int tomoyo_audit_unix_log(struct tomoyo_request_info *r) 404 + { 405 + return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol, 406 + r->param.unix_network.operation, 407 + r->param.unix_network.address->name); 408 + } 409 + 410 + /** 411 + * tomoyo_check_inet_acl - Check permission for inet domain socket operation. 412 + * 413 + * @r: Pointer to "struct tomoyo_request_info". 414 + * @ptr: Pointer to "struct tomoyo_acl_info". 415 + * 416 + * Returns true if granted, false otherwise. 
417 + */ 418 + static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r, 419 + const struct tomoyo_acl_info *ptr) 420 + { 421 + const struct tomoyo_inet_acl *acl = 422 + container_of(ptr, typeof(*acl), head); 423 + const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4; 424 + 425 + if (!(acl->perm & (1 << r->param.inet_network.operation)) || 426 + !tomoyo_compare_number_union(r->param.inet_network.port, 427 + &acl->port)) 428 + return false; 429 + if (acl->address.group) 430 + return tomoyo_address_matches_group 431 + (r->param.inet_network.is_ipv6, 432 + r->param.inet_network.address, acl->address.group); 433 + return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 && 434 + memcmp(&acl->address.ip[0], 435 + r->param.inet_network.address, size) <= 0 && 436 + memcmp(r->param.inet_network.address, 437 + &acl->address.ip[1], size) <= 0; 438 + } 439 + 440 + /** 441 + * tomoyo_check_unix_acl - Check permission for unix domain socket operation. 442 + * 443 + * @r: Pointer to "struct tomoyo_request_info". 444 + * @ptr: Pointer to "struct tomoyo_acl_info". 445 + * 446 + * Returns true if granted, false otherwise. 447 + */ 448 + static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r, 449 + const struct tomoyo_acl_info *ptr) 450 + { 451 + const struct tomoyo_unix_acl *acl = 452 + container_of(ptr, typeof(*acl), head); 453 + 454 + return (acl->perm & (1 << r->param.unix_network.operation)) && 455 + tomoyo_compare_name_union(r->param.unix_network.address, 456 + &acl->name); 457 + } 458 + 459 + /** 460 + * tomoyo_inet_entry - Check permission for INET network operation. 461 + * 462 + * @address: Pointer to "struct tomoyo_addr_info". 463 + * 464 + * Returns 0 on success, negative value otherwise. 
465 + */ 466 + static int tomoyo_inet_entry(const struct tomoyo_addr_info *address) 467 + { 468 + const int idx = tomoyo_read_lock(); 469 + struct tomoyo_request_info r; 470 + int error = 0; 471 + const u8 type = tomoyo_inet2mac[address->protocol][address->operation]; 472 + 473 + if (type && tomoyo_init_request_info(&r, NULL, type) 474 + != TOMOYO_CONFIG_DISABLED) { 475 + r.param_type = TOMOYO_TYPE_INET_ACL; 476 + r.param.inet_network.protocol = address->protocol; 477 + r.param.inet_network.operation = address->operation; 478 + r.param.inet_network.is_ipv6 = address->inet.is_ipv6; 479 + r.param.inet_network.address = address->inet.address; 480 + r.param.inet_network.port = ntohs(address->inet.port); 481 + do { 482 + tomoyo_check_acl(&r, tomoyo_check_inet_acl); 483 + error = tomoyo_audit_inet_log(&r); 484 + } while (error == TOMOYO_RETRY_REQUEST); 485 + } 486 + tomoyo_read_unlock(idx); 487 + return error; 488 + } 489 + 490 + /** 491 + * tomoyo_check_inet_address - Check permission for inet domain socket's operation. 492 + * 493 + * @addr: Pointer to "struct sockaddr". 494 + * @addr_len: Size of @addr. 495 + * @port: Port number. 496 + * @address: Pointer to "struct tomoyo_addr_info". 497 + * 498 + * Returns 0 on success, negative value otherwise. 
499 + */ 500 + static int tomoyo_check_inet_address(const struct sockaddr *addr, 501 + const unsigned int addr_len, 502 + const u16 port, 503 + struct tomoyo_addr_info *address) 504 + { 505 + struct tomoyo_inet_addr_info *i = &address->inet; 506 + 507 + switch (addr->sa_family) { 508 + case AF_INET6: 509 + if (addr_len < SIN6_LEN_RFC2133) 510 + goto skip; 511 + i->is_ipv6 = true; 512 + i->address = (__be32 *) 513 + ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr; 514 + i->port = ((struct sockaddr_in6 *) addr)->sin6_port; 515 + break; 516 + case AF_INET: 517 + if (addr_len < sizeof(struct sockaddr_in)) 518 + goto skip; 519 + i->is_ipv6 = false; 520 + i->address = (__be32 *) 521 + &((struct sockaddr_in *) addr)->sin_addr; 522 + i->port = ((struct sockaddr_in *) addr)->sin_port; 523 + break; 524 + default: 525 + goto skip; 526 + } 527 + if (address->protocol == SOCK_RAW) 528 + i->port = htons(port); 529 + return tomoyo_inet_entry(address); 530 + skip: 531 + return 0; 532 + } 533 + 534 + /** 535 + * tomoyo_unix_entry - Check permission for UNIX network operation. 536 + * 537 + * @address: Pointer to "struct tomoyo_addr_info". 538 + * 539 + * Returns 0 on success, negative value otherwise. 
540 + */ 541 + static int tomoyo_unix_entry(const struct tomoyo_addr_info *address) 542 + { 543 + const int idx = tomoyo_read_lock(); 544 + struct tomoyo_request_info r; 545 + int error = 0; 546 + const u8 type = tomoyo_unix2mac[address->protocol][address->operation]; 547 + 548 + if (type && tomoyo_init_request_info(&r, NULL, type) 549 + != TOMOYO_CONFIG_DISABLED) { 550 + char *buf = address->unix0.addr; 551 + int len = address->unix0.addr_len - sizeof(sa_family_t); 552 + 553 + if (len <= 0) { 554 + buf = "anonymous"; 555 + len = 9; 556 + } else if (buf[0]) { 557 + len = strnlen(buf, len); 558 + } 559 + buf = tomoyo_encode2(buf, len); 560 + if (buf) { 561 + struct tomoyo_path_info addr; 562 + 563 + addr.name = buf; 564 + tomoyo_fill_path_info(&addr); 565 + r.param_type = TOMOYO_TYPE_UNIX_ACL; 566 + r.param.unix_network.protocol = address->protocol; 567 + r.param.unix_network.operation = address->operation; 568 + r.param.unix_network.address = &addr; 569 + do { 570 + tomoyo_check_acl(&r, tomoyo_check_unix_acl); 571 + error = tomoyo_audit_unix_log(&r); 572 + } while (error == TOMOYO_RETRY_REQUEST); 573 + kfree(buf); 574 + } else 575 + error = -ENOMEM; 576 + } 577 + tomoyo_read_unlock(idx); 578 + return error; 579 + } 580 + 581 + /** 582 + * tomoyo_check_unix_address - Check permission for unix domain socket's operation. 583 + * 584 + * @addr: Pointer to "struct sockaddr". 585 + * @addr_len: Size of @addr. 586 + * @address: Pointer to "struct tomoyo_addr_info". 587 + * 588 + * Returns 0 on success, negative value otherwise. 
589 + */ 590 + static int tomoyo_check_unix_address(struct sockaddr *addr, 591 + const unsigned int addr_len, 592 + struct tomoyo_addr_info *address) 593 + { 594 + struct tomoyo_unix_addr_info *u = &address->unix0; 595 + 596 + if (addr->sa_family != AF_UNIX) 597 + return 0; 598 + u->addr = ((struct sockaddr_un *) addr)->sun_path; 599 + u->addr_len = addr_len; 600 + return tomoyo_unix_entry(address); 601 + } 602 + 603 + /** 604 + * tomoyo_kernel_service - Check whether I'm kernel service or not. 605 + * 606 + * Returns true if I'm kernel service, false otherwise. 607 + */ 608 + static bool tomoyo_kernel_service(void) 609 + { 610 + /* Nothing to do if I am a kernel service. */ 611 + return segment_eq(get_fs(), KERNEL_DS); 612 + } 613 + 614 + /** 615 + * tomoyo_sock_family - Get socket's family. 616 + * 617 + * @sk: Pointer to "struct sock". 618 + * 619 + * Returns one of PF_INET, PF_INET6, PF_UNIX or 0. 620 + */ 621 + static u8 tomoyo_sock_family(struct sock *sk) 622 + { 623 + u8 family; 624 + 625 + if (tomoyo_kernel_service()) 626 + return 0; 627 + family = sk->sk_family; 628 + switch (family) { 629 + case PF_INET: 630 + case PF_INET6: 631 + case PF_UNIX: 632 + return family; 633 + default: 634 + return 0; 635 + } 636 + } 637 + 638 + /** 639 + * tomoyo_socket_listen_permission - Check permission for listening a socket. 640 + * 641 + * @sock: Pointer to "struct socket". 642 + * 643 + * Returns 0 on success, negative value otherwise. 
644 + */ 645 + int tomoyo_socket_listen_permission(struct socket *sock) 646 + { 647 + struct tomoyo_addr_info address; 648 + const u8 family = tomoyo_sock_family(sock->sk); 649 + const unsigned int type = sock->type; 650 + struct sockaddr_storage addr; 651 + int addr_len; 652 + 653 + if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET)) 654 + return 0; 655 + { 656 + const int error = sock->ops->getname(sock, (struct sockaddr *) 657 + &addr, &addr_len, 0); 658 + 659 + if (error) 660 + return error; 661 + } 662 + address.protocol = type; 663 + address.operation = TOMOYO_NETWORK_LISTEN; 664 + if (family == PF_UNIX) 665 + return tomoyo_check_unix_address((struct sockaddr *) &addr, 666 + addr_len, &address); 667 + return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len, 668 + 0, &address); 669 + } 670 + 671 + /** 672 + * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket. 673 + * 674 + * @sock: Pointer to "struct socket". 675 + * @addr: Pointer to "struct sockaddr". 676 + * @addr_len: Size of @addr. 677 + * 678 + * Returns 0 on success, negative value otherwise. 
679 + */ 680 + int tomoyo_socket_connect_permission(struct socket *sock, 681 + struct sockaddr *addr, int addr_len) 682 + { 683 + struct tomoyo_addr_info address; 684 + const u8 family = tomoyo_sock_family(sock->sk); 685 + const unsigned int type = sock->type; 686 + 687 + if (!family) 688 + return 0; 689 + address.protocol = type; 690 + switch (type) { 691 + case SOCK_DGRAM: 692 + case SOCK_RAW: 693 + address.operation = TOMOYO_NETWORK_SEND; 694 + break; 695 + case SOCK_STREAM: 696 + case SOCK_SEQPACKET: 697 + address.operation = TOMOYO_NETWORK_CONNECT; 698 + break; 699 + default: 700 + return 0; 701 + } 702 + if (family == PF_UNIX) 703 + return tomoyo_check_unix_address(addr, addr_len, &address); 704 + return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol, 705 + &address); 706 + } 707 + 708 + /** 709 + * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket. 710 + * 711 + * @sock: Pointer to "struct socket". 712 + * @addr: Pointer to "struct sockaddr". 713 + * @addr_len: Size of @addr. 714 + * 715 + * Returns 0 on success, negative value otherwise. 716 + */ 717 + int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr, 718 + int addr_len) 719 + { 720 + struct tomoyo_addr_info address; 721 + const u8 family = tomoyo_sock_family(sock->sk); 722 + const unsigned int type = sock->type; 723 + 724 + if (!family) 725 + return 0; 726 + switch (type) { 727 + case SOCK_STREAM: 728 + case SOCK_DGRAM: 729 + case SOCK_RAW: 730 + case SOCK_SEQPACKET: 731 + address.protocol = type; 732 + address.operation = TOMOYO_NETWORK_BIND; 733 + break; 734 + default: 735 + return 0; 736 + } 737 + if (family == PF_UNIX) 738 + return tomoyo_check_unix_address(addr, addr_len, &address); 739 + return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol, 740 + &address); 741 + } 742 + 743 + /** 744 + * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram. 
745 + * 746 + * @sock: Pointer to "struct socket". 747 + * @msg: Pointer to "struct msghdr". 748 + * @size: Unused. 749 + * 750 + * Returns 0 on success, negative value otherwise. 751 + */ 752 + int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg, 753 + int size) 754 + { 755 + struct tomoyo_addr_info address; 756 + const u8 family = tomoyo_sock_family(sock->sk); 757 + const unsigned int type = sock->type; 758 + 759 + if (!msg->msg_name || !family || 760 + (type != SOCK_DGRAM && type != SOCK_RAW)) 761 + return 0; 762 + address.protocol = type; 763 + address.operation = TOMOYO_NETWORK_SEND; 764 + if (family == PF_UNIX) 765 + return tomoyo_check_unix_address((struct sockaddr *) 766 + msg->msg_name, 767 + msg->msg_namelen, &address); 768 + return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name, 769 + msg->msg_namelen, 770 + sock->sk->sk_protocol, &address); 771 + }
+25 -7
security/tomoyo/realpath.c
··· 15 15 #include "../../fs/internal.h" 16 16 17 17 /** 18 - * tomoyo_encode: Convert binary string to ascii string. 18 + * tomoyo_encode2 - Encode binary string to ascii string. 19 19 * 20 - * @str: String in binary format. 20 + * @str: String in binary format. 21 + * @str_len: Size of @str in byte. 21 22 * 22 23 * Returns pointer to @str in ascii format on success, NULL otherwise. 23 24 * 24 25 * This function uses kzalloc(), so caller must kfree() if this function 25 26 * didn't return NULL. 26 27 */ 27 - char *tomoyo_encode(const char *str) 28 + char *tomoyo_encode2(const char *str, int str_len) 28 29 { 30 + int i; 29 31 int len = 0; 30 32 const char *p = str; 31 33 char *cp; ··· 35 33 36 34 if (!p) 37 35 return NULL; 38 - while (*p) { 39 - const unsigned char c = *p++; 36 + for (i = 0; i < str_len; i++) { 37 + const unsigned char c = p[i]; 38 + 40 39 if (c == '\\') 41 40 len += 2; 42 41 else if (c > ' ' && c < 127) ··· 52 49 return NULL; 53 50 cp0 = cp; 54 51 p = str; 55 - while (*p) { 56 - const unsigned char c = *p++; 52 + for (i = 0; i < str_len; i++) { 53 + const unsigned char c = p[i]; 57 54 58 55 if (c == '\\') { 59 56 *cp++ = '\\'; ··· 68 65 } 69 66 } 70 67 return cp0; 68 + } 69 + 70 + /** 71 + * tomoyo_encode - Encode binary string to ascii string. 72 + * 73 + * @str: String in binary format. 74 + * 75 + * Returns pointer to @str in ascii format on success, NULL otherwise. 76 + * 77 + * This function uses kzalloc(), so caller must kfree() if this function 78 + * didn't return NULL. 79 + */ 80 + char *tomoyo_encode(const char *str) 81 + { 82 + return str ? tomoyo_encode2(str, strlen(str)) : NULL; 71 83 } 72 84 73 85 /**
+121 -2
security/tomoyo/securityfs_if.c
··· 8 8 #include "common.h" 9 9 10 10 /** 11 + * tomoyo_check_task_acl - Check permission for task operation. 12 + * 13 + * @r: Pointer to "struct tomoyo_request_info". 14 + * @ptr: Pointer to "struct tomoyo_acl_info". 15 + * 16 + * Returns true if granted, false otherwise. 17 + */ 18 + static bool tomoyo_check_task_acl(struct tomoyo_request_info *r, 19 + const struct tomoyo_acl_info *ptr) 20 + { 21 + const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl), 22 + head); 23 + return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname); 24 + } 25 + 26 + /** 27 + * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface. 28 + * 29 + * @file: Pointer to "struct file". 30 + * @buf: Domainname to transit to. 31 + * @count: Size of @buf. 32 + * @ppos: Unused. 33 + * 34 + * Returns @count on success, negative value otherwise. 35 + * 36 + * If domain transition was permitted but the domain transition failed, this 37 + * function returns error rather than terminating current thread with SIGKILL. 38 + */ 39 + static ssize_t tomoyo_write_self(struct file *file, const char __user *buf, 40 + size_t count, loff_t *ppos) 41 + { 42 + char *data; 43 + int error; 44 + if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10) 45 + return -ENOMEM; 46 + data = kzalloc(count + 1, GFP_NOFS); 47 + if (!data) 48 + return -ENOMEM; 49 + if (copy_from_user(data, buf, count)) { 50 + error = -EFAULT; 51 + goto out; 52 + } 53 + tomoyo_normalize_line(data); 54 + if (tomoyo_correct_domain(data)) { 55 + const int idx = tomoyo_read_lock(); 56 + struct tomoyo_path_info name; 57 + struct tomoyo_request_info r; 58 + name.name = data; 59 + tomoyo_fill_path_info(&name); 60 + /* Check "task manual_domain_transition" permission. 
*/ 61 + tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE); 62 + r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL; 63 + r.param.task.domainname = &name; 64 + tomoyo_check_acl(&r, tomoyo_check_task_acl); 65 + if (!r.granted) 66 + error = -EPERM; 67 + else { 68 + struct tomoyo_domain_info *new_domain = 69 + tomoyo_assign_domain(data, true); 70 + if (!new_domain) { 71 + error = -ENOENT; 72 + } else { 73 + struct cred *cred = prepare_creds(); 74 + if (!cred) { 75 + error = -ENOMEM; 76 + } else { 77 + struct tomoyo_domain_info *old_domain = 78 + cred->security; 79 + cred->security = new_domain; 80 + atomic_inc(&new_domain->users); 81 + atomic_dec(&old_domain->users); 82 + commit_creds(cred); 83 + error = 0; 84 + } 85 + } 86 + } 87 + tomoyo_read_unlock(idx); 88 + } else 89 + error = -EINVAL; 90 + out: 91 + kfree(data); 92 + return error ? error : count; 93 + } 94 + 95 + /** 96 + * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface. 97 + * 98 + * @file: Pointer to "struct file". 99 + * @buf: Domainname which current thread belongs to. 100 + * @count: Size of @buf. 101 + * @ppos: Bytes read by now. 102 + * 103 + * Returns read size on success, negative value otherwise. 104 + */ 105 + static ssize_t tomoyo_read_self(struct file *file, char __user *buf, 106 + size_t count, loff_t *ppos) 107 + { 108 + const char *domain = tomoyo_domain()->domainname->name; 109 + loff_t len = strlen(domain); 110 + loff_t pos = *ppos; 111 + if (pos >= len || !count) 112 + return 0; 113 + len -= pos; 114 + if (count < len) 115 + len = count; 116 + if (copy_to_user(buf, domain + pos, len)) 117 + return -EFAULT; 118 + *ppos += len; 119 + return len; 120 + } 121 + 122 + /* Operations for /sys/kernel/security/tomoyo/self_domain interface. 
*/ 123 + static const struct file_operations tomoyo_self_operations = { 124 + .write = tomoyo_write_self, 125 + .read = tomoyo_read_self, 126 + }; 127 + 128 + /** 11 129 * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface. 12 130 * 13 131 * @inode: Pointer to "struct inode". ··· 253 135 TOMOYO_EXCEPTIONPOLICY); 254 136 tomoyo_create_entry("audit", 0400, tomoyo_dir, 255 137 TOMOYO_AUDIT); 256 - tomoyo_create_entry("self_domain", 0400, tomoyo_dir, 257 - TOMOYO_SELFDOMAIN); 258 138 tomoyo_create_entry(".process_status", 0600, tomoyo_dir, 259 139 TOMOYO_PROCESS_STATUS); 260 140 tomoyo_create_entry("stat", 0644, tomoyo_dir, ··· 263 147 TOMOYO_MANAGER); 264 148 tomoyo_create_entry("version", 0400, tomoyo_dir, 265 149 TOMOYO_VERSION); 150 + securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL, 151 + &tomoyo_self_operations); 152 + tomoyo_load_builtin_policy(); 266 153 return 0; 267 154 } 268 155
+62
security/tomoyo/tomoyo.c
··· 442 442 return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path); 443 443 } 444 444 445 + /** 446 + * tomoyo_socket_listen - Check permission for listen(). 447 + * 448 + * @sock: Pointer to "struct socket". 449 + * @backlog: Backlog parameter. 450 + * 451 + * Returns 0 on success, negative value otherwise. 452 + */ 453 + static int tomoyo_socket_listen(struct socket *sock, int backlog) 454 + { 455 + return tomoyo_socket_listen_permission(sock); 456 + } 457 + 458 + /** 459 + * tomoyo_socket_connect - Check permission for connect(). 460 + * 461 + * @sock: Pointer to "struct socket". 462 + * @addr: Pointer to "struct sockaddr". 463 + * @addr_len: Size of @addr. 464 + * 465 + * Returns 0 on success, negative value otherwise. 466 + */ 467 + static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, 468 + int addr_len) 469 + { 470 + return tomoyo_socket_connect_permission(sock, addr, addr_len); 471 + } 472 + 473 + /** 474 + * tomoyo_socket_bind - Check permission for bind(). 475 + * 476 + * @sock: Pointer to "struct socket". 477 + * @addr: Pointer to "struct sockaddr". 478 + * @addr_len: Size of @addr. 479 + * 480 + * Returns 0 on success, negative value otherwise. 481 + */ 482 + static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, 483 + int addr_len) 484 + { 485 + return tomoyo_socket_bind_permission(sock, addr, addr_len); 486 + } 487 + 488 + /** 489 + * tomoyo_socket_sendmsg - Check permission for sendmsg(). 490 + * 491 + * @sock: Pointer to "struct socket". 492 + * @msg: Pointer to "struct msghdr". 493 + * @size: Size of message. 494 + * 495 + * Returns 0 on success, negative value otherwise. 496 + */ 497 + static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, 498 + int size) 499 + { 500 + return tomoyo_socket_sendmsg_permission(sock, msg, size); 501 + } 502 + 445 503 /* 446 504 * tomoyo_security_ops is a "struct security_operations" which is used for 447 505 * registering TOMOYO. 
··· 530 472 .sb_mount = tomoyo_sb_mount, 531 473 .sb_umount = tomoyo_sb_umount, 532 474 .sb_pivotroot = tomoyo_sb_pivotroot, 475 + .socket_bind = tomoyo_socket_bind, 476 + .socket_connect = tomoyo_socket_connect, 477 + .socket_listen = tomoyo_socket_listen, 478 + .socket_sendmsg = tomoyo_socket_sendmsg, 533 479 }; 534 480 535 481 /* Lock for GC. */
+76 -4
security/tomoyo/util.c
··· 42 42 [TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE, 43 43 [TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE, 44 44 [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE, 45 + /* CONFIG::network group */ 46 + [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = 47 + TOMOYO_MAC_CATEGORY_NETWORK, 48 + [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = 49 + TOMOYO_MAC_CATEGORY_NETWORK, 50 + [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = 51 + TOMOYO_MAC_CATEGORY_NETWORK, 52 + [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = 53 + TOMOYO_MAC_CATEGORY_NETWORK, 54 + [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = 55 + TOMOYO_MAC_CATEGORY_NETWORK, 56 + [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = 57 + TOMOYO_MAC_CATEGORY_NETWORK, 58 + [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = 59 + TOMOYO_MAC_CATEGORY_NETWORK, 60 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = 61 + TOMOYO_MAC_CATEGORY_NETWORK, 62 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = 63 + TOMOYO_MAC_CATEGORY_NETWORK, 64 + [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = 65 + TOMOYO_MAC_CATEGORY_NETWORK, 66 + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = 67 + TOMOYO_MAC_CATEGORY_NETWORK, 68 + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = 69 + TOMOYO_MAC_CATEGORY_NETWORK, 70 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = 71 + TOMOYO_MAC_CATEGORY_NETWORK, 72 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = 73 + TOMOYO_MAC_CATEGORY_NETWORK, 74 + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = 75 + TOMOYO_MAC_CATEGORY_NETWORK, 76 + /* CONFIG::misc group */ 77 + [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC, 45 78 }; 46 79 47 80 /** ··· 156 123 del = pos + strlen(pos); 157 124 param->data = del; 158 125 return pos; 126 + } 127 + 128 + /** 129 + * tomoyo_get_domainname - Read a domainname from a line. 130 + * 131 + * @param: Pointer to "struct tomoyo_acl_param". 132 + * 133 + * Returns a domainname on success, NULL otherwise. 
134 + */ 135 + const struct tomoyo_path_info *tomoyo_get_domainname 136 + (struct tomoyo_acl_param *param) 137 + { 138 + char *start = param->data; 139 + char *pos = start; 140 + while (*pos) { 141 + if (*pos++ != ' ' || *pos++ == '/') 142 + continue; 143 + pos -= 2; 144 + *pos++ = '\0'; 145 + break; 146 + } 147 + param->data = pos; 148 + if (tomoyo_correct_domain(start)) 149 + return tomoyo_get_name(start); 150 + return NULL; 159 151 } 160 152 161 153 /** ··· 978 920 const u8 index) 979 921 { 980 922 u8 mode; 981 - const u8 category = TOMOYO_MAC_CATEGORY_FILE; 923 + struct tomoyo_profile *p; 924 + 982 925 if (!tomoyo_policy_loaded) 983 926 return TOMOYO_CONFIG_DISABLED; 984 - mode = tomoyo_profile(ns, profile)->config[index]; 927 + p = tomoyo_profile(ns, profile); 928 + mode = p->config[index]; 985 929 if (mode == TOMOYO_CONFIG_USE_DEFAULT) 986 - mode = tomoyo_profile(ns, profile)->config[category]; 930 + mode = p->config[tomoyo_index2category[index] 931 + + TOMOYO_MAX_MAC_INDEX]; 987 932 if (mode == TOMOYO_CONFIG_USE_DEFAULT) 988 - mode = tomoyo_profile(ns, profile)->default_config; 933 + mode = p->default_config; 989 934 return mode & 3; 990 935 } 991 936 ··· 1056 995 case TOMOYO_TYPE_MKDEV_ACL: 1057 996 perm = container_of(ptr, struct tomoyo_mkdev_acl, 1058 997 head)->perm; 998 + break; 999 + case TOMOYO_TYPE_INET_ACL: 1000 + perm = container_of(ptr, struct tomoyo_inet_acl, 1001 + head)->perm; 1002 + break; 1003 + case TOMOYO_TYPE_UNIX_ACL: 1004 + perm = container_of(ptr, struct tomoyo_unix_acl, 1005 + head)->perm; 1006 + break; 1007 + case TOMOYO_TYPE_MANUAL_TASK_ACL: 1008 + perm = 0; 1059 1009 break; 1060 1010 default: 1061 1011 perm = 1;