Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

apparmor: add support for mapping secids and using secctxes

Use a radix tree to provide a mapping between secids and labels,
and, along with it, a basic ability to perform secctx conversion.

Shared/cached secctx support will be added later.

Signed-off-by: John Johansen <john.johansen@canonical.com>

+224 -25
+1 -1
security/apparmor/include/label.h
··· 281 281 282 282 void aa_label_free(struct aa_label *label); 283 283 void aa_label_kref(struct kref *kref); 284 - bool aa_label_init(struct aa_label *label, int size); 284 + bool aa_label_init(struct aa_label *label, int size, gfp_t gfp); 285 285 struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp); 286 286 287 287 bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+12 -3
security/apparmor/include/secid.h
··· 3 3 * 4 4 * This file contains AppArmor security identifier (secid) definitions 5 5 * 6 - * Copyright 2009-2010 Canonical Ltd. 6 + * Copyright 2009-2018 Canonical Ltd. 7 7 * 8 8 * This program is free software; you can redistribute it and/or 9 9 * modify it under the terms of the GNU General Public License as ··· 14 14 #ifndef __AA_SECID_H 15 15 #define __AA_SECID_H 16 16 17 + #include <linux/slab.h> 17 18 #include <linux/types.h> 19 + 20 + struct aa_label; 18 21 19 22 /* secid value that will not be allocated */ 20 23 #define AA_SECID_INVALID 0 21 - #define AA_SECID_ALLOC AA_SECID_INVALID 22 24 23 - u32 aa_alloc_secid(void); 25 + struct aa_label *aa_secid_to_label(u32 secid); 26 + int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); 27 + int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); 28 + void apparmor_release_secctx(char *secdata, u32 seclen); 29 + 30 + 31 + u32 aa_alloc_secid(struct aa_label *label, gfp_t gfp); 24 32 void aa_free_secid(u32 secid); 33 + void aa_secid_update(u32 secid, struct aa_label *label); 25 34 26 35 #endif /* __AA_SECID_H */
+3 -3
security/apparmor/label.c
··· 402 402 aa_put_label(new); 403 403 } 404 404 405 - bool aa_label_init(struct aa_label *label, int size) 405 + bool aa_label_init(struct aa_label *label, int size, gfp_t gfp) 406 406 { 407 407 AA_BUG(!label); 408 408 AA_BUG(size < 1); 409 409 410 - label->secid = aa_alloc_secid(); 410 + label->secid = aa_alloc_secid(label, gfp); 411 411 if (label->secid == AA_SECID_INVALID) 412 412 return false; 413 413 ··· 441 441 if (!new) 442 442 goto fail; 443 443 444 - if (!aa_label_init(new, size)) 444 + if (!aa_label_init(new, size, gfp)) 445 445 goto fail; 446 446 447 447 if (!proxy) {
+5
security/apparmor/lsm.c
··· 39 39 #include "include/policy_ns.h" 40 40 #include "include/procattr.h" 41 41 #include "include/mount.h" 42 + #include "include/secid.h" 42 43 43 44 /* Flag indicating whether initialization completed */ 44 45 int apparmor_initialized; ··· 1189 1188 LSM_HOOK_INIT(task_alloc, apparmor_task_alloc), 1190 1189 LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit), 1191 1190 LSM_HOOK_INIT(task_kill, apparmor_task_kill), 1191 + 1192 + LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx), 1193 + LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid), 1194 + LSM_HOOK_INIT(release_secctx, apparmor_release_secctx), 1192 1195 }; 1193 1196 1194 1197 /*
+1 -1
security/apparmor/policy.c
··· 268 268 269 269 if (!aa_policy_init(&profile->base, NULL, hname, gfp)) 270 270 goto fail; 271 - if (!aa_label_init(&profile->label, 1)) 271 + if (!aa_label_init(&profile->label, 1, gfp)) 272 272 goto fail; 273 273 274 274 /* update being set needed by fs interface */
+202 -17
security/apparmor/secid.c
··· 3 3 * 4 4 * This file contains AppArmor security identifier (secid) manipulation fns 5 5 * 6 - * Copyright 2009-2010 Canonical Ltd. 6 + * Copyright 2009-2017 Canonical Ltd. 7 7 * 8 8 * This program is free software; you can redistribute it and/or 9 9 * modify it under the terms of the GNU General Public License as ··· 11 11 * License. 12 12 * 13 13 * 14 - * AppArmor allocates a unique secid for every profile loaded. If a profile 15 - * is replaced it receives the secid of the profile it is replacing. 16 - * 17 - * The secid value of 0 is invalid. 14 + * AppArmor allocates a unique secid for every label used. If a label 15 + * is replaced it receives the secid of the label it is replacing. 18 16 */ 19 17 20 - #include <linux/spinlock.h> 21 18 #include <linux/errno.h> 22 19 #include <linux/err.h> 20 + #include <linux/gfp.h> 21 + #include <linux/slab.h> 22 + #include <linux/spinlock.h> 23 23 24 + #include "include/cred.h" 25 + #include "include/lib.h" 24 26 #include "include/secid.h" 27 + #include "include/label.h" 28 + #include "include/policy_ns.h" 25 29 26 - /* global counter from which secids are allocated */ 27 - static u32 global_secid; 30 + /* 31 + * secids - do not pin labels with a refcount. They rely on the label 32 + * properly updating/freeing them 33 + * 34 + * A singly linked free list is used to track secids that have been 35 + * freed and reuse them before allocating new ones 36 + */ 37 + 38 + #define FREE_LIST_HEAD 1 39 + 40 + static RADIX_TREE(aa_secids_map, GFP_ATOMIC); 28 41 static DEFINE_SPINLOCK(secid_lock); 42 + static u32 alloced_secid = FREE_LIST_HEAD; 43 + static u32 free_list = FREE_LIST_HEAD; 44 + static unsigned long free_count; 29 45 30 - /* TODO FIXME: add secid to profile mapping, and secid recycling */ 46 + /* 47 + * TODO: allow policy to reserve a secid range? 
48 + * TODO: add secid pinning 49 + * TODO: use secid_update in label replace 50 + */ 51 + 52 + #define SECID_MAX U32_MAX 53 + 54 + /* TODO: mark free list as exceptional */ 55 + static void *to_ptr(u32 secid) 56 + { 57 + return (void *) 58 + ((((unsigned long) secid) << RADIX_TREE_EXCEPTIONAL_SHIFT)); 59 + } 60 + 61 + static u32 to_secid(void *ptr) 62 + { 63 + return (u32) (((unsigned long) ptr) >> RADIX_TREE_EXCEPTIONAL_SHIFT); 64 + } 65 + 66 + 67 + /* TODO: tag free_list entries to mark them as different */ 68 + static u32 __pop(struct aa_label *label) 69 + { 70 + u32 secid = free_list; 71 + void __rcu **slot; 72 + void *entry; 73 + 74 + if (free_list == FREE_LIST_HEAD) 75 + return AA_SECID_INVALID; 76 + 77 + slot = radix_tree_lookup_slot(&aa_secids_map, secid); 78 + AA_BUG(!slot); 79 + entry = radix_tree_deref_slot_protected(slot, &secid_lock); 80 + free_list = to_secid(entry); 81 + radix_tree_replace_slot(&aa_secids_map, slot, label); 82 + free_count--; 83 + 84 + return secid; 85 + } 86 + 87 + static void __push(u32 secid) 88 + { 89 + void __rcu **slot; 90 + 91 + slot = radix_tree_lookup_slot(&aa_secids_map, secid); 92 + AA_BUG(!slot); 93 + radix_tree_replace_slot(&aa_secids_map, slot, to_ptr(free_list)); 94 + free_list = secid; 95 + free_count++; 96 + } 97 + 98 + static struct aa_label * __secid_update(u32 secid, struct aa_label *label) 99 + { 100 + struct aa_label *old; 101 + void __rcu **slot; 102 + 103 + slot = radix_tree_lookup_slot(&aa_secids_map, secid); 104 + AA_BUG(!slot); 105 + old = radix_tree_deref_slot_protected(slot, &secid_lock); 106 + radix_tree_replace_slot(&aa_secids_map, slot, label); 107 + 108 + return old; 109 + } 110 + 111 + /** 112 + * aa_secid_update - update a secid mapping to a new label 113 + * @secid: secid to update 114 + * @label: label the secid will now map to 115 + */ 116 + void aa_secid_update(u32 secid, struct aa_label *label) 117 + { 118 + struct aa_label *old; 119 + unsigned long flags; 120 + 121 + 
spin_lock_irqsave(&secid_lock, flags); 122 + old = __secid_update(secid, label); 123 + spin_unlock_irqrestore(&secid_lock, flags); 124 + } 125 + 126 + /** 127 + * 128 + * see label for inverse aa_label_to_secid 129 + */ 130 + struct aa_label *aa_secid_to_label(u32 secid) 131 + { 132 + struct aa_label *label; 133 + 134 + rcu_read_lock(); 135 + label = radix_tree_lookup(&aa_secids_map, secid); 136 + rcu_read_unlock(); 137 + 138 + return label; 139 + } 140 + 141 + int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 142 + { 143 + /* TODO: cache secctx and ref count so we don't have to recreate */ 144 + struct aa_label *label = aa_secid_to_label(secid); 145 + 146 + AA_BUG(!secdata); 147 + AA_BUG(!seclen); 148 + 149 + if (!label) 150 + return -EINVAL; 151 + 152 + if (secdata) 153 + *seclen = aa_label_asxprint(secdata, root_ns, label, 154 + FLAG_SHOW_MODE | FLAG_VIEW_SUBNS | 155 + FLAG_HIDDEN_UNCONFINED | 156 + FLAG_ABS_ROOT, GFP_ATOMIC); 157 + else 158 + *seclen = aa_label_snxprint(NULL, 0, root_ns, label, 159 + FLAG_SHOW_MODE | FLAG_VIEW_SUBNS | 160 + FLAG_HIDDEN_UNCONFINED | 161 + FLAG_ABS_ROOT); 162 + if (*seclen < 0) 163 + return -ENOMEM; 164 + 165 + return 0; 166 + } 167 + 168 + 169 + int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) 170 + { 171 + struct aa_label *label; 172 + 173 + label = aa_label_strn_parse(&root_ns->unconfined->label, secdata, 174 + seclen, GFP_KERNEL, false, false); 175 + if (IS_ERR(label)) 176 + return PTR_ERR(label); 177 + *secid = label->secid; 178 + 179 + return 0; 180 + } 181 + 182 + void apparmor_release_secctx(char *secdata, u32 seclen) 183 + { 184 + kfree(secdata); 185 + } 186 + 31 187 32 188 /** 33 189 * aa_alloc_secid - allocate a new secid for a profile 34 190 */ 35 - u32 aa_alloc_secid(void) 191 + u32 aa_alloc_secid(struct aa_label *label, gfp_t gfp) 36 192 { 193 + unsigned long flags; 37 194 u32 secid; 38 195 39 - /* 40 - * TODO FIXME: secid recycling - part of profile mapping table 41 - */ 
42 - spin_lock(&secid_lock); 43 - secid = (++global_secid); 44 - spin_unlock(&secid_lock); 196 + /* racey, but at worst causes new allocation instead of reuse */ 197 + if (free_list == FREE_LIST_HEAD) { 198 + bool preload = 0; 199 + int res; 200 + 201 + retry: 202 + if (gfpflags_allow_blocking(gfp) && !radix_tree_preload(gfp)) 203 + preload = 1; 204 + spin_lock_irqsave(&secid_lock, flags); 205 + if (alloced_secid != SECID_MAX) { 206 + secid = ++alloced_secid; 207 + res = radix_tree_insert(&aa_secids_map, secid, label); 208 + AA_BUG(res == -EEXIST); 209 + } else { 210 + secid = AA_SECID_INVALID; 211 + } 212 + spin_unlock_irqrestore(&secid_lock, flags); 213 + if (preload) 214 + radix_tree_preload_end(); 215 + } else { 216 + spin_lock_irqsave(&secid_lock, flags); 217 + /* remove entry from free list */ 218 + secid = __pop(label); 219 + if (secid == AA_SECID_INVALID) { 220 + spin_unlock_irqrestore(&secid_lock, flags); 221 + goto retry; 222 + } 223 + spin_unlock_irqrestore(&secid_lock, flags); 224 + } 225 + 45 226 return secid; 46 227 } 47 228 ··· 232 51 */ 233 52 void aa_free_secid(u32 secid) 234 53 { 235 - ; /* NOP ATM */ 54 + unsigned long flags; 55 + 56 + spin_lock_irqsave(&secid_lock, flags); 57 + __push(secid); 58 + spin_unlock_irqrestore(&secid_lock, flags); 236 59 }