Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KEYS: Expand the capacity of a keyring

Expand the capacity of a keyring to be able to hold a lot more keys by using
the previously added associative array implementation. Currently the maximum
capacity is:

(PAGE_SIZE - sizeof(header)) / sizeof(struct key *)

which, on a 64-bit system, is a little more than 500. However, since this is being
used for the NFS uid mapper, we need more than that. The new implementation
gives us effectively unlimited capacity.

With some alterations, the keyutils testsuite runs successfully to completion
after this patch is applied. The alterations are because (a) keyrings that
are simply added to no longer appear ordered and (b) some of the errors have
changed a bit.

Signed-off-by: David Howells <dhowells@redhat.com>

+802 -761
+2 -15
include/keys/keyring-type.h
··· 1 1 /* Keyring key type 2 2 * 3 - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 + * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved. 4 4 * Written by David Howells (dhowells@redhat.com) 5 5 * 6 6 * This program is free software; you can redistribute it and/or ··· 13 13 #define _KEYS_KEYRING_TYPE_H 14 14 15 15 #include <linux/key.h> 16 - #include <linux/rcupdate.h> 17 - 18 - /* 19 - * the keyring payload contains a list of the keys to which the keyring is 20 - * subscribed 21 - */ 22 - struct keyring_list { 23 - struct rcu_head rcu; /* RCU deletion hook */ 24 - unsigned short maxkeys; /* max keys this list can hold */ 25 - unsigned short nkeys; /* number of keys currently held */ 26 - unsigned short delkey; /* key to be unlinked by RCU */ 27 - struct key __rcu *keys[0]; 28 - }; 29 - 16 + #include <linux/assoc_array.h> 30 17 31 18 #endif /* _KEYS_KEYRING_TYPE_H */
+8 -5
include/linux/key.h
··· 22 22 #include <linux/sysctl.h> 23 23 #include <linux/rwsem.h> 24 24 #include <linux/atomic.h> 25 + #include <linux/assoc_array.h> 25 26 26 27 #ifdef __KERNEL__ 27 28 #include <linux/uidgid.h> ··· 197 196 * whatever 198 197 */ 199 198 union { 200 - unsigned long value; 201 - void __rcu *rcudata; 202 - void *data; 203 - struct keyring_list __rcu *subscriptions; 204 - } payload; 199 + union { 200 + unsigned long value; 201 + void __rcu *rcudata; 202 + void *data; 203 + } payload; 204 + struct assoc_array keys; 205 + }; 205 206 }; 206 207 207 208 extern struct key *key_alloc(struct key_type *type,
+1
lib/assoc_array.c
··· 12 12 */ 13 13 //#define DEBUG 14 14 #include <linux/slab.h> 15 + #include <linux/err.h> 15 16 #include <linux/assoc_array_priv.h> 16 17 17 18 /*
+1
security/keys/Kconfig
··· 4 4 5 5 config KEYS 6 6 bool "Enable access key retention support" 7 + select ASSOCIATIVE_ARRAY 7 8 help 8 9 This option provides support for retaining authentication tokens and 9 10 access keys in the kernel.
+14 -19
security/keys/gc.c
··· 130 130 kleave(""); 131 131 } 132 132 133 + static int key_gc_keyring_func(const void *object, void *iterator_data) 134 + { 135 + const struct key *key = object; 136 + time_t *limit = iterator_data; 137 + return key_is_dead(key, *limit); 138 + } 139 + 133 140 /* 134 141 * Garbage collect pointers from a keyring. 135 142 * ··· 145 138 */ 146 139 static void key_gc_keyring(struct key *keyring, time_t limit) 147 140 { 148 - struct keyring_list *klist; 149 - int loop; 141 + int result; 150 142 151 - kenter("%x", key_serial(keyring)); 143 + kenter("%x{%s}", keyring->serial, keyring->description ?: ""); 152 144 153 145 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | 154 146 (1 << KEY_FLAG_REVOKED))) ··· 155 149 156 150 /* scan the keyring looking for dead keys */ 157 151 rcu_read_lock(); 158 - klist = rcu_dereference(keyring->payload.subscriptions); 159 - if (!klist) 160 - goto unlock_dont_gc; 161 - 162 - loop = klist->nkeys; 163 - smp_rmb(); 164 - for (loop--; loop >= 0; loop--) { 165 - struct key *key = rcu_dereference(klist->keys[loop]); 166 - if (key_is_dead(key, limit)) 167 - goto do_gc; 168 - } 169 - 170 - unlock_dont_gc: 152 + result = assoc_array_iterate(&keyring->keys, 153 + key_gc_keyring_func, &limit); 171 154 rcu_read_unlock(); 155 + if (result == true) 156 + goto do_gc; 157 + 172 158 dont_gc: 173 159 kleave(" [no gc]"); 174 160 return; 175 161 176 162 do_gc: 177 - rcu_read_unlock(); 178 - 179 163 keyring_gc(keyring, limit); 180 164 kleave(" [gc]"); 181 165 } ··· 388 392 */ 389 393 found_keyring: 390 394 spin_unlock(&key_serial_lock); 391 - kdebug("scan keyring %d", key->serial); 392 395 key_gc_keyring(key, limit); 393 396 goto maybe_resched; 394 397
+11 -6
security/keys/internal.h
··· 90 90 91 91 extern int __key_link_begin(struct key *keyring, 92 92 const struct keyring_index_key *index_key, 93 - unsigned long *_prealloc); 93 + struct assoc_array_edit **_edit); 94 94 extern int __key_link_check_live_key(struct key *keyring, struct key *key); 95 - extern void __key_link(struct key *keyring, struct key *key, 96 - unsigned long *_prealloc); 95 + extern void __key_link(struct key *key, struct assoc_array_edit **_edit); 97 96 extern void __key_link_end(struct key *keyring, 98 97 const struct keyring_index_key *index_key, 99 - unsigned long prealloc); 98 + struct assoc_array_edit *edit); 100 99 101 - extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, 102 - const struct keyring_index_key *index_key); 100 + extern key_ref_t find_key_to_update(key_ref_t keyring_ref, 101 + const struct keyring_index_key *index_key); 103 102 104 103 extern struct key *keyring_search_instkey(struct key *keyring, 105 104 key_serial_t target_id); 105 + 106 + extern int iterate_over_keyring(const struct key *keyring, 107 + int (*func)(const struct key *key, void *data), 108 + void *data); 106 109 107 110 typedef int (*key_match_func_t)(const struct key *, const void *); 108 111 ··· 121 118 #define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */ 122 119 #define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */ 123 120 #define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */ 121 + 122 + int (*iterator)(const void *object, void *iterator_data); 124 123 125 124 /* Internal stuff */ 126 125 int skipped_ret;
+17 -18
security/keys/key.c
··· 409 409 struct key_preparsed_payload *prep, 410 410 struct key *keyring, 411 411 struct key *authkey, 412 - unsigned long *_prealloc) 412 + struct assoc_array_edit **_edit) 413 413 { 414 414 int ret, awaken; 415 415 ··· 436 436 437 437 /* and link it into the destination keyring */ 438 438 if (keyring) 439 - __key_link(keyring, key, _prealloc); 439 + __key_link(key, _edit); 440 440 441 441 /* disable the authorisation key */ 442 442 if (authkey) ··· 476 476 struct key *authkey) 477 477 { 478 478 struct key_preparsed_payload prep; 479 - unsigned long prealloc; 479 + struct assoc_array_edit *edit; 480 480 int ret; 481 481 482 482 memset(&prep, 0, sizeof(prep)); ··· 490 490 } 491 491 492 492 if (keyring) { 493 - ret = __key_link_begin(keyring, &key->index_key, &prealloc); 493 + ret = __key_link_begin(keyring, &key->index_key, &edit); 494 494 if (ret < 0) 495 495 goto error_free_preparse; 496 496 } 497 497 498 - ret = __key_instantiate_and_link(key, &prep, keyring, authkey, 499 - &prealloc); 498 + ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); 500 499 501 500 if (keyring) 502 - __key_link_end(keyring, &key->index_key, prealloc); 501 + __key_link_end(keyring, &key->index_key, edit); 503 502 504 503 error_free_preparse: 505 504 if (key->type->preparse) ··· 536 537 struct key *keyring, 537 538 struct key *authkey) 538 539 { 539 - unsigned long prealloc; 540 + struct assoc_array_edit *edit; 540 541 struct timespec now; 541 542 int ret, awaken, link_ret = 0; 542 543 ··· 547 548 ret = -EBUSY; 548 549 549 550 if (keyring) 550 - link_ret = __key_link_begin(keyring, &key->index_key, &prealloc); 551 + link_ret = __key_link_begin(keyring, &key->index_key, &edit); 551 552 552 553 mutex_lock(&key_construction_mutex); 553 554 ··· 569 570 570 571 /* and link it into the destination keyring */ 571 572 if (keyring && link_ret == 0) 572 - __key_link(keyring, key, &prealloc); 573 + __key_link(key, &edit); 573 574 574 575 /* disable the authorisation key */ 575 
576 if (authkey) ··· 579 580 mutex_unlock(&key_construction_mutex); 580 581 581 582 if (keyring) 582 - __key_link_end(keyring, &key->index_key, prealloc); 583 + __key_link_end(keyring, &key->index_key, edit); 583 584 584 585 /* wake up anyone waiting for a key to be constructed */ 585 586 if (awaken) ··· 782 783 .description = description, 783 784 }; 784 785 struct key_preparsed_payload prep; 786 + struct assoc_array_edit *edit; 785 787 const struct cred *cred = current_cred(); 786 - unsigned long prealloc; 787 788 struct key *keyring, *key = NULL; 788 789 key_ref_t key_ref; 789 790 int ret; ··· 827 828 } 828 829 index_key.desc_len = strlen(index_key.description); 829 830 830 - ret = __key_link_begin(keyring, &index_key, &prealloc); 831 + ret = __key_link_begin(keyring, &index_key, &edit); 831 832 if (ret < 0) { 832 833 key_ref = ERR_PTR(ret); 833 834 goto error_free_prep; ··· 846 847 * update that instead if possible 847 848 */ 848 849 if (index_key.type->update) { 849 - key_ref = __keyring_search_one(keyring_ref, &index_key); 850 - if (!IS_ERR(key_ref)) 850 + key_ref = find_key_to_update(keyring_ref, &index_key); 851 + if (key_ref) 851 852 goto found_matching_key; 852 853 } 853 854 ··· 873 874 } 874 875 875 876 /* instantiate it and link it into the target keyring */ 876 - ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc); 877 + ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); 877 878 if (ret < 0) { 878 879 key_put(key); 879 880 key_ref = ERR_PTR(ret); ··· 883 884 key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); 884 885 885 886 error_link_end: 886 - __key_link_end(keyring, &index_key, prealloc); 887 + __key_link_end(keyring, &index_key, edit); 887 888 error_free_prep: 888 889 if (index_key.type->preparse) 889 890 index_key.type->free_preparse(&prep); ··· 896 897 /* we found a matching key, so we're going to try to update it 897 898 * - we can drop the locks first as we have the key pinned 898 899 */ 899 - 
__key_link_end(keyring, &index_key, prealloc); 900 + __key_link_end(keyring, &index_key, edit); 900 901 901 902 key_ref = __key_update(key_ref, &prep); 902 903 goto error_free_prep;
+742 -692
security/keys/keyring.c
··· 1 1 /* Keyring handling 2 2 * 3 - * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. 3 + * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. 4 4 * Written by David Howells (dhowells@redhat.com) 5 5 * 6 6 * This program is free software; you can redistribute it and/or ··· 17 17 #include <linux/seq_file.h> 18 18 #include <linux/err.h> 19 19 #include <keys/keyring-type.h> 20 + #include <keys/user-type.h> 21 + #include <linux/assoc_array_priv.h> 20 22 #include <linux/uaccess.h> 21 23 #include "internal.h" 22 - 23 - #define rcu_dereference_locked_keyring(keyring) \ 24 - (rcu_dereference_protected( \ 25 - (keyring)->payload.subscriptions, \ 26 - rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem))) 27 - 28 - #define rcu_deref_link_locked(klist, index, keyring) \ 29 - (rcu_dereference_protected( \ 30 - (klist)->keys[index], \ 31 - rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem))) 32 - 33 - #define MAX_KEYRING_LINKS \ 34 - min_t(size_t, USHRT_MAX - 1, \ 35 - ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *))) 36 - 37 - #define KEY_LINK_FIXQUOTA 1UL 38 24 39 25 /* 40 26 * When plumbing the depths of the key tree, this sets a hard limit ··· 32 46 * We keep all named keyrings in a hash to speed looking them up. 33 47 */ 34 48 #define KEYRING_NAME_HASH_SIZE (1 << 5) 49 + 50 + /* 51 + * We mark pointers we pass to the associative array with bit 1 set if 52 + * they're keyrings and clear otherwise. 
53 + */ 54 + #define KEYRING_PTR_SUBTYPE 0x2UL 55 + 56 + static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) 57 + { 58 + return (unsigned long)x & KEYRING_PTR_SUBTYPE; 59 + } 60 + static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) 61 + { 62 + void *object = assoc_array_ptr_to_leaf(x); 63 + return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); 64 + } 65 + static inline void *keyring_key_to_ptr(struct key *key) 66 + { 67 + if (key->type == &key_type_keyring) 68 + return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); 69 + return key; 70 + } 35 71 36 72 static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; 37 73 static DEFINE_RWLOCK(keyring_name_lock); ··· 75 67 */ 76 68 static int keyring_instantiate(struct key *keyring, 77 69 struct key_preparsed_payload *prep); 78 - static int keyring_match(const struct key *keyring, const void *criterion); 79 70 static void keyring_revoke(struct key *keyring); 80 71 static void keyring_destroy(struct key *keyring); 81 72 static void keyring_describe(const struct key *keyring, struct seq_file *m); ··· 83 76 84 77 struct key_type key_type_keyring = { 85 78 .name = "keyring", 86 - .def_datalen = sizeof(struct keyring_list), 79 + .def_datalen = 0, 87 80 .instantiate = keyring_instantiate, 88 - .match = keyring_match, 81 + .match = user_match, 89 82 .revoke = keyring_revoke, 90 83 .destroy = keyring_destroy, 91 84 .describe = keyring_describe, ··· 134 127 135 128 ret = -EINVAL; 136 129 if (prep->datalen == 0) { 130 + assoc_array_init(&keyring->keys); 137 131 /* make the keyring available by name if it has one */ 138 132 keyring_publish_name(keyring); 139 133 ret = 0; ··· 144 136 } 145 137 146 138 /* 147 - * Match keyrings on their name 139 + * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd 140 + * fold the carry back too, but that requires inline asm. 
148 141 */ 149 - static int keyring_match(const struct key *keyring, const void *description) 142 + static u64 mult_64x32_and_fold(u64 x, u32 y) 150 143 { 151 - return keyring->description && 152 - strcmp(keyring->description, description) == 0; 144 + u64 hi = (u64)(u32)(x >> 32) * y; 145 + u64 lo = (u64)(u32)(x) * y; 146 + return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); 153 147 } 148 + 149 + /* 150 + * Hash a key type and description. 151 + */ 152 + static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) 153 + { 154 + const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; 155 + const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK; 156 + const char *description = index_key->description; 157 + unsigned long hash, type; 158 + u32 piece; 159 + u64 acc; 160 + int n, desc_len = index_key->desc_len; 161 + 162 + type = (unsigned long)index_key->type; 163 + 164 + acc = mult_64x32_and_fold(type, desc_len + 13); 165 + acc = mult_64x32_and_fold(acc, 9207); 166 + for (;;) { 167 + n = desc_len; 168 + if (n <= 0) 169 + break; 170 + if (n > 4) 171 + n = 4; 172 + piece = 0; 173 + memcpy(&piece, description, n); 174 + description += n; 175 + desc_len -= n; 176 + acc = mult_64x32_and_fold(acc, piece); 177 + acc = mult_64x32_and_fold(acc, 9207); 178 + } 179 + 180 + /* Fold the hash down to 32 bits if need be. */ 181 + hash = acc; 182 + if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) 183 + hash ^= acc >> 32; 184 + 185 + /* Squidge all the keyrings into a separate part of the tree to 186 + * ordinary keys by making sure the lowest level segment in the hash is 187 + * zero for keyrings and non-zero otherwise. 
188 + */ 189 + if (index_key->type != &key_type_keyring && (hash & level_mask) == 0) 190 + return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; 191 + if (index_key->type == &key_type_keyring && (hash & level_mask) != 0) 192 + return (hash + (hash << level_shift)) & ~level_mask; 193 + return hash; 194 + } 195 + 196 + /* 197 + * Build the next index key chunk. 198 + * 199 + * On 32-bit systems the index key is laid out as: 200 + * 201 + * 0 4 5 9... 202 + * hash desclen typeptr desc[] 203 + * 204 + * On 64-bit systems: 205 + * 206 + * 0 8 9 17... 207 + * hash desclen typeptr desc[] 208 + * 209 + * We return it one word-sized chunk at a time. 210 + */ 211 + static unsigned long keyring_get_key_chunk(const void *data, int level) 212 + { 213 + const struct keyring_index_key *index_key = data; 214 + unsigned long chunk = 0; 215 + long offset = 0; 216 + int desc_len = index_key->desc_len, n = sizeof(chunk); 217 + 218 + level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; 219 + switch (level) { 220 + case 0: 221 + return hash_key_type_and_desc(index_key); 222 + case 1: 223 + return ((unsigned long)index_key->type << 8) | desc_len; 224 + case 2: 225 + if (desc_len == 0) 226 + return (u8)((unsigned long)index_key->type >> 227 + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); 228 + n--; 229 + offset = 1; 230 + default: 231 + offset += sizeof(chunk) - 1; 232 + offset += (level - 3) * sizeof(chunk); 233 + if (offset >= desc_len) 234 + return 0; 235 + desc_len -= offset; 236 + if (desc_len > n) 237 + desc_len = n; 238 + offset += desc_len; 239 + do { 240 + chunk <<= 8; 241 + chunk |= ((u8*)index_key->description)[--offset]; 242 + } while (--desc_len > 0); 243 + 244 + if (level == 2) { 245 + chunk <<= 8; 246 + chunk |= (u8)((unsigned long)index_key->type >> 247 + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); 248 + } 249 + return chunk; 250 + } 251 + } 252 + 253 + static unsigned long keyring_get_object_key_chunk(const void *object, int level) 254 + { 255 + const struct key *key = 
keyring_ptr_to_key(object); 256 + return keyring_get_key_chunk(&key->index_key, level); 257 + } 258 + 259 + static bool keyring_compare_object(const void *object, const void *data) 260 + { 261 + const struct keyring_index_key *index_key = data; 262 + const struct key *key = keyring_ptr_to_key(object); 263 + 264 + return key->index_key.type == index_key->type && 265 + key->index_key.desc_len == index_key->desc_len && 266 + memcmp(key->index_key.description, index_key->description, 267 + index_key->desc_len) == 0; 268 + } 269 + 270 + /* 271 + * Compare the index keys of a pair of objects and determine the bit position 272 + * at which they differ - if they differ. 273 + */ 274 + static int keyring_diff_objects(const void *_a, const void *_b) 275 + { 276 + const struct key *key_a = keyring_ptr_to_key(_a); 277 + const struct key *key_b = keyring_ptr_to_key(_b); 278 + const struct keyring_index_key *a = &key_a->index_key; 279 + const struct keyring_index_key *b = &key_b->index_key; 280 + unsigned long seg_a, seg_b; 281 + int level, i; 282 + 283 + level = 0; 284 + seg_a = hash_key_type_and_desc(a); 285 + seg_b = hash_key_type_and_desc(b); 286 + if ((seg_a ^ seg_b) != 0) 287 + goto differ; 288 + 289 + /* The number of bits contributed by the hash is controlled by a 290 + * constant in the assoc_array headers. Everything else thereafter we 291 + * can deal with as being machine word-size dependent. 
292 + */ 293 + level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; 294 + seg_a = a->desc_len; 295 + seg_b = b->desc_len; 296 + if ((seg_a ^ seg_b) != 0) 297 + goto differ; 298 + 299 + /* The next bit may not work on big endian */ 300 + level++; 301 + seg_a = (unsigned long)a->type; 302 + seg_b = (unsigned long)b->type; 303 + if ((seg_a ^ seg_b) != 0) 304 + goto differ; 305 + 306 + level += sizeof(unsigned long); 307 + if (a->desc_len == 0) 308 + goto same; 309 + 310 + i = 0; 311 + if (((unsigned long)a->description | (unsigned long)b->description) & 312 + (sizeof(unsigned long) - 1)) { 313 + do { 314 + seg_a = *(unsigned long *)(a->description + i); 315 + seg_b = *(unsigned long *)(b->description + i); 316 + if ((seg_a ^ seg_b) != 0) 317 + goto differ_plus_i; 318 + i += sizeof(unsigned long); 319 + } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); 320 + } 321 + 322 + for (; i < a->desc_len; i++) { 323 + seg_a = *(unsigned char *)(a->description + i); 324 + seg_b = *(unsigned char *)(b->description + i); 325 + if ((seg_a ^ seg_b) != 0) 326 + goto differ_plus_i; 327 + } 328 + 329 + same: 330 + return -1; 331 + 332 + differ_plus_i: 333 + level += i; 334 + differ: 335 + i = level * 8 + __ffs(seg_a ^ seg_b); 336 + return i; 337 + } 338 + 339 + /* 340 + * Free an object after stripping the keyring flag off of the pointer. 341 + */ 342 + static void keyring_free_object(void *object) 343 + { 344 + key_put(keyring_ptr_to_key(object)); 345 + } 346 + 347 + /* 348 + * Operations for keyring management by the index-tree routines. 349 + */ 350 + static const struct assoc_array_ops keyring_assoc_array_ops = { 351 + .get_key_chunk = keyring_get_key_chunk, 352 + .get_object_key_chunk = keyring_get_object_key_chunk, 353 + .compare_object = keyring_compare_object, 354 + .diff_objects = keyring_diff_objects, 355 + .free_object = keyring_free_object, 356 + }; 154 357 155 358 /* 156 359 * Clean up a keyring when it is destroyed. 
Unpublish its name if it had one ··· 374 155 */ 375 156 static void keyring_destroy(struct key *keyring) 376 157 { 377 - struct keyring_list *klist; 378 - int loop; 379 - 380 158 if (keyring->description) { 381 159 write_lock(&keyring_name_lock); 382 160 ··· 384 168 write_unlock(&keyring_name_lock); 385 169 } 386 170 387 - klist = rcu_access_pointer(keyring->payload.subscriptions); 388 - if (klist) { 389 - for (loop = klist->nkeys - 1; loop >= 0; loop--) 390 - key_put(rcu_access_pointer(klist->keys[loop])); 391 - kfree(klist); 392 - } 171 + assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); 393 172 } 394 173 395 174 /* ··· 392 181 */ 393 182 static void keyring_describe(const struct key *keyring, struct seq_file *m) 394 183 { 395 - struct keyring_list *klist; 396 - 397 184 if (keyring->description) 398 185 seq_puts(m, keyring->description); 399 186 else 400 187 seq_puts(m, "[anon]"); 401 188 402 189 if (key_is_instantiated(keyring)) { 403 - rcu_read_lock(); 404 - klist = rcu_dereference(keyring->payload.subscriptions); 405 - if (klist) 406 - seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys); 190 + if (keyring->keys.nr_leaves_on_tree != 0) 191 + seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); 407 192 else 408 193 seq_puts(m, ": empty"); 409 - rcu_read_unlock(); 410 194 } 195 + } 196 + 197 + struct keyring_read_iterator_context { 198 + size_t qty; 199 + size_t count; 200 + key_serial_t __user *buffer; 201 + }; 202 + 203 + static int keyring_read_iterator(const void *object, void *data) 204 + { 205 + struct keyring_read_iterator_context *ctx = data; 206 + const struct key *key = keyring_ptr_to_key(object); 207 + int ret; 208 + 209 + kenter("{%s,%d},,{%zu/%zu}", 210 + key->type->name, key->serial, ctx->count, ctx->qty); 211 + 212 + if (ctx->count >= ctx->qty) 213 + return 1; 214 + 215 + ret = put_user(key->serial, ctx->buffer); 216 + if (ret < 0) 217 + return ret; 218 + ctx->buffer++; 219 + ctx->count += sizeof(key->serial); 220 + return 0; 
411 221 } 412 222 413 223 /* 414 224 * Read a list of key IDs from the keyring's contents in binary form 415 225 * 416 - * The keyring's semaphore is read-locked by the caller. 226 + * The keyring's semaphore is read-locked by the caller. This prevents someone 227 + * from modifying it under us - which could cause us to read key IDs multiple 228 + * times. 417 229 */ 418 230 static long keyring_read(const struct key *keyring, 419 231 char __user *buffer, size_t buflen) 420 232 { 421 - struct keyring_list *klist; 422 - struct key *key; 423 - size_t qty, tmp; 424 - int loop, ret; 233 + struct keyring_read_iterator_context ctx; 234 + unsigned long nr_keys; 235 + int ret; 425 236 426 - ret = 0; 427 - klist = rcu_dereference_locked_keyring(keyring); 428 - if (klist) { 429 - /* calculate how much data we could return */ 430 - qty = klist->nkeys * sizeof(key_serial_t); 237 + kenter("{%d},,%zu", key_serial(keyring), buflen); 431 238 432 - if (buffer && buflen > 0) { 433 - if (buflen > qty) 434 - buflen = qty; 239 + if (buflen & (sizeof(key_serial_t) - 1)) 240 + return -EINVAL; 435 241 436 - /* copy the IDs of the subscribed keys into the 437 - * buffer */ 438 - ret = -EFAULT; 242 + nr_keys = keyring->keys.nr_leaves_on_tree; 243 + if (nr_keys == 0) 244 + return 0; 439 245 440 - for (loop = 0; loop < klist->nkeys; loop++) { 441 - key = rcu_deref_link_locked(klist, loop, 442 - keyring); 246 + /* Calculate how much data we could return */ 247 + ctx.qty = nr_keys * sizeof(key_serial_t); 443 248 444 - tmp = sizeof(key_serial_t); 445 - if (tmp > buflen) 446 - tmp = buflen; 249 + if (!buffer || !buflen) 250 + return ctx.qty; 447 251 448 - if (copy_to_user(buffer, 449 - &key->serial, 450 - tmp) != 0) 451 - goto error; 252 + if (buflen > ctx.qty) 253 + ctx.qty = buflen; 452 254 453 - buflen -= tmp; 454 - if (buflen == 0) 455 - break; 456 - buffer += tmp; 457 - } 458 - } 459 - 460 - ret = qty; 255 + /* Copy the IDs of the subscribed keys into the buffer */ 256 + ctx.buffer = 
(key_serial_t __user *)buffer; 257 + ctx.count = 0; 258 + ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); 259 + if (ret < 0) { 260 + kleave(" = %d [iterate]", ret); 261 + return ret; 461 262 } 462 263 463 - error: 464 - return ret; 264 + kleave(" = %zu [ok]", ctx.count); 265 + return ctx.count; 465 266 } 466 267 467 268 /* ··· 500 277 } 501 278 EXPORT_SYMBOL(keyring_alloc); 502 279 280 + /* 281 + * Iteration function to consider each key found. 282 + */ 283 + static int keyring_search_iterator(const void *object, void *iterator_data) 284 + { 285 + struct keyring_search_context *ctx = iterator_data; 286 + const struct key *key = keyring_ptr_to_key(object); 287 + unsigned long kflags = key->flags; 288 + 289 + kenter("{%d}", key->serial); 290 + 291 + /* ignore keys not of this type */ 292 + if (key->type != ctx->index_key.type) { 293 + kleave(" = 0 [!type]"); 294 + return 0; 295 + } 296 + 297 + /* skip invalidated, revoked and expired keys */ 298 + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 299 + if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 300 + (1 << KEY_FLAG_REVOKED))) { 301 + ctx->result = ERR_PTR(-EKEYREVOKED); 302 + kleave(" = %d [invrev]", ctx->skipped_ret); 303 + goto skipped; 304 + } 305 + 306 + if (key->expiry && ctx->now.tv_sec >= key->expiry) { 307 + ctx->result = ERR_PTR(-EKEYEXPIRED); 308 + kleave(" = %d [expire]", ctx->skipped_ret); 309 + goto skipped; 310 + } 311 + } 312 + 313 + /* keys that don't match */ 314 + if (!ctx->match(key, ctx->match_data)) { 315 + kleave(" = 0 [!match]"); 316 + return 0; 317 + } 318 + 319 + /* key must have search permissions */ 320 + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && 321 + key_task_permission(make_key_ref(key, ctx->possessed), 322 + ctx->cred, KEY_SEARCH) < 0) { 323 + ctx->result = ERR_PTR(-EACCES); 324 + kleave(" = %d [!perm]", ctx->skipped_ret); 325 + goto skipped; 326 + } 327 + 328 + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 329 + /* we set a different error code if 
we pass a negative key */ 330 + if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 331 + ctx->result = ERR_PTR(key->type_data.reject_error); 332 + kleave(" = %d [neg]", ctx->skipped_ret); 333 + goto skipped; 334 + } 335 + } 336 + 337 + /* Found */ 338 + ctx->result = make_key_ref(key, ctx->possessed); 339 + kleave(" = 1 [found]"); 340 + return 1; 341 + 342 + skipped: 343 + return ctx->skipped_ret; 344 + } 345 + 346 + /* 347 + * Search inside a keyring for a key. We can search by walking to it 348 + * directly based on its index-key or we can iterate over the entire 349 + * tree looking for it, based on the match function. 350 + */ 351 + static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) 352 + { 353 + if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) == 354 + KEYRING_SEARCH_LOOKUP_DIRECT) { 355 + const void *object; 356 + 357 + object = assoc_array_find(&keyring->keys, 358 + &keyring_assoc_array_ops, 359 + &ctx->index_key); 360 + return object ? ctx->iterator(object, ctx) : 0; 361 + } 362 + return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); 363 + } 364 + 365 + /* 366 + * Search a tree of keyrings that point to other keyrings up to the maximum 367 + * depth. 368 + */ 369 + static bool search_nested_keyrings(struct key *keyring, 370 + struct keyring_search_context *ctx) 371 + { 372 + struct { 373 + struct key *keyring; 374 + struct assoc_array_node *node; 375 + int slot; 376 + } stack[KEYRING_SEARCH_MAX_DEPTH]; 377 + 378 + struct assoc_array_shortcut *shortcut; 379 + struct assoc_array_node *node; 380 + struct assoc_array_ptr *ptr; 381 + struct key *key; 382 + int sp = 0, slot; 383 + 384 + kenter("{%d},{%s,%s}", 385 + keyring->serial, 386 + ctx->index_key.type->name, 387 + ctx->index_key.description); 388 + 389 + if (ctx->index_key.description) 390 + ctx->index_key.desc_len = strlen(ctx->index_key.description); 391 + 392 + /* Check to see if this top-level keyring is what we are looking for 393 + * and whether it is valid or not. 
394 + */ 395 + if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE || 396 + keyring_compare_object(keyring, &ctx->index_key)) { 397 + ctx->skipped_ret = 2; 398 + ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK; 399 + switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { 400 + case 1: 401 + goto found; 402 + case 2: 403 + return false; 404 + default: 405 + break; 406 + } 407 + } 408 + 409 + ctx->skipped_ret = 0; 410 + if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) 411 + ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK; 412 + 413 + /* Start processing a new keyring */ 414 + descend_to_keyring: 415 + kdebug("descend to %d", keyring->serial); 416 + if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | 417 + (1 << KEY_FLAG_REVOKED))) 418 + goto not_this_keyring; 419 + 420 + /* Search through the keys in this keyring before its searching its 421 + * subtrees. 422 + */ 423 + if (search_keyring(keyring, ctx)) 424 + goto found; 425 + 426 + /* Then manually iterate through the keyrings nested in this one. 427 + * 428 + * Start from the root node of the index tree. Because of the way the 429 + * hash function has been set up, keyrings cluster on the leftmost 430 + * branch of the root node (root slot 0) or in the root node itself. 431 + * Non-keyrings avoid the leftmost branch of the root entirely (root 432 + * slots 1-15). 433 + */ 434 + ptr = ACCESS_ONCE(keyring->keys.root); 435 + if (!ptr) 436 + goto not_this_keyring; 437 + 438 + if (assoc_array_ptr_is_shortcut(ptr)) { 439 + /* If the root is a shortcut, either the keyring only contains 440 + * keyring pointers (everything clusters behind root slot 0) or 441 + * doesn't contain any keyring pointers. 
442 + */ 443 + shortcut = assoc_array_ptr_to_shortcut(ptr); 444 + smp_read_barrier_depends(); 445 + if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) 446 + goto not_this_keyring; 447 + 448 + ptr = ACCESS_ONCE(shortcut->next_node); 449 + node = assoc_array_ptr_to_node(ptr); 450 + goto begin_node; 451 + } 452 + 453 + node = assoc_array_ptr_to_node(ptr); 454 + smp_read_barrier_depends(); 455 + 456 + ptr = node->slots[0]; 457 + if (!assoc_array_ptr_is_meta(ptr)) 458 + goto begin_node; 459 + 460 + descend_to_node: 461 + /* Descend to a more distal node in this keyring's content tree and go 462 + * through that. 463 + */ 464 + kdebug("descend"); 465 + if (assoc_array_ptr_is_shortcut(ptr)) { 466 + shortcut = assoc_array_ptr_to_shortcut(ptr); 467 + smp_read_barrier_depends(); 468 + ptr = ACCESS_ONCE(shortcut->next_node); 469 + BUG_ON(!assoc_array_ptr_is_node(ptr)); 470 + node = assoc_array_ptr_to_node(ptr); 471 + } 472 + 473 + begin_node: 474 + kdebug("begin_node"); 475 + smp_read_barrier_depends(); 476 + slot = 0; 477 + ascend_to_node: 478 + /* Go through the slots in a node */ 479 + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { 480 + ptr = ACCESS_ONCE(node->slots[slot]); 481 + 482 + if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) 483 + goto descend_to_node; 484 + 485 + if (!keyring_ptr_is_keyring(ptr)) 486 + continue; 487 + 488 + key = keyring_ptr_to_key(ptr); 489 + 490 + if (sp >= KEYRING_SEARCH_MAX_DEPTH) { 491 + if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { 492 + ctx->result = ERR_PTR(-ELOOP); 493 + return false; 494 + } 495 + goto not_this_keyring; 496 + } 497 + 498 + /* Search a nested keyring */ 499 + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && 500 + key_task_permission(make_key_ref(key, ctx->possessed), 501 + ctx->cred, KEY_SEARCH) < 0) 502 + continue; 503 + 504 + /* stack the current position */ 505 + stack[sp].keyring = keyring; 506 + stack[sp].node = node; 507 + stack[sp].slot = slot; 508 + sp++; 509 + 510 + /* begin again with the 
new keyring */ 511 + keyring = key; 512 + goto descend_to_keyring; 513 + } 514 + 515 + /* We've dealt with all the slots in the current node, so now we need 516 + * to ascend to the parent and continue processing there. 517 + */ 518 + ptr = ACCESS_ONCE(node->back_pointer); 519 + slot = node->parent_slot; 520 + 521 + if (ptr && assoc_array_ptr_is_shortcut(ptr)) { 522 + shortcut = assoc_array_ptr_to_shortcut(ptr); 523 + smp_read_barrier_depends(); 524 + ptr = ACCESS_ONCE(shortcut->back_pointer); 525 + slot = shortcut->parent_slot; 526 + } 527 + if (!ptr) 528 + goto not_this_keyring; 529 + node = assoc_array_ptr_to_node(ptr); 530 + smp_read_barrier_depends(); 531 + slot++; 532 + 533 + /* If we've ascended to the root (zero backpointer), we must have just 534 + * finished processing the leftmost branch rather than the root slots - 535 + * so there can't be any more keyrings for us to find. 536 + */ 537 + if (node->back_pointer) { 538 + kdebug("ascend %d", slot); 539 + goto ascend_to_node; 540 + } 541 + 542 + /* The keyring we're looking at was disqualified or didn't contain a 543 + * matching key. 
544 + */ 545 + not_this_keyring: 546 + kdebug("not_this_keyring %d", sp); 547 + if (sp <= 0) { 548 + kleave(" = false"); 549 + return false; 550 + } 551 + 552 + /* Resume the processing of a keyring higher up in the tree */ 553 + sp--; 554 + keyring = stack[sp].keyring; 555 + node = stack[sp].node; 556 + slot = stack[sp].slot + 1; 557 + kdebug("ascend to %d [%d]", keyring->serial, slot); 558 + goto ascend_to_node; 559 + 560 + /* We found a viable match */ 561 + found: 562 + key = key_ref_to_ptr(ctx->result); 563 + key_check(key); 564 + if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { 565 + key->last_used_at = ctx->now.tv_sec; 566 + keyring->last_used_at = ctx->now.tv_sec; 567 + while (sp > 0) 568 + stack[--sp].keyring->last_used_at = ctx->now.tv_sec; 569 + } 570 + kleave(" = true"); 571 + return true; 572 + } 573 + 503 574 /** 504 575 * keyring_search_aux - Search a keyring tree for a key matching some criteria 505 576 * @keyring_ref: A pointer to the keyring with possession indicator. ··· 815 298 * determine the match. Normally the match function from the key type would be 816 299 * used. 817 300 * 818 - * RCU is used to prevent the keyring key lists from disappearing without the 819 - * need to take lots of locks. 301 + * RCU can be used to prevent the keyring key lists from disappearing without 302 + * the need to take lots of locks. 
820 303 * 821 304 * Returns a pointer to the found key and increments the key usage count if 822 305 * successful; -EAGAIN if no matching keys were found, or if expired or revoked ··· 829 312 key_ref_t keyring_search_aux(key_ref_t keyring_ref, 830 313 struct keyring_search_context *ctx) 831 314 { 832 - struct { 833 - /* Need a separate keylist pointer for RCU purposes */ 834 - struct key *keyring; 835 - struct keyring_list *keylist; 836 - int kix; 837 - } stack[KEYRING_SEARCH_MAX_DEPTH]; 838 - 839 - struct keyring_list *keylist; 840 - unsigned long kflags; 841 - struct key *keyring, *key; 842 - key_ref_t key_ref; 315 + struct key *keyring; 843 316 long err; 844 - int sp, nkeys, kix; 317 + 318 + ctx->iterator = keyring_search_iterator; 319 + ctx->possessed = is_key_possessed(keyring_ref); 320 + ctx->result = ERR_PTR(-EAGAIN); 845 321 846 322 keyring = key_ref_to_ptr(keyring_ref); 847 - ctx->possessed = is_key_possessed(keyring_ref); 848 323 key_check(keyring); 849 324 850 - /* top keyring must have search permission to begin the search */ 851 - err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH); 852 - if (err < 0) { 853 - key_ref = ERR_PTR(err); 854 - goto error; 855 - } 856 - 857 - key_ref = ERR_PTR(-ENOTDIR); 858 325 if (keyring->type != &key_type_keyring) 859 - goto error; 326 + return ERR_PTR(-ENOTDIR); 327 + 328 + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { 329 + err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH); 330 + if (err < 0) 331 + return ERR_PTR(err); 332 + } 860 333 861 334 rcu_read_lock(); 862 - 863 335 ctx->now = current_kernel_time(); 864 - err = -EAGAIN; 865 - sp = 0; 866 - 867 - /* firstly we should check to see if this top-level keyring is what we 868 - * are looking for */ 869 - key_ref = ERR_PTR(-EAGAIN); 870 - kflags = keyring->flags; 871 - if (keyring->type == ctx->index_key.type && 872 - ctx->match(keyring, ctx->match_data)) { 873 - key = keyring; 874 - if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) 875 - goto 
found; 876 - 877 - /* check it isn't negative and hasn't expired or been 878 - * revoked */ 879 - if (kflags & (1 << KEY_FLAG_REVOKED)) 880 - goto error_2; 881 - if (key->expiry && ctx->now.tv_sec >= key->expiry) 882 - goto error_2; 883 - key_ref = ERR_PTR(key->type_data.reject_error); 884 - if (kflags & (1 << KEY_FLAG_NEGATIVE)) 885 - goto error_2; 886 - goto found; 887 - } 888 - 889 - /* otherwise, the top keyring must not be revoked, expired, or 890 - * negatively instantiated if we are to search it */ 891 - key_ref = ERR_PTR(-EAGAIN); 892 - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 893 - (1 << KEY_FLAG_REVOKED) | 894 - (1 << KEY_FLAG_NEGATIVE)) || 895 - (keyring->expiry && ctx->now.tv_sec >= keyring->expiry)) 896 - goto error_2; 897 - 898 - /* start processing a new keyring */ 899 - descend: 900 - kflags = keyring->flags; 901 - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 902 - (1 << KEY_FLAG_REVOKED))) 903 - goto not_this_keyring; 904 - 905 - keylist = rcu_dereference(keyring->payload.subscriptions); 906 - if (!keylist) 907 - goto not_this_keyring; 908 - 909 - /* iterate through the keys in this keyring first */ 910 - nkeys = keylist->nkeys; 911 - smp_rmb(); 912 - for (kix = 0; kix < nkeys; kix++) { 913 - key = rcu_dereference(keylist->keys[kix]); 914 - kflags = key->flags; 915 - 916 - /* ignore keys not of this type */ 917 - if (key->type != ctx->index_key.type) 918 - continue; 919 - 920 - /* skip invalidated, revoked and expired keys */ 921 - if (!(ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)) { 922 - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 923 - (1 << KEY_FLAG_REVOKED))) 924 - continue; 925 - 926 - if (key->expiry && ctx->now.tv_sec >= key->expiry) 927 - continue; 928 - } 929 - 930 - /* keys that don't match */ 931 - if (!ctx->match(key, ctx->match_data)) 932 - continue; 933 - 934 - /* key must have search permissions */ 935 - if (key_task_permission(make_key_ref(key, ctx->possessed), 936 - ctx->cred, KEY_SEARCH) < 0) 937 - continue; 938 - 939 - if 
(ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) 940 - goto found; 941 - 942 - /* we set a different error code if we pass a negative key */ 943 - if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 944 - err = key->type_data.reject_error; 945 - continue; 946 - } 947 - 948 - goto found; 949 - } 950 - 951 - /* search through the keyrings nested in this one */ 952 - kix = 0; 953 - ascend: 954 - nkeys = keylist->nkeys; 955 - smp_rmb(); 956 - for (; kix < nkeys; kix++) { 957 - key = rcu_dereference(keylist->keys[kix]); 958 - if (key->type != &key_type_keyring) 959 - continue; 960 - 961 - /* recursively search nested keyrings 962 - * - only search keyrings for which we have search permission 963 - */ 964 - if (sp >= KEYRING_SEARCH_MAX_DEPTH) 965 - continue; 966 - 967 - if (key_task_permission(make_key_ref(key, ctx->possessed), 968 - ctx->cred, KEY_SEARCH) < 0) 969 - continue; 970 - 971 - /* stack the current position */ 972 - stack[sp].keyring = keyring; 973 - stack[sp].keylist = keylist; 974 - stack[sp].kix = kix; 975 - sp++; 976 - 977 - /* begin again with the new keyring */ 978 - keyring = key; 979 - goto descend; 980 - } 981 - 982 - /* the keyring we're looking at was disqualified or didn't contain a 983 - * matching key */ 984 - not_this_keyring: 985 - if (sp > 0) { 986 - /* resume the processing of a keyring higher up in the tree */ 987 - sp--; 988 - keyring = stack[sp].keyring; 989 - keylist = stack[sp].keylist; 990 - kix = stack[sp].kix + 1; 991 - goto ascend; 992 - } 993 - 994 - key_ref = ERR_PTR(err); 995 - goto error_2; 996 - 997 - /* we found a viable match */ 998 - found: 999 - __key_get(key); 1000 - key->last_used_at = ctx->now.tv_sec; 1001 - keyring->last_used_at = ctx->now.tv_sec; 1002 - while (sp > 0) 1003 - stack[--sp].keyring->last_used_at = ctx->now.tv_sec; 1004 - key_check(key); 1005 - key_ref = make_key_ref(key, ctx->possessed); 1006 - error_2: 336 + if (search_nested_keyrings(keyring, ctx)) 337 + __key_get(key_ref_to_ptr(ctx->result)); 1007 338 
rcu_read_unlock(); 1008 - error: 1009 - return key_ref; 339 + return ctx->result; 1010 340 } 1011 341 1012 342 /** ··· 863 499 * @description: The name of the keyring we want to find. 864 500 * 865 501 * As keyring_search_aux() above, but using the current task's credentials and 866 - * type's default matching function. 502 + * type's default matching function and preferred search method. 867 503 */ 868 504 key_ref_t keyring_search(key_ref_t keyring, 869 505 struct key_type *type, ··· 887 523 EXPORT_SYMBOL(keyring_search); 888 524 889 525 /* 890 - * Search the given keyring only (no recursion). 526 + * Search the given keyring for a key that might be updated. 891 527 * 892 528 * The caller must guarantee that the keyring is a keyring and that the 893 - * permission is granted to search the keyring as no check is made here. 894 - * 895 - * RCU is used to make it unnecessary to lock the keyring key list here. 529 + * permission is granted to modify the keyring as no check is made here. The 530 + * caller must also hold a lock on the keyring semaphore. 896 531 * 897 532 * Returns a pointer to the found key with usage count incremented if 898 - * successful and returns -ENOKEY if not found. Revoked and invalidated keys 899 - * are skipped over. 533 + * successful and returns NULL if not found. Revoked and invalidated keys are 534 + * skipped over. 900 535 * 901 536 * If successful, the possession indicator is propagated from the keyring ref 902 537 * to the returned key reference. 
903 538 */ 904 - key_ref_t __keyring_search_one(key_ref_t keyring_ref, 905 - const struct keyring_index_key *index_key) 539 + key_ref_t find_key_to_update(key_ref_t keyring_ref, 540 + const struct keyring_index_key *index_key) 906 541 { 907 - struct keyring_list *klist; 908 542 struct key *keyring, *key; 909 - bool possessed; 910 - int nkeys, loop; 543 + const void *object; 911 544 912 545 keyring = key_ref_to_ptr(keyring_ref); 913 - possessed = is_key_possessed(keyring_ref); 914 546 915 - rcu_read_lock(); 547 + kenter("{%d},{%s,%s}", 548 + keyring->serial, index_key->type->name, index_key->description); 916 549 917 - klist = rcu_dereference(keyring->payload.subscriptions); 918 - if (klist) { 919 - nkeys = klist->nkeys; 920 - smp_rmb(); 921 - for (loop = 0; loop < nkeys ; loop++) { 922 - key = rcu_dereference(klist->keys[loop]); 923 - if (key->type == index_key->type && 924 - (!key->type->match || 925 - key->type->match(key, index_key->description)) && 926 - !(key->flags & ((1 << KEY_FLAG_INVALIDATED) | 927 - (1 << KEY_FLAG_REVOKED))) 928 - ) 929 - goto found; 930 - } 931 - } 550 + object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, 551 + index_key); 932 552 933 - rcu_read_unlock(); 934 - return ERR_PTR(-ENOKEY); 553 + if (object) 554 + goto found; 555 + 556 + kleave(" = NULL"); 557 + return NULL; 935 558 936 559 found: 560 + key = keyring_ptr_to_key(object); 561 + if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | 562 + (1 << KEY_FLAG_REVOKED))) { 563 + kleave(" = NULL [x]"); 564 + return NULL; 565 + } 937 566 __key_get(key); 938 - keyring->last_used_at = key->last_used_at = 939 - current_kernel_time().tv_sec; 940 - rcu_read_unlock(); 941 - return make_key_ref(key, possessed); 567 + kleave(" = {%d}", key->serial); 568 + return make_key_ref(key, is_key_possessed(keyring_ref)); 942 569 } 943 570 944 571 /* ··· 992 637 return keyring; 993 638 } 994 639 640 + static int keyring_detect_cycle_iterator(const void *object, 641 + void *iterator_data) 642 + { 
643 + struct keyring_search_context *ctx = iterator_data; 644 + const struct key *key = keyring_ptr_to_key(object); 645 + 646 + kenter("{%d}", key->serial); 647 + 648 + BUG_ON(key != ctx->match_data); 649 + ctx->result = ERR_PTR(-EDEADLK); 650 + return 1; 651 + } 652 + 995 653 /* 996 654 * See if a cycle will will be created by inserting acyclic tree B in acyclic 997 655 * tree A at the topmost level (ie: as a direct child of A). ··· 1014 646 */ 1015 647 static int keyring_detect_cycle(struct key *A, struct key *B) 1016 648 { 1017 - struct { 1018 - struct keyring_list *keylist; 1019 - int kix; 1020 - } stack[KEYRING_SEARCH_MAX_DEPTH]; 1021 - 1022 - struct keyring_list *keylist; 1023 - struct key *subtree, *key; 1024 - int sp, nkeys, kix, ret; 649 + struct keyring_search_context ctx = { 650 + .index_key = A->index_key, 651 + .match_data = A, 652 + .iterator = keyring_detect_cycle_iterator, 653 + .flags = (KEYRING_SEARCH_LOOKUP_DIRECT | 654 + KEYRING_SEARCH_NO_STATE_CHECK | 655 + KEYRING_SEARCH_NO_UPDATE_TIME | 656 + KEYRING_SEARCH_NO_CHECK_PERM | 657 + KEYRING_SEARCH_DETECT_TOO_DEEP), 658 + }; 1025 659 1026 660 rcu_read_lock(); 1027 - 1028 - ret = -EDEADLK; 1029 - if (A == B) 1030 - goto cycle_detected; 1031 - 1032 - subtree = B; 1033 - sp = 0; 1034 - 1035 - /* start processing a new keyring */ 1036 - descend: 1037 - if (test_bit(KEY_FLAG_REVOKED, &subtree->flags)) 1038 - goto not_this_keyring; 1039 - 1040 - keylist = rcu_dereference(subtree->payload.subscriptions); 1041 - if (!keylist) 1042 - goto not_this_keyring; 1043 - kix = 0; 1044 - 1045 - ascend: 1046 - /* iterate through the remaining keys in this keyring */ 1047 - nkeys = keylist->nkeys; 1048 - smp_rmb(); 1049 - for (; kix < nkeys; kix++) { 1050 - key = rcu_dereference(keylist->keys[kix]); 1051 - 1052 - if (key == A) 1053 - goto cycle_detected; 1054 - 1055 - /* recursively check nested keyrings */ 1056 - if (key->type == &key_type_keyring) { 1057 - if (sp >= KEYRING_SEARCH_MAX_DEPTH) 1058 - goto too_deep; 
1059 - 1060 - /* stack the current position */ 1061 - stack[sp].keylist = keylist; 1062 - stack[sp].kix = kix; 1063 - sp++; 1064 - 1065 - /* begin again with the new keyring */ 1066 - subtree = key; 1067 - goto descend; 1068 - } 1069 - } 1070 - 1071 - /* the keyring we're looking at was disqualified or didn't contain a 1072 - * matching key */ 1073 - not_this_keyring: 1074 - if (sp > 0) { 1075 - /* resume the checking of a keyring higher up in the tree */ 1076 - sp--; 1077 - keylist = stack[sp].keylist; 1078 - kix = stack[sp].kix + 1; 1079 - goto ascend; 1080 - } 1081 - 1082 - ret = 0; /* no cycles detected */ 1083 - 1084 - error: 661 + search_nested_keyrings(B, &ctx); 1085 662 rcu_read_unlock(); 1086 - return ret; 1087 - 1088 - too_deep: 1089 - ret = -ELOOP; 1090 - goto error; 1091 - 1092 - cycle_detected: 1093 - ret = -EDEADLK; 1094 - goto error; 1095 - } 1096 - 1097 - /* 1098 - * Dispose of a keyring list after the RCU grace period, freeing the unlinked 1099 - * key 1100 - */ 1101 - static void keyring_unlink_rcu_disposal(struct rcu_head *rcu) 1102 - { 1103 - struct keyring_list *klist = 1104 - container_of(rcu, struct keyring_list, rcu); 1105 - 1106 - if (klist->delkey != USHRT_MAX) 1107 - key_put(rcu_access_pointer(klist->keys[klist->delkey])); 1108 - kfree(klist); 663 + return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); 1109 664 } 1110 665 1111 666 /* 1112 667 * Preallocate memory so that a key can be linked into to a keyring. 
1113 668 */ 1114 - int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, 1115 - unsigned long *_prealloc) 669 + int __key_link_begin(struct key *keyring, 670 + const struct keyring_index_key *index_key, 671 + struct assoc_array_edit **_edit) 1116 672 __acquires(&keyring->sem) 1117 673 __acquires(&keyring_serialise_link_sem) 1118 674 { 1119 - struct keyring_list *klist, *nklist; 1120 - unsigned long prealloc; 1121 - unsigned max; 1122 - time_t lowest_lru; 1123 - size_t size; 1124 - int loop, lru, ret; 675 + struct assoc_array_edit *edit; 676 + int ret; 1125 677 1126 678 kenter("%d,%s,%s,", 1127 - key_serial(keyring), index_key->type->name, index_key->description); 679 + keyring->serial, index_key->type->name, index_key->description); 680 + 681 + BUG_ON(index_key->desc_len == 0); 1128 682 1129 683 if (keyring->type != &key_type_keyring) 1130 684 return -ENOTDIR; ··· 1062 772 if (index_key->type == &key_type_keyring) 1063 773 down_write(&keyring_serialise_link_sem); 1064 774 1065 - klist = rcu_dereference_locked_keyring(keyring); 1066 - 1067 - /* see if there's a matching key we can displace */ 1068 - lru = -1; 1069 - if (klist && klist->nkeys > 0) { 1070 - lowest_lru = TIME_T_MAX; 1071 - for (loop = klist->nkeys - 1; loop >= 0; loop--) { 1072 - struct key *key = rcu_deref_link_locked(klist, loop, 1073 - keyring); 1074 - if (key->type == index_key->type && 1075 - strcmp(key->description, index_key->description) == 0) { 1076 - /* Found a match - we'll replace the link with 1077 - * one to the new key. We record the slot 1078 - * position. 
1079 - */ 1080 - klist->delkey = loop; 1081 - prealloc = 0; 1082 - goto done; 1083 - } 1084 - if (key->last_used_at < lowest_lru) { 1085 - lowest_lru = key->last_used_at; 1086 - lru = loop; 1087 - } 1088 - } 1089 - } 1090 - 1091 - /* If the keyring is full then do an LRU discard */ 1092 - if (klist && 1093 - klist->nkeys == klist->maxkeys && 1094 - klist->maxkeys >= MAX_KEYRING_LINKS) { 1095 - kdebug("LRU discard %d\n", lru); 1096 - klist->delkey = lru; 1097 - prealloc = 0; 1098 - goto done; 1099 - } 1100 - 1101 775 /* check that we aren't going to overrun the user's quota */ 1102 776 ret = key_payload_reserve(keyring, 1103 777 keyring->datalen + KEYQUOTA_LINK_BYTES); 1104 778 if (ret < 0) 1105 779 goto error_sem; 1106 780 1107 - if (klist && klist->nkeys < klist->maxkeys) { 1108 - /* there's sufficient slack space to append directly */ 1109 - klist->delkey = klist->nkeys; 1110 - prealloc = KEY_LINK_FIXQUOTA; 1111 - } else { 1112 - /* grow the key list */ 1113 - max = 4; 1114 - if (klist) { 1115 - max += klist->maxkeys; 1116 - if (max > MAX_KEYRING_LINKS) 1117 - max = MAX_KEYRING_LINKS; 1118 - BUG_ON(max <= klist->maxkeys); 1119 - } 1120 - 1121 - size = sizeof(*klist) + sizeof(struct key *) * max; 1122 - 1123 - ret = -ENOMEM; 1124 - nklist = kmalloc(size, GFP_KERNEL); 1125 - if (!nklist) 1126 - goto error_quota; 1127 - 1128 - nklist->maxkeys = max; 1129 - if (klist) { 1130 - memcpy(nklist->keys, klist->keys, 1131 - sizeof(struct key *) * klist->nkeys); 1132 - nklist->delkey = klist->nkeys; 1133 - nklist->nkeys = klist->nkeys + 1; 1134 - klist->delkey = USHRT_MAX; 1135 - } else { 1136 - nklist->nkeys = 1; 1137 - nklist->delkey = 0; 1138 - } 1139 - 1140 - /* add the key into the new space */ 1141 - RCU_INIT_POINTER(nklist->keys[nklist->delkey], NULL); 1142 - prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA; 781 + /* Create an edit script that will insert/replace the key in the 782 + * keyring tree. 
783 + */ 784 + edit = assoc_array_insert(&keyring->keys, 785 + &keyring_assoc_array_ops, 786 + index_key, 787 + NULL); 788 + if (IS_ERR(edit)) { 789 + ret = PTR_ERR(edit); 790 + goto error_quota; 1143 791 } 1144 792 1145 - done: 1146 - *_prealloc = prealloc; 793 + *_edit = edit; 1147 794 kleave(" = 0"); 1148 795 return 0; 1149 796 ··· 1120 893 * holds at most one link to any given key of a particular type+description 1121 894 * combination. 1122 895 */ 1123 - void __key_link(struct key *keyring, struct key *key, 1124 - unsigned long *_prealloc) 896 + void __key_link(struct key *key, struct assoc_array_edit **_edit) 1125 897 { 1126 - struct keyring_list *klist, *nklist; 1127 - struct key *discard; 1128 - 1129 - nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA); 1130 - *_prealloc = 0; 1131 - 1132 - kenter("%d,%d,%p", keyring->serial, key->serial, nklist); 1133 - 1134 - klist = rcu_dereference_locked_keyring(keyring); 1135 - 1136 898 __key_get(key); 1137 - keyring->last_used_at = key->last_used_at = 1138 - current_kernel_time().tv_sec; 1139 - 1140 - /* there's a matching key we can displace or an empty slot in a newly 1141 - * allocated list we can fill */ 1142 - if (nklist) { 1143 - kdebug("reissue %hu/%hu/%hu", 1144 - nklist->delkey, nklist->nkeys, nklist->maxkeys); 1145 - 1146 - RCU_INIT_POINTER(nklist->keys[nklist->delkey], key); 1147 - 1148 - rcu_assign_pointer(keyring->payload.subscriptions, nklist); 1149 - 1150 - /* dispose of the old keyring list and, if there was one, the 1151 - * displaced key */ 1152 - if (klist) { 1153 - kdebug("dispose %hu/%hu/%hu", 1154 - klist->delkey, klist->nkeys, klist->maxkeys); 1155 - call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); 1156 - } 1157 - } else if (klist->delkey < klist->nkeys) { 1158 - kdebug("replace %hu/%hu/%hu", 1159 - klist->delkey, klist->nkeys, klist->maxkeys); 1160 - 1161 - discard = rcu_dereference_protected( 1162 - klist->keys[klist->delkey], 1163 - rwsem_is_locked(&keyring->sem)); 1164 - 
rcu_assign_pointer(klist->keys[klist->delkey], key); 1165 - /* The garbage collector will take care of RCU 1166 - * synchronisation */ 1167 - key_put(discard); 1168 - } else { 1169 - /* there's sufficient slack space to append directly */ 1170 - kdebug("append %hu/%hu/%hu", 1171 - klist->delkey, klist->nkeys, klist->maxkeys); 1172 - 1173 - RCU_INIT_POINTER(klist->keys[klist->delkey], key); 1174 - smp_wmb(); 1175 - klist->nkeys++; 1176 - } 899 + assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); 900 + assoc_array_apply_edit(*_edit); 901 + *_edit = NULL; 1177 902 } 1178 903 1179 904 /* ··· 1135 956 */ 1136 957 void __key_link_end(struct key *keyring, 1137 958 const struct keyring_index_key *index_key, 1138 - unsigned long prealloc) 959 + struct assoc_array_edit *edit) 1139 960 __releases(&keyring->sem) 1140 961 __releases(&keyring_serialise_link_sem) 1141 962 { 1142 963 BUG_ON(index_key->type == NULL); 1143 - BUG_ON(index_key->type->name == NULL); 1144 - kenter("%d,%s,%lx", keyring->serial, index_key->type->name, prealloc); 964 + kenter("%d,%s,", keyring->serial, index_key->type->name); 1145 965 1146 966 if (index_key->type == &key_type_keyring) 1147 967 up_write(&keyring_serialise_link_sem); 1148 968 1149 - if (prealloc) { 1150 - if (prealloc & KEY_LINK_FIXQUOTA) 1151 - key_payload_reserve(keyring, 1152 - keyring->datalen - 1153 - KEYQUOTA_LINK_BYTES); 1154 - kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA)); 969 + if (edit) { 970 + key_payload_reserve(keyring, 971 + keyring->datalen - KEYQUOTA_LINK_BYTES); 972 + assoc_array_cancel_edit(edit); 1155 973 } 1156 974 up_write(&keyring->sem); 1157 975 } ··· 1175 999 */ 1176 1000 int key_link(struct key *keyring, struct key *key) 1177 1001 { 1178 - unsigned long prealloc; 1002 + struct assoc_array_edit *edit; 1179 1003 int ret; 1004 + 1005 + kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); 1180 1006 1181 1007 key_check(keyring); 1182 1008 key_check(key); 1183 1009 1184 - ret = 
__key_link_begin(keyring, &key->index_key, &prealloc); 1010 + ret = __key_link_begin(keyring, &key->index_key, &edit); 1185 1011 if (ret == 0) { 1012 + kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); 1186 1013 ret = __key_link_check_live_key(keyring, key); 1187 1014 if (ret == 0) 1188 - __key_link(keyring, key, &prealloc); 1189 - __key_link_end(keyring, &key->index_key, prealloc); 1015 + __key_link(key, &edit); 1016 + __key_link_end(keyring, &key->index_key, edit); 1190 1017 } 1191 1018 1019 + kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); 1192 1020 return ret; 1193 1021 } 1194 1022 EXPORT_SYMBOL(key_link); ··· 1216 1036 */ 1217 1037 int key_unlink(struct key *keyring, struct key *key) 1218 1038 { 1219 - struct keyring_list *klist, *nklist; 1220 - int loop, ret; 1039 + struct assoc_array_edit *edit; 1040 + int ret; 1221 1041 1222 1042 key_check(keyring); 1223 1043 key_check(key); 1224 1044 1225 - ret = -ENOTDIR; 1226 1045 if (keyring->type != &key_type_keyring) 1227 - goto error; 1046 + return -ENOTDIR; 1228 1047 1229 1048 down_write(&keyring->sem); 1230 1049 1231 - klist = rcu_dereference_locked_keyring(keyring); 1232 - if (klist) { 1233 - /* search the keyring for the key */ 1234 - for (loop = 0; loop < klist->nkeys; loop++) 1235 - if (rcu_access_pointer(klist->keys[loop]) == key) 1236 - goto key_is_present; 1050 + edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, 1051 + &key->index_key); 1052 + if (IS_ERR(edit)) { 1053 + ret = PTR_ERR(edit); 1054 + goto error; 1237 1055 } 1238 - 1239 - up_write(&keyring->sem); 1240 1056 ret = -ENOENT; 1241 - goto error; 1057 + if (edit == NULL) 1058 + goto error; 1242 1059 1243 - key_is_present: 1244 - /* we need to copy the key list for RCU purposes */ 1245 - nklist = kmalloc(sizeof(*klist) + 1246 - sizeof(struct key *) * klist->maxkeys, 1247 - GFP_KERNEL); 1248 - if (!nklist) 1249 - goto nomem; 1250 - nklist->maxkeys = klist->maxkeys; 1251 - nklist->nkeys 
= klist->nkeys - 1; 1252 - 1253 - if (loop > 0) 1254 - memcpy(&nklist->keys[0], 1255 - &klist->keys[0], 1256 - loop * sizeof(struct key *)); 1257 - 1258 - if (loop < nklist->nkeys) 1259 - memcpy(&nklist->keys[loop], 1260 - &klist->keys[loop + 1], 1261 - (nklist->nkeys - loop) * sizeof(struct key *)); 1262 - 1263 - /* adjust the user's quota */ 1264 - key_payload_reserve(keyring, 1265 - keyring->datalen - KEYQUOTA_LINK_BYTES); 1266 - 1267 - rcu_assign_pointer(keyring->payload.subscriptions, nklist); 1268 - 1269 - up_write(&keyring->sem); 1270 - 1271 - /* schedule for later cleanup */ 1272 - klist->delkey = loop; 1273 - call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); 1274 - 1060 + assoc_array_apply_edit(edit); 1275 1061 ret = 0; 1276 1062 1277 1063 error: 1278 - return ret; 1279 - nomem: 1280 - ret = -ENOMEM; 1281 1064 up_write(&keyring->sem); 1282 - goto error; 1065 + return ret; 1283 1066 } 1284 1067 EXPORT_SYMBOL(key_unlink); 1285 - 1286 - /* 1287 - * Dispose of a keyring list after the RCU grace period, releasing the keys it 1288 - * links to. 
1289 - */ 1290 - static void keyring_clear_rcu_disposal(struct rcu_head *rcu) 1291 - { 1292 - struct keyring_list *klist; 1293 - int loop; 1294 - 1295 - klist = container_of(rcu, struct keyring_list, rcu); 1296 - 1297 - for (loop = klist->nkeys - 1; loop >= 0; loop--) 1298 - key_put(rcu_access_pointer(klist->keys[loop])); 1299 - 1300 - kfree(klist); 1301 - } 1302 1068 1303 1069 /** 1304 1070 * keyring_clear - Clear a keyring ··· 1256 1130 */ 1257 1131 int keyring_clear(struct key *keyring) 1258 1132 { 1259 - struct keyring_list *klist; 1133 + struct assoc_array_edit *edit; 1260 1134 int ret; 1261 1135 1262 - ret = -ENOTDIR; 1263 - if (keyring->type == &key_type_keyring) { 1264 - /* detach the pointer block with the locks held */ 1265 - down_write(&keyring->sem); 1136 + if (keyring->type != &key_type_keyring) 1137 + return -ENOTDIR; 1266 1138 1267 - klist = rcu_dereference_locked_keyring(keyring); 1268 - if (klist) { 1269 - /* adjust the quota */ 1270 - key_payload_reserve(keyring, 1271 - sizeof(struct keyring_list)); 1139 + down_write(&keyring->sem); 1272 1140 1273 - rcu_assign_pointer(keyring->payload.subscriptions, 1274 - NULL); 1275 - } 1276 - 1277 - up_write(&keyring->sem); 1278 - 1279 - /* free the keys after the locks have been dropped */ 1280 - if (klist) 1281 - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); 1282 - 1141 + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); 1142 + if (IS_ERR(edit)) { 1143 + ret = PTR_ERR(edit); 1144 + } else { 1145 + if (edit) 1146 + assoc_array_apply_edit(edit); 1147 + key_payload_reserve(keyring, 0); 1283 1148 ret = 0; 1284 1149 } 1285 1150 1151 + up_write(&keyring->sem); 1286 1152 return ret; 1287 1153 } 1288 1154 EXPORT_SYMBOL(keyring_clear); ··· 1286 1168 */ 1287 1169 static void keyring_revoke(struct key *keyring) 1288 1170 { 1289 - struct keyring_list *klist; 1171 + struct assoc_array_edit *edit; 1290 1172 1291 - klist = rcu_dereference_locked_keyring(keyring); 1292 - 1293 - /* adjust the quota */ 
1294 - key_payload_reserve(keyring, 0); 1295 - 1296 - if (klist) { 1297 - rcu_assign_pointer(keyring->payload.subscriptions, NULL); 1298 - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); 1173 + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); 1174 + if (!IS_ERR(edit)) { 1175 + if (edit) 1176 + assoc_array_apply_edit(edit); 1177 + key_payload_reserve(keyring, 0); 1299 1178 } 1179 + } 1180 + 1181 + static bool gc_iterator(void *object, void *iterator_data) 1182 + { 1183 + struct key *key = keyring_ptr_to_key(object); 1184 + time_t *limit = iterator_data; 1185 + 1186 + if (key_is_dead(key, *limit)) 1187 + return false; 1188 + key_get(key); 1189 + return true; 1300 1190 } 1301 1191 1302 1192 /* ··· 1317 1191 */ 1318 1192 void keyring_gc(struct key *keyring, time_t limit) 1319 1193 { 1320 - struct keyring_list *klist, *new; 1321 - struct key *key; 1322 - int loop, keep, max; 1323 - 1324 1194 kenter("{%x,%s}", key_serial(keyring), keyring->description); 1325 1195 1326 1196 down_write(&keyring->sem); 1327 - 1328 - klist = rcu_dereference_locked_keyring(keyring); 1329 - if (!klist) 1330 - goto no_klist; 1331 - 1332 - /* work out how many subscriptions we're keeping */ 1333 - keep = 0; 1334 - for (loop = klist->nkeys - 1; loop >= 0; loop--) 1335 - if (!key_is_dead(rcu_deref_link_locked(klist, loop, keyring), 1336 - limit)) 1337 - keep++; 1338 - 1339 - if (keep == klist->nkeys) 1340 - goto just_return; 1341 - 1342 - /* allocate a new keyring payload */ 1343 - max = roundup(keep, 4); 1344 - new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *), 1345 - GFP_KERNEL); 1346 - if (!new) 1347 - goto nomem; 1348 - new->maxkeys = max; 1349 - new->nkeys = 0; 1350 - new->delkey = 0; 1351 - 1352 - /* install the live keys 1353 - * - must take care as expired keys may be updated back to life 1354 - */ 1355 - keep = 0; 1356 - for (loop = klist->nkeys - 1; loop >= 0; loop--) { 1357 - key = rcu_deref_link_locked(klist, loop, keyring); 1358 - if 
(!key_is_dead(key, limit)) { 1359 - if (keep >= max) 1360 - goto discard_new; 1361 - RCU_INIT_POINTER(new->keys[keep++], key_get(key)); 1362 - } 1363 - } 1364 - new->nkeys = keep; 1365 - 1366 - /* adjust the quota */ 1367 - key_payload_reserve(keyring, 1368 - sizeof(struct keyring_list) + 1369 - KEYQUOTA_LINK_BYTES * keep); 1370 - 1371 - if (keep == 0) { 1372 - rcu_assign_pointer(keyring->payload.subscriptions, NULL); 1373 - kfree(new); 1374 - } else { 1375 - rcu_assign_pointer(keyring->payload.subscriptions, new); 1376 - } 1377 - 1197 + assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, 1198 + gc_iterator, &limit); 1378 1199 up_write(&keyring->sem); 1379 1200 1380 - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); 1381 - kleave(" [yes]"); 1382 - return; 1383 - 1384 - discard_new: 1385 - new->nkeys = keep; 1386 - keyring_clear_rcu_disposal(&new->rcu); 1387 - up_write(&keyring->sem); 1388 - kleave(" [discard]"); 1389 - return; 1390 - 1391 - just_return: 1392 - up_write(&keyring->sem); 1393 - kleave(" [no dead]"); 1394 - return; 1395 - 1396 - no_klist: 1397 - up_write(&keyring->sem); 1398 - kleave(" [no_klist]"); 1399 - return; 1400 - 1401 - nomem: 1402 - up_write(&keyring->sem); 1403 - kleave(" [oom]"); 1201 + kleave(""); 1404 1202 }
+6 -6
security/keys/request_key.c
··· 351 351 struct key_user *user, 352 352 struct key **_key) 353 353 { 354 - unsigned long prealloc; 354 + struct assoc_array_edit *edit; 355 355 struct key *key; 356 356 key_perm_t perm; 357 357 key_ref_t key_ref; ··· 380 380 set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); 381 381 382 382 if (dest_keyring) { 383 - ret = __key_link_begin(dest_keyring, &ctx->index_key, &prealloc); 383 + ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit); 384 384 if (ret < 0) 385 385 goto link_prealloc_failed; 386 386 } ··· 395 395 goto key_already_present; 396 396 397 397 if (dest_keyring) 398 - __key_link(dest_keyring, key, &prealloc); 398 + __key_link(key, &edit); 399 399 400 400 mutex_unlock(&key_construction_mutex); 401 401 if (dest_keyring) 402 - __key_link_end(dest_keyring, &ctx->index_key, prealloc); 402 + __key_link_end(dest_keyring, &ctx->index_key, edit); 403 403 mutex_unlock(&user->cons_lock); 404 404 *_key = key; 405 405 kleave(" = 0 [%d]", key_serial(key)); ··· 414 414 if (dest_keyring) { 415 415 ret = __key_link_check_live_key(dest_keyring, key); 416 416 if (ret == 0) 417 - __key_link(dest_keyring, key, &prealloc); 418 - __key_link_end(dest_keyring, &ctx->index_key, prealloc); 417 + __key_link(key, &edit); 418 + __key_link_end(dest_keyring, &ctx->index_key, edit); 419 419 if (ret < 0) 420 420 goto link_check_failed; 421 421 }