Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 1000000;   /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200;            /* general key count quota */
unsigned int key_quota_maxbytes = 20000;         /* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent, **p;

try_again:
        parent = NULL;
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid_lt(uid, user->uid))
                        p = &(*p)->rb_left;
                else if (uid_gt(uid, user->uid))
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        refcount_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        mutex_init(&candidate->cons_lock);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        refcount_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }
}

/*
 * Allocate a serial number for a key. These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

attempt_insertion:
        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);
        return;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 3) {
                        key->serial = 3;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto attempt_insertion;
        }
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 * @restrict_link: Optional link restriction for new keyrings.
 *
 * Allocate a key of the specified type with the attributes given. The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The restrict_link structure (if not NULL) will be freed when the
 * keyring is destroyed, so it must be dynamically allocated.
 *
 * The user's key count quota is updated to reflect the creation of the key and
 * the user's key data quota has the default for the key type reserved. The
 * instantiation function should amend this as necessary. If insufficient
 * quota is available, -EDQUOT will be returned.
 *
 * The LSM security modules can prevent a key being created, in which case
 * -EACCES will be returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't unregistered.
 * Internally this can be done by locking key_types_sem. Externally, this can
 * be done by either never unregistering the key type, or making sure
 * key_alloc() calls don't race with module unloading.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      kuid_t uid, kgid_t gid, const struct cred *cred,
                      key_perm_t perm, unsigned long flags,
                      struct key_restriction *restrict_link)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        if (type->vet_description) {
                ret = type->vet_description(desc);
                if (ret < 0) {
                        key = ERR_PTR(ret);
                        goto error;
                }
        }

        desclen = strlen(desc);
        quotalen = desclen + 1 + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
                unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 > maxkeys ||
                            user->qnbytes + quotalen > maxbytes ||
                            user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        key->index_key.desc_len = desclen;
        key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
        if (!key->index_key.description)
                goto no_memory_3;

        refcount_set(&key->usage, 1);
        init_rwsem(&key->sem);
        lockdep_set_class(&key->sem, &type->lock_class);
        key->index_key.type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->restrict_link = restrict_link;
        key->last_used_at = ktime_get_real_seconds();

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;
        if (flags & KEY_ALLOC_BUILT_IN)
                key->flags |= 1 << KEY_FLAG_BUILTIN;
        if (flags & KEY_ALLOC_UID_KEYRING)
                key->flags |= 1 << KEY_FLAG_UID_KEYRING;

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, cred, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;
}
EXPORT_SYMBOL(key_alloc);

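/*
 * Example (illustrative sketch only): allocating an unattached key from
 * kernel code.  The "user" key type, description and permission mask below
 * are arbitrary choices for the sake of the example; a real caller picks the
 * type, quota flags and permissions it needs, and must still instantiate the
 * key (e.g. with key_instantiate_and_link()) before publishing it.
 */
static struct key *example_alloc_key(const struct cred *cred)
{
        struct key *key;

        key = key_alloc(&key_type_user, "example:alloc",
                        cred->fsuid, cred->fsgid, cred,
                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                        KEY_ALLOC_IN_QUOTA, NULL);
        if (IS_ERR(key))
                return key;     /* e.g. -EDQUOT, -ENOMEM or an LSM error */

        /* the key is still uninstantiated at this point */
        return key;
}
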
/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int)datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    (key->user->qnbytes + delta >= maxbytes ||
                     key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }
                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;
}
EXPORT_SYMBOL(key_payload_reserve);

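/*
 * Example (illustrative sketch only): a key type's ->update() op would
 * typically re-reserve quota for the incoming payload before swapping it in.
 * The payload handling is elided here; only the quota call is shown.
 */
static int example_type_update(struct key *key,
                               struct key_preparsed_payload *prep)
{
        int ret;

        ret = key_payload_reserve(key, prep->datalen);
        if (ret < 0)
                return ret;     /* -EDQUOT: keep the old payload */

        /* ... attach the new payload to the key here ... */
        return 0;
}
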
/*
 * Change the key state to being instantiated.
 */
static void mark_key_instantiated(struct key *key, int reject_error)
{
        /* Commit the payload before setting the state; barrier versus
         * key_read_state().
         */
        smp_store_release(&key->state,
                          (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}

/*
 * Instantiate a key and link it into the target keyring atomically. Must be
 * called with the target keyring's semaphore writelocked. The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
                                      struct key_preparsed_payload *prep,
                                      struct key *keyring,
                                      struct key *authkey,
                                      struct assoc_array_edit **_edit)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (key->state == KEY_IS_UNINSTANTIATED) {
                /* instantiate the key */
                ret = key->type->instantiate(key, prep);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        mark_key_instantiated(key, 0);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring) {
                                if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
                                        set_bit(KEY_FLAG_KEEP, &key->flags);

                                __key_link(key, _edit);
                        }

                        /* disable the authorisation key */
                        if (authkey)
                                key_revoke(authkey);

                        if (prep->expiry != TIME64_MAX) {
                                key->expiry = prep->expiry;
                                key_schedule_gc(prep->expiry + key_gc_delay);
                        }
                }
        }

        mutex_unlock(&key_construction_mutex);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the key.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *authkey)
{
        struct key_preparsed_payload prep;
        struct assoc_array_edit *edit;
        int ret;

        memset(&prep, 0, sizeof(prep));
        prep.data = data;
        prep.datalen = datalen;
        prep.quotalen = key->type->def_datalen;
        prep.expiry = TIME64_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
                        goto error;
        }

        if (keyring) {
                ret = __key_link_begin(keyring, &key->index_key, &edit);
                if (ret < 0)
                        goto error;

                if (keyring->restrict_link && keyring->restrict_link->check) {
                        struct key_restriction *keyres = keyring->restrict_link;

                        ret = keyres->check(keyring, key->type, &prep.payload,
                                            keyres->key);
                        if (ret < 0)
                                goto error_link_end;
                }
        }

        ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);

error_link_end:
        if (keyring)
                __key_link_end(keyring, &key->index_key, edit);

error:
        if (key->type->preparse)
                key->type->free_preparse(&prep);
        return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);

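/*
 * Example (illustrative sketch only): creating a complete key from kernel
 * code - allocate it, instantiate it from a payload blob and link it into a
 * keyring supplied by the caller.  The "user" key type and description are
 * arbitrary choices for the example.
 */
static struct key *example_add_blob_key(struct key *keyring,
                                        const void *blob, size_t blob_len)
{
        const struct cred *cred = current_cred();
        struct key *key;
        int ret;

        key = key_alloc(&key_type_user, "example:blob",
                        cred->fsuid, cred->fsgid, cred,
                        (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
                        KEY_ALLOC_IN_QUOTA, NULL);
        if (IS_ERR(key))
                return key;

        ret = key_instantiate_and_link(key, blob, blob_len, keyring, NULL);
        if (ret < 0) {
                key_put(key);   /* never instantiated - let the gc reap it */
                return ERR_PTR(ret);
        }

        return key;
}
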
/**
 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @error: The error to return when the key is hit.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and stored error and link it in to the
 * destination keyring if one is supplied. The key and any links to the key
 * will be automatically garbage collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the stored error code (typically ENOKEY) until the negative
 * key expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
                        unsigned timeout,
                        unsigned error,
                        struct key *keyring,
                        struct key *authkey)
{
        struct assoc_array_edit *edit;
        int ret, awaken, link_ret = 0;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring) {
                if (keyring->restrict_link)
                        return -EPERM;

                link_ret = __key_link_begin(keyring, &key->index_key, &edit);
        }

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (key->state == KEY_IS_UNINSTANTIATED) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                mark_key_instantiated(key, -error);
                key->expiry = ktime_get_real_seconds() + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring && link_ret == 0)
                        __key_link(key, &edit);

                /* disable the authorisation key */
                if (authkey)
                        key_revoke(authkey);
        }

        mutex_unlock(&key_construction_mutex);

        if (keyring && link_ret == 0)
                __key_link_end(keyring, &key->index_key, edit);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);

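/*
 * Example (illustrative sketch only): an instantiation handler that fails to
 * obtain the material it was asked for can negatively instantiate the key
 * instead, so that request_key() callers see -ENOKEY for the next 30 seconds
 * rather than triggering repeated upcalls.  The authorisation key would have
 * been provided by the request_key() machinery.
 */
static int example_reject_key(struct key *key, struct key *authkey)
{
        return key_reject_and_link(key, 30, ENOKEY, NULL, authkey);
}
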
/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (refcount_dec_and_test(&key->usage))
                        schedule_work(&key_gc_work);
        }
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* A key is allowed to be looked up only if someone still owns a
         * reference to it - otherwise it's awaiting the gc.
         */
        if (!refcount_inc_not_zero(&key->usage))
                goto not_found;

error:
        spin_unlock(&key_serial_lock);
        return key;
}

/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful. If the type wasn't
 * available -ENOKEY is returned instead.
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;
}

void key_set_timeout(struct key *key, unsigned timeout)
{
        time64_t expiry = 0;

        /* make the changes with the locks held to prevent races */
        down_write(&key->sem);

        if (timeout > 0)
                expiry = ktime_get_real_seconds() + timeout;

        key->expiry = expiry;
        key_schedule_gc(key->expiry + key_gc_delay);

        up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);
}

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     struct key_preparsed_payload *prep)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_NEED_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, prep);
        if (ret == 0)
                /* Updating a negative key positively instantiates it */
                mark_key_instantiated(key, 0);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               key_perm_t perm,
                               unsigned long flags)
{
        struct keyring_index_key index_key = {
                .description = description,
        };
        struct key_preparsed_payload prep;
        struct assoc_array_edit *edit;
        const struct cred *cred = current_cred();
        struct key *keyring, *key = NULL;
        key_ref_t key_ref;
        int ret;
        struct key_restriction *restrict_link = NULL;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        index_key.type = key_type_lookup(type);
        if (IS_ERR(index_key.type)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!index_key.type->instantiate ||
            (!index_key.description && !index_key.type->preparse))
                goto error_put_type;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
                restrict_link = keyring->restrict_link;

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_put_type;

        memset(&prep, 0, sizeof(prep));
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = index_key.type->def_datalen;
        prep.expiry = TIME64_MAX;
        if (index_key.type->preparse) {
                ret = index_key.type->preparse(&prep);
                if (ret < 0) {
                        key_ref = ERR_PTR(ret);
                        goto error_free_prep;
                }
                if (!index_key.description)
                        index_key.description = prep.description;
                key_ref = ERR_PTR(-EINVAL);
                if (!index_key.description)
                        goto error_free_prep;
        }
        index_key.desc_len = strlen(index_key.description);

        ret = __key_link_begin(keyring, &index_key, &edit);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_free_prep;
        }

        if (restrict_link && restrict_link->check) {
                ret = restrict_link->check(keyring, index_key.type,
                                           &prep.payload, restrict_link->key);
                if (ret < 0) {
                        key_ref = ERR_PTR(ret);
                        goto error_link_end;
                }
        }

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_NEED_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_link_end;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (index_key.type->update) {
                key_ref = find_key_to_update(keyring_ref, &index_key);
                if (key_ref)
                        goto found_matching_key;
        }

        /* if the client doesn't provide, decide on the permissions we want */
        if (perm == KEY_PERM_UNDEF) {
                perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
                perm |= KEY_USR_VIEW;

                if (index_key.type->read)
                        perm |= KEY_POS_READ;

                if (index_key.type == &key_type_keyring ||
                    index_key.type->update)
                        perm |= KEY_POS_WRITE;
        }

        /* allocate a new key */
        key = key_alloc(index_key.type, index_key.description,
                        cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
        if (IS_ERR(key)) {
                key_ref = ERR_CAST(key);
                goto error_link_end;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_link_end;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_link_end:
        __key_link_end(keyring, &index_key, edit);
error_free_prep:
        if (index_key.type->preparse)
                index_key.type->free_preparse(&prep);
error_put_type:
        key_type_put(index_key.type);
error:
        return key_ref;

 found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        __key_link_end(keyring, &index_key, edit);

        key = key_ref_to_ptr(key_ref);
        if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
                ret = wait_for_key_construction(key, true);
                if (ret < 0) {
                        key_ref_put(key_ref);
                        key_ref = ERR_PTR(ret);
                        goto error_free_prep;
                }
        }

        key_ref = __key_update(key_ref, &prep);
        goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);

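/*
 * Example (illustrative sketch only): adding or refreshing a "user" key on a
 * keyring the caller already holds a reference to, letting the function pick
 * a default permissions mask.  The description and payload are arbitrary for
 * the example.
 */
static int example_store_secret(struct key *keyring,
                                const void *secret, size_t len)
{
        key_ref_t kref;

        kref = key_create_or_update(make_key_ref(keyring, true),
                                    "user", "example:secret",
                                    secret, len,
                                    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        key_ref_put(kref);      /* the keyring still holds the key */
        return 0;
}
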
/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data. The
 * caller must be granted Write permission on the key. Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating. The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key_preparsed_payload prep;
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_NEED_WRITE);
        if (ret < 0)
                return ret;

        /* attempt to update it if supported */
        if (!key->type->update)
                return -EOPNOTSUPP;

        memset(&prep, 0, sizeof(prep));
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = key->type->def_datalen;
        prep.expiry = TIME64_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
                        goto error;
        }

        down_write(&key->sem);

        ret = key->type->update(key, &prep);
        if (ret == 0)
                /* Updating a negative key positively instantiates it */
                mark_key_instantiated(key, 0);

        up_write(&key->sem);

error:
        if (key->type->preparse)
                key->type->free_preparse(&prep);
        return ret;
}
EXPORT_SYMBOL(key_update);

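/*
 * Example (illustrative sketch only): replacing the payload of a key the
 * caller already holds, using the possession flag that key_ref_t carries
 * alongside the key pointer.
 */
static int example_refresh_key(struct key *key, const void *data, size_t len)
{
        return key_update(make_key_ref(key, true), data, len);
}
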
/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources. The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
 */
void key_revoke(struct key *key)
{
        time64_t time;

        key_check(key);

        /* make sure no one's trying to change or use the key when we mark it
         * - we tell lockdep that we might nest because we might be revoking an
         *   authorisation key whilst holding the sem on a key we've just
         *   instantiated
         */
        down_write_nested(&key->sem, 1);
        if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
            key->type->revoke)
                key->type->revoke(key);

        /* set the death time to no more than the expiry time */
        time = ktime_get_real_seconds();
        if (key->revoked_at == 0 || key->revoked_at > time) {
                key->revoked_at = time;
                key_schedule_gc(key->revoked_at + key_gc_delay);
        }

        up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately. The key
 * is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
        kenter("%d", key_serial(key));

        key_check(key);

        if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
                down_write_nested(&key->sem, 1);
                if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
                        key_schedule_gc_links();
                up_write(&key->sem);
        }
}
EXPORT_SYMBOL(key_invalidate);

/**
 * generic_key_instantiate - Simple instantiation of a key from preparsed data
 * @key: The key to be instantiated
 * @prep: The preparsed data to load.
 *
 * Instantiate a key from preparsed data. We assume we can just copy the data
 * in directly and clear the old pointers.
 *
 * This can be pointed to directly by the key type instantiate op pointer.
 */
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
        int ret;

        pr_devel("==>%s()\n", __func__);

        ret = key_payload_reserve(key, prep->quotalen);
        if (ret == 0) {
                rcu_assign_keypointer(key, prep->payload.data[0]);
                key->payload.data[1] = prep->payload.data[1];
                key->payload.data[2] = prep->payload.data[2];
                key->payload.data[3] = prep->payload.data[3];
                prep->payload.data[0] = NULL;
                prep->payload.data[1] = NULL;
                prep->payload.data[2] = NULL;
                prep->payload.data[3] = NULL;
        }
        pr_devel("<==%s() = %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);

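/*
 * Example (illustrative sketch only): a key type can use
 * generic_key_instantiate() directly as its ->instantiate() op if its
 * ->preparse() leaves the finished payload in prep->payload.data[] and an
 * accurate size in prep->quotalen.  Here the payload is simply a kmemdup'd
 * copy of the caller's data; a real type would usually also supply
 * ->describe() and ->read() ops.
 */
static int example_preparse(struct key_preparsed_payload *prep)
{
        if (!prep->data || prep->datalen == 0)
                return -EINVAL;

        prep->payload.data[0] = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
        if (!prep->payload.data[0])
                return -ENOMEM;

        prep->quotalen = prep->datalen;
        return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
        /* called if instantiation didn't take ownership of the buffer */
        kfree(prep->payload.data[0]);
}

static void example_destroy(struct key *key)
{
        /* free the buffer handed over by generic_key_instantiate() */
        kfree(key->payload.data[0]);
}

static struct key_type example_key_type = {
        .name           = "example",
        .preparse       = example_preparse,
        .free_preparse  = example_free_preparse,
        .instantiate    = generic_key_instantiate,
        .destroy        = example_destroy,
};
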
/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);

        pr_notice("Key type %s registered\n", ktype->name);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;
}
EXPORT_SYMBOL(register_key_type);

/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
        down_write(&key_types_sem);
        list_del_init(&ktype->link);
        downgrade_write(&key_types_sem);
        key_gc_keytype(ktype);
        pr_notice("Key type %s unregistered\n", ktype->name);
        up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);

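/*
 * Example (illustrative sketch only): a module providing its own key type,
 * such as the example_key_type sketched above, registers it on load and
 * unregisters it on unload; unregistering also kills off any keys of that
 * type that still exist.  In a real module these functions would be wired up
 * with module_init()/module_exit().
 */
static int __init example_keytype_init(void)
{
        return register_key_type(&example_key_type);
}

static void __exit example_keytype_exit(void)
{
        unregister_key_type(&example_key_type);
}
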
/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);
        list_add_tail(&key_type_logon.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);
}