KEYS: Add payload preparsing opportunity prior to key instantiate or update
security/keys/key.c
1 /* Basic authentication token and access key management
2 *
3 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/poison.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/security.h>
18 #include <linux/workqueue.h>
19 #include <linux/random.h>
20 #include <linux/err.h>
21 #include <linux/user_namespace.h>
22 #include "internal.h"
23
24 struct kmem_cache *key_jar;
25 struct rb_root key_serial_tree; /* tree of keys indexed by serial */
26 DEFINE_SPINLOCK(key_serial_lock);
27
28 struct rb_root key_user_tree; /* tree of quota records indexed by UID */
29 DEFINE_SPINLOCK(key_user_lock);
30
31 unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */
32 unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */
33 unsigned int key_quota_maxkeys = 200; /* general key count quota */
34 unsigned int key_quota_maxbytes = 20000; /* general key space quota */
35
36 static LIST_HEAD(key_types_list);
37 static DECLARE_RWSEM(key_types_sem);
38
39 /* We serialise key instantiation and link */
40 DEFINE_MUTEX(key_construction_mutex);
41
42 #ifdef KEY_DEBUGGING
43 void __key_check(const struct key *key)
44 {
45 printk("__key_check: key %p {%08x} should be {%08x}\n",
46 key, key->magic, KEY_DEBUG_MAGIC);
47 BUG();
48 }
49 #endif
50
51 /*
52 * Get the key quota record for a user, allocating a new record if one doesn't
53 * already exist.
54 */
55 struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
56 {
57 struct key_user *candidate = NULL, *user;
58 struct rb_node *parent = NULL;
59 struct rb_node **p;
60
61 try_again:
62 p = &key_user_tree.rb_node;
63 spin_lock(&key_user_lock);
64
65 /* search the tree for a user record with a matching UID */
66 while (*p) {
67 parent = *p;
68 user = rb_entry(parent, struct key_user, node);
69
70 if (uid < user->uid)
71 p = &(*p)->rb_left;
72 else if (uid > user->uid)
73 p = &(*p)->rb_right;
74 else if (user_ns < user->user_ns)
75 p = &(*p)->rb_left;
76 else if (user_ns > user->user_ns)
77 p = &(*p)->rb_right;
78 else
79 goto found;
80 }
81
82 /* if we get here, we failed to find a match in the tree */
83 if (!candidate) {
84 /* allocate a candidate user record if we don't already have
85 * one */
86 spin_unlock(&key_user_lock);
87
88 user = NULL;
89 candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
90 if (unlikely(!candidate))
91 goto out;
92
93 /* the allocation may have scheduled, so we need to repeat the
94 * search lest someone else added the record whilst we were
95 * asleep */
96 goto try_again;
97 }
98
99 /* if we get here, then the user record still hadn't appeared on the
100 * second pass - so we use the candidate record */
101 atomic_set(&candidate->usage, 1);
102 atomic_set(&candidate->nkeys, 0);
103 atomic_set(&candidate->nikeys, 0);
104 candidate->uid = uid;
105 candidate->user_ns = get_user_ns(user_ns);
106 candidate->qnkeys = 0;
107 candidate->qnbytes = 0;
108 spin_lock_init(&candidate->lock);
109 mutex_init(&candidate->cons_lock);
110
111 rb_link_node(&candidate->node, parent, p);
112 rb_insert_color(&candidate->node, &key_user_tree);
113 spin_unlock(&key_user_lock);
114 user = candidate;
115 goto out;
116
117 /* okay - we found a user record for this UID */
118 found:
119 atomic_inc(&user->usage);
120 spin_unlock(&key_user_lock);
121 kfree(candidate);
122 out:
123 return user;
124 }
125
126 /*
127 * Dispose of a user structure
128 */
129 void key_user_put(struct key_user *user)
130 {
131 if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
132 rb_erase(&user->node, &key_user_tree);
133 spin_unlock(&key_user_lock);
134 put_user_ns(user->user_ns);
135
136 kfree(user);
137 }
138 }
139
140 /*
141 * Allocate a serial number for a key. Serial numbers are assigned randomly
142 * to avoid covert-channel security issues arising from predictable numbering.
143 */
144 static inline void key_alloc_serial(struct key *key)
145 {
146 struct rb_node *parent, **p;
147 struct key *xkey;
148
149 /* propose a random serial number and look for a hole for it in the
150 * serial number tree */
151 do {
152 get_random_bytes(&key->serial, sizeof(key->serial));
153
154 key->serial >>= 1; /* negative numbers are not permitted */
155 } while (key->serial < 3);
156
157 spin_lock(&key_serial_lock);
158
159 attempt_insertion:
160 parent = NULL;
161 p = &key_serial_tree.rb_node;
162
163 while (*p) {
164 parent = *p;
165 xkey = rb_entry(parent, struct key, serial_node);
166
167 if (key->serial < xkey->serial)
168 p = &(*p)->rb_left;
169 else if (key->serial > xkey->serial)
170 p = &(*p)->rb_right;
171 else
172 goto serial_exists;
173 }
174
175 /* we've found a suitable hole - arrange for this key to occupy it */
176 rb_link_node(&key->serial_node, parent, p);
177 rb_insert_color(&key->serial_node, &key_serial_tree);
178
179 spin_unlock(&key_serial_lock);
180 return;
181
182 /* we found a key with the proposed serial number - walk the tree from
183 * that point looking for the next unused serial number */
184 serial_exists:
185 for (;;) {
186 key->serial++;
187 if (key->serial < 3) {
188 key->serial = 3;
189 goto attempt_insertion;
190 }
191
192 parent = rb_next(parent);
193 if (!parent)
194 goto attempt_insertion;
195
196 xkey = rb_entry(parent, struct key, serial_node);
197 if (key->serial < xkey->serial)
198 goto attempt_insertion;
199 }
200 }
201
202 /**
203 * key_alloc - Allocate a key of the specified type.
204 * @type: The type of key to allocate.
205 * @desc: The key description to allow the key to be searched out.
206 * @uid: The owner of the new key.
207 * @gid: The group ID for the new key's group permissions.
208 * @cred: The credentials specifying UID namespace.
209 * @perm: The permissions mask of the new key.
210 * @flags: Flags specifying quota properties.
211 *
212 * Allocate a key of the specified type with the attributes given. The key is
213 * returned in an uninstantiated state and the caller needs to instantiate the
214 * key before returning.
215 *
216 * The user's key count quota is updated to reflect the creation of the key and
217 * the user's key data quota has the default for the key type reserved. The
218 * instantiation function should amend this as necessary. If insufficient
219 * quota is available, -EDQUOT will be returned.
220 *
221 * The LSM security modules can prevent a key being created, in which case
222 * -EACCES will be returned.
223 *
224 * Returns a pointer to the new key if successful and an error code otherwise.
225 *
226 * Note that the caller needs to ensure the key type isn't unregistered.
227 * Internally this can be done by locking key_types_sem. Externally, this can
228 * be done by either never unregistering the key type, or making sure
229 * key_alloc() calls don't race with module unloading.
230 */
231 struct key *key_alloc(struct key_type *type, const char *desc,
232 uid_t uid, gid_t gid, const struct cred *cred,
233 key_perm_t perm, unsigned long flags)
234 {
235 struct key_user *user = NULL;
236 struct key *key;
237 size_t desclen, quotalen;
238 int ret;
239
240 key = ERR_PTR(-EINVAL);
241 if (!desc || !*desc)
242 goto error;
243
244 if (type->vet_description) {
245 ret = type->vet_description(desc);
246 if (ret < 0) {
247 key = ERR_PTR(ret);
248 goto error;
249 }
250 }
251
252 desclen = strlen(desc) + 1;
253 quotalen = desclen + type->def_datalen;
254
255 /* get hold of the key tracking for this user */
256 user = key_user_lookup(uid, cred->user_ns);
257 if (!user)
258 goto no_memory_1;
259
260 /* check that the user's quota permits allocation of another key and
261 * its description */
262 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
263 unsigned maxkeys = (uid == 0) ?
264 key_quota_root_maxkeys : key_quota_maxkeys;
265 unsigned maxbytes = (uid == 0) ?
266 key_quota_root_maxbytes : key_quota_maxbytes;
267
268 spin_lock(&user->lock);
269 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
270 if (user->qnkeys + 1 >= maxkeys ||
271 user->qnbytes + quotalen >= maxbytes ||
272 user->qnbytes + quotalen < user->qnbytes)
273 goto no_quota;
274 }
275
276 user->qnkeys++;
277 user->qnbytes += quotalen;
278 spin_unlock(&user->lock);
279 }
280
281 /* allocate and initialise the key and its description */
282 key = kmem_cache_alloc(key_jar, GFP_KERNEL);
283 if (!key)
284 goto no_memory_2;
285
286 if (desc) {
287 key->description = kmemdup(desc, desclen, GFP_KERNEL);
288 if (!key->description)
289 goto no_memory_3;
290 }
291
292 atomic_set(&key->usage, 1);
293 init_rwsem(&key->sem);
294 lockdep_set_class(&key->sem, &type->lock_class);
295 key->type = type;
296 key->user = user;
297 key->quotalen = quotalen;
298 key->datalen = type->def_datalen;
299 key->uid = uid;
300 key->gid = gid;
301 key->perm = perm;
302 key->flags = 0;
303 key->expiry = 0;
304 key->payload.data = NULL;
305 key->security = NULL;
306
307 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
308 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
309
310 memset(&key->type_data, 0, sizeof(key->type_data));
311
312 #ifdef KEY_DEBUGGING
313 key->magic = KEY_DEBUG_MAGIC;
314 #endif
315
316 /* let the security module know about the key */
317 ret = security_key_alloc(key, cred, flags);
318 if (ret < 0)
319 goto security_error;
320
321 /* publish the key by giving it a serial number */
322 atomic_inc(&user->nkeys);
323 key_alloc_serial(key);
324
325 error:
326 return key;
327
328 security_error:
329 kfree(key->description);
330 kmem_cache_free(key_jar, key);
331 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
332 spin_lock(&user->lock);
333 user->qnkeys--;
334 user->qnbytes -= quotalen;
335 spin_unlock(&user->lock);
336 }
337 key_user_put(user);
338 key = ERR_PTR(ret);
339 goto error;
340
341 no_memory_3:
342 kmem_cache_free(key_jar, key);
343 no_memory_2:
344 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
345 spin_lock(&user->lock);
346 user->qnkeys--;
347 user->qnbytes -= quotalen;
348 spin_unlock(&user->lock);
349 }
350 key_user_put(user);
351 no_memory_1:
352 key = ERR_PTR(-ENOMEM);
353 goto error;
354
355 no_quota:
356 spin_unlock(&user->lock);
357 key_user_put(user);
358 key = ERR_PTR(-EDQUOT);
359 goto error;
360 }
361 EXPORT_SYMBOL(key_alloc);
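
A minimal sketch of how an in-kernel user might pair key_alloc() with key_instantiate_and_link() follows. The "user" key type is real, but the helper name, description string and argument names are illustrative assumptions, not part of this file.

#include <linux/key.h>
#include <keys/user-type.h>
#include <linux/cred.h>
#include <linux/err.h>

static struct key *example_make_key(const void *data, size_t datalen)
{
	const struct cred *cred = current_cred();
	struct key *key;
	int ret;

	/* allocate an uninstantiated "user" key owned by the current creds */
	key = key_alloc(&key_type_user, "example:token",
			cred->fsuid, cred->fsgid, cred,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	/* instantiate from the raw blob; no destination keyring, no authkey */
	ret = key_instantiate_and_link(key, data, datalen, NULL, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}

	return key;	/* the caller owns this reference and must key_put() it */
}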
362
363 /**
364 * key_payload_reserve - Adjust data quota reservation for the key's payload
365 * @key: The key to make the reservation for.
366 * @datalen: The amount of data payload the caller now wants.
367 *
368 * Adjust the amount of the owning user's key data quota that a key reserves.
369 * If the amount is increased, then -EDQUOT may be returned if there isn't
370 * enough free quota available.
371 *
372 * If successful, 0 is returned.
373 */
374 int key_payload_reserve(struct key *key, size_t datalen)
375 {
376 int delta = (int)datalen - key->datalen;
377 int ret = 0;
378
379 key_check(key);
380
381 /* contemplate the quota adjustment */
382 if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
383 unsigned maxbytes = (key->user->uid == 0) ?
384 key_quota_root_maxbytes : key_quota_maxbytes;
385
386 spin_lock(&key->user->lock);
387
388 if (delta > 0 &&
389 (key->user->qnbytes + delta >= maxbytes ||
390 key->user->qnbytes + delta < key->user->qnbytes)) {
391 ret = -EDQUOT;
392 }
393 else {
394 key->user->qnbytes += delta;
395 key->quotalen += delta;
396 }
397 spin_unlock(&key->user->lock);
398 }
399
400 /* change the recorded data length if that didn't generate an error */
401 if (ret == 0)
402 key->datalen = datalen;
403
404 return ret;
405 }
406 EXPORT_SYMBOL(key_payload_reserve);
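
As a hedged sketch of the reservation call in context, an ->update() operation for a simple key type might re-reserve the owner's data quota before replacing the payload. The function name and payload handling below are assumptions for illustration; a production type would likely publish the new payload with RCU, as the "user" key type does.

#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_update(struct key *key, struct key_preparsed_payload *prep)
{
	void *copy;
	int ret;

	/* adjust the owner's data quota to the new payload size first */
	ret = key_payload_reserve(key, prep->datalen);
	if (ret < 0)
		return ret;

	copy = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	/* key->sem is held for write by the caller, excluding other writers */
	kfree(key->payload.data);
	key->payload.data = copy;
	return 0;
}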
407
408 /*
409 * Instantiate a key and link it into the target keyring atomically. Must be
410 * called with the target keyring's semaphore writelocked. The target key's
411 * semaphore need not be locked as instantiation is serialised by
412 * key_construction_mutex.
413 */
414 static int __key_instantiate_and_link(struct key *key,
415 struct key_preparsed_payload *prep,
416 struct key *keyring,
417 struct key *authkey,
418 unsigned long *_prealloc)
419 {
420 int ret, awaken;
421
422 key_check(key);
423 key_check(keyring);
424
425 awaken = 0;
426 ret = -EBUSY;
427
428 mutex_lock(&key_construction_mutex);
429
430 /* can't instantiate twice */
431 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
432 /* instantiate the key */
433 ret = key->type->instantiate(key, prep);
434
435 if (ret == 0) {
436 /* mark the key as being instantiated */
437 atomic_inc(&key->user->nikeys);
438 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
439
440 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
441 awaken = 1;
442
443 /* and link it into the destination keyring */
444 if (keyring)
445 __key_link(keyring, key, _prealloc);
446
447 /* disable the authorisation key */
448 if (authkey)
449 key_revoke(authkey);
450 }
451 }
452
453 mutex_unlock(&key_construction_mutex);
454
455 /* wake up anyone waiting for a key to be constructed */
456 if (awaken)
457 wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
458
459 return ret;
460 }
461
462 /**
463 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
464 * @key: The key to instantiate.
465 * @data: The data to use to instantiate the key.
466 * @datalen: The length of @data.
467 * @keyring: Keyring to create a link in on success (or NULL).
468 * @authkey: The authorisation token permitting instantiation.
469 *
470 * Instantiate a key that's in the uninstantiated state using the provided data
471 * and, if successful, link it in to the destination keyring if one is
472 * supplied.
473 *
474 * If successful, 0 is returned, the authorisation token is revoked and anyone
475 * waiting for the key is woken up. If the key was already instantiated,
476 * -EBUSY will be returned.
477 */
478 int key_instantiate_and_link(struct key *key,
479 const void *data,
480 size_t datalen,
481 struct key *keyring,
482 struct key *authkey)
483 {
484 struct key_preparsed_payload prep;
485 unsigned long prealloc;
486 int ret;
487
488 memset(&prep, 0, sizeof(prep));
489 prep.data = data;
490 prep.datalen = datalen;
491 prep.quotalen = key->type->def_datalen;
492 if (key->type->preparse) {
493 ret = key->type->preparse(&prep);
494 if (ret < 0)
495 goto error;
496 }
497
498 if (keyring) {
499 ret = __key_link_begin(keyring, key->type, key->description,
500 &prealloc);
501 if (ret < 0)
502 goto error_free_preparse;
503 }
504
505 ret = __key_instantiate_and_link(key, &prep, keyring, authkey,
506 &prealloc);
507
508 if (keyring)
509 __key_link_end(keyring, key->type, prealloc);
510
511 error_free_preparse:
512 if (key->type->preparse)
513 key->type->free_preparse(&prep);
514 error:
515 return ret;
516 }
517
518 EXPORT_SYMBOL(key_instantiate_and_link);
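
The preparse hook added by this patch lets a key type parse and vet the payload blob before any key is allocated or any keyring semaphore is taken; it can also propose a description derived from the payload (used by key_create_or_update() further down when the caller passes NULL). A minimal sketch of such a type's operations follows. struct key_preparsed_payload is declared in <linux/key-type.h>; the "example" payload layout and size limit are illustrative assumptions.

#include <linux/key-type.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_blob {
	size_t	datalen;
	char	data[];
};

static int example_preparse(struct key_preparsed_payload *prep)
{
	/* vet the raw data before any key exists or any lock is held */
	if (!prep->data || prep->datalen == 0 || prep->datalen > 4096)
		return -EINVAL;

	/* propose a description derived from the payload; key_create_or_update()
	 * uses it when the caller supplied a NULL description */
	prep->description = kasprintf(GFP_KERNEL, "example:%zu", prep->datalen);
	if (!prep->description)
		return -ENOMEM;
	return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
	/* release whatever example_preparse() allocated */
	kfree(prep->description);
}

static int example_instantiate(struct key *key,
			       struct key_preparsed_payload *prep)
{
	struct example_blob *blob;
	int ret;

	/* charge the payload against the owner's data quota */
	ret = key_payload_reserve(key, prep->datalen);
	if (ret < 0)
		return ret;

	blob = kmalloc(sizeof(*blob) + prep->datalen, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	blob->datalen = prep->datalen;
	memcpy(blob->data, prep->data, prep->datalen);
	key->payload.data = blob;
	return 0;
}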
519
520 /**
521 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
522 * @key: The key to instantiate.
523 * @timeout: The timeout on the negative key.
524 * @error: The error to return when the key is hit.
525 * @keyring: Keyring to create a link in on success (or NULL).
526 * @authkey: The authorisation token permitting instantiation.
527 *
528 * Negatively instantiate a key that's in the uninstantiated state and, if
529 * successful, set its timeout and stored error and link it in to the
530 * destination keyring if one is supplied. The key and any links to the key
531 * will be automatically garbage collected after the timeout expires.
532 *
533 * Negative keys are used to rate limit repeated request_key() calls by causing
534 * them to return the stored error code (typically ENOKEY) until the negative
535 * key expires.
536 *
537 * If successful, 0 is returned, the authorisation token is revoked and anyone
538 * waiting for the key is woken up. If the key was already instantiated,
539 * -EBUSY will be returned.
540 */
541 int key_reject_and_link(struct key *key,
542 unsigned timeout,
543 unsigned error,
544 struct key *keyring,
545 struct key *authkey)
546 {
547 unsigned long prealloc;
548 struct timespec now;
549 int ret, awaken, link_ret = 0;
550
551 key_check(key);
552 key_check(keyring);
553
554 awaken = 0;
555 ret = -EBUSY;
556
557 if (keyring)
558 link_ret = __key_link_begin(keyring, key->type,
559 key->description, &prealloc);
560
561 mutex_lock(&key_construction_mutex);
562
563 /* can't instantiate twice */
564 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
565 /* mark the key as being negatively instantiated */
566 atomic_inc(&key->user->nikeys);
567 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
568 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
569 key->type_data.reject_error = -error;
570 now = current_kernel_time();
571 key->expiry = now.tv_sec + timeout;
572 key_schedule_gc(key->expiry + key_gc_delay);
573
574 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
575 awaken = 1;
576
577 ret = 0;
578
579 /* and link it into the destination keyring */
580 if (keyring && link_ret == 0)
581 __key_link(keyring, key, &prealloc);
582
583 /* disable the authorisation key */
584 if (authkey)
585 key_revoke(authkey);
586 }
587
588 mutex_unlock(&key_construction_mutex);
589
590 if (keyring)
591 __key_link_end(keyring, key->type, prealloc);
592
593 /* wake up anyone waiting for a key to be constructed */
594 if (awaken)
595 wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
596
597 return ret == 0 ? link_ret : ret;
598 }
599 EXPORT_SYMBOL(key_reject_and_link);
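
For example, a construction path that cannot produce a payload might cache the failure as a negative key; the function and variable names here are illustrative only.

#include <linux/key.h>
#include <linux/errno.h>

static int example_reject(struct key *key, struct key *dest_keyring,
			  struct key *authkey)
{
	/* lookups will see -ENOKEY for the next 60 seconds, after which the
	 * negative key and its links are garbage collected */
	return key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
}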
600
601 /**
602 * key_put - Discard a reference to a key.
603 * @key: The key to discard a reference from.
604 *
605 * Discard a reference to a key, and when all the references are gone, we
606 * schedule the cleanup task to come and pull it out of the tree in process
607 * context at some later time.
608 */
609 void key_put(struct key *key)
610 {
611 if (key) {
612 key_check(key);
613
614 if (atomic_dec_and_test(&key->usage))
615 queue_work(system_nrt_wq, &key_gc_work);
616 }
617 }
618 EXPORT_SYMBOL(key_put);
619
620 /*
621 * Find a key by its serial number.
622 */
623 struct key *key_lookup(key_serial_t id)
624 {
625 struct rb_node *n;
626 struct key *key;
627
628 spin_lock(&key_serial_lock);
629
630 /* search the tree for the specified key */
631 n = key_serial_tree.rb_node;
632 while (n) {
633 key = rb_entry(n, struct key, serial_node);
634
635 if (id < key->serial)
636 n = n->rb_left;
637 else if (id > key->serial)
638 n = n->rb_right;
639 else
640 goto found;
641 }
642
643 not_found:
644 key = ERR_PTR(-ENOKEY);
645 goto error;
646
647 found:
648 /* pretend it doesn't exist if it is awaiting deletion */
649 if (atomic_read(&key->usage) == 0)
650 goto not_found;
651
652 /* this races with key_put(), but that doesn't matter since key_put()
653 * doesn't actually change the key
654 */
655 atomic_inc(&key->usage);
656
657 error:
658 spin_unlock(&key_serial_lock);
659 return key;
660 }
661
662 /*
663 * Find and lock the specified key type against removal.
664 *
665 * We return with the sem read-locked if successful. If the type wasn't
666 * available -ENOKEY is returned instead.
667 */
668 struct key_type *key_type_lookup(const char *type)
669 {
670 struct key_type *ktype;
671
672 down_read(&key_types_sem);
673
674 /* look up the key type to see if it's one of the registered kernel
675 * types */
676 list_for_each_entry(ktype, &key_types_list, link) {
677 if (strcmp(ktype->name, type) == 0)
678 goto found_kernel_type;
679 }
680
681 up_read(&key_types_sem);
682 ktype = ERR_PTR(-ENOKEY);
683
684 found_kernel_type:
685 return ktype;
686 }
687
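/**
 * key_set_timeout - Set the expiry time on a key.
 * @key: The key to alter.
 * @timeout: The timeout in seconds from now, or 0 to clear any expiry time.
 *
 * Set or clear the key's expiry time and reschedule the garbage collector
 * accordingly.
 */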
688 void key_set_timeout(struct key *key, unsigned timeout)
689 {
690 struct timespec now;
691 time_t expiry = 0;
692
693 /* make the changes with the locks held to prevent races */
694 down_write(&key->sem);
695
696 if (timeout > 0) {
697 now = current_kernel_time();
698 expiry = now.tv_sec + timeout;
699 }
700
701 key->expiry = expiry;
702 key_schedule_gc(key->expiry + key_gc_delay);
703
704 up_write(&key->sem);
705 }
706 EXPORT_SYMBOL_GPL(key_set_timeout);
707
708 /*
709 * Unlock a key type locked by key_type_lookup().
710 */
711 void key_type_put(struct key_type *ktype)
712 {
713 up_read(&key_types_sem);
714 }
715
716 /*
717 * Attempt to update an existing key.
718 *
719 * The key is given to us with an incremented refcount that we need to discard
720 * if we get an error.
721 */
722 static inline key_ref_t __key_update(key_ref_t key_ref,
723 struct key_preparsed_payload *prep)
724 {
725 struct key *key = key_ref_to_ptr(key_ref);
726 int ret;
727
728 /* need write permission on the key to update it */
729 ret = key_permission(key_ref, KEY_WRITE);
730 if (ret < 0)
731 goto error;
732
733 ret = -EEXIST;
734 if (!key->type->update)
735 goto error;
736
737 down_write(&key->sem);
738
739 ret = key->type->update(key, prep);
740 if (ret == 0)
741 /* updating a negative key instantiates it */
742 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
743
744 up_write(&key->sem);
745
746 if (ret < 0)
747 goto error;
748 out:
749 return key_ref;
750
751 error:
752 key_put(key);
753 key_ref = ERR_PTR(ret);
754 goto out;
755 }
756
757 /**
758 * key_create_or_update - Update or create and instantiate a key.
759 * @keyring_ref: A pointer to the destination keyring with possession flag.
760 * @type: The type of key.
761 * @description: The searchable description for the key.
762 * @payload: The data to use to instantiate or update the key.
763 * @plen: The length of @payload.
764 * @perm: The permissions mask for a new key.
765 * @flags: The quota flags for a new key.
766 *
767 * Search the destination keyring for a key of the same description and if one
768 * is found, update it, otherwise create and instantiate a new one and create a
769 * link to it from that keyring.
770 *
771 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
772 * concocted.
773 *
774 * Returns a pointer to the new key if successful, -ENODEV if the key type
775 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
776 * caller isn't permitted to modify the keyring or the LSM did not permit
777 * creation of the key.
778 *
779 * On success, the possession flag from the keyring ref will be tacked on to
780 * the key ref before it is returned.
781 */
782 key_ref_t key_create_or_update(key_ref_t keyring_ref,
783 const char *type,
784 const char *description,
785 const void *payload,
786 size_t plen,
787 key_perm_t perm,
788 unsigned long flags)
789 {
790 unsigned long prealloc;
791 struct key_preparsed_payload prep;
792 const struct cred *cred = current_cred();
793 struct key_type *ktype;
794 struct key *keyring, *key = NULL;
795 key_ref_t key_ref;
796 int ret;
797
798 /* look up the key type to see if it's one of the registered kernel
799 * types */
800 ktype = key_type_lookup(type);
801 if (IS_ERR(ktype)) {
802 key_ref = ERR_PTR(-ENODEV);
803 goto error;
804 }
805
806 key_ref = ERR_PTR(-EINVAL);
807 if (!ktype->match || !ktype->instantiate ||
808 (!description && !ktype->preparse))
809 goto error_put_type;
810
811 keyring = key_ref_to_ptr(keyring_ref);
812
813 key_check(keyring);
814
815 key_ref = ERR_PTR(-ENOTDIR);
816 if (keyring->type != &key_type_keyring)
817 goto error_put_type;
818
819 memset(&prep, 0, sizeof(prep));
820 prep.data = payload;
821 prep.datalen = plen;
822 prep.quotalen = ktype->def_datalen;
823 if (ktype->preparse) {
824 ret = ktype->preparse(&prep);
825 if (ret < 0) {
826 key_ref = ERR_PTR(ret);
827 goto error_put_type;
828 }
829 if (!description)
830 description = prep.description;
831 key_ref = ERR_PTR(-EINVAL);
832 if (!description)
833 goto error_free_prep;
834 }
835
836 ret = __key_link_begin(keyring, ktype, description, &prealloc);
837 if (ret < 0) {
838 key_ref = ERR_PTR(ret);
839 goto error_free_prep;
840 }
841
842 /* if we're going to allocate a new key, we're going to have
843 * to modify the keyring */
844 ret = key_permission(keyring_ref, KEY_WRITE);
845 if (ret < 0) {
846 key_ref = ERR_PTR(ret);
847 goto error_link_end;
848 }
849
850 /* if it's possible to update this type of key, search for an existing
851 * key of the same type and description in the destination keyring and
852 * update that instead if possible
853 */
854 if (ktype->update) {
855 key_ref = __keyring_search_one(keyring_ref, ktype, description,
856 0);
857 if (!IS_ERR(key_ref))
858 goto found_matching_key;
859 }
860
861 /* if the client didn't provide a permissions mask, decide on one ourselves */
862 if (perm == KEY_PERM_UNDEF) {
863 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
864 perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
865
866 if (ktype->read)
867 perm |= KEY_POS_READ | KEY_USR_READ;
868
869 if (ktype == &key_type_keyring || ktype->update)
870 perm |= KEY_USR_WRITE;
871 }
872
873 /* allocate a new key */
874 key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
875 perm, flags);
876 if (IS_ERR(key)) {
877 key_ref = ERR_CAST(key);
878 goto error_link_end;
879 }
880
881 /* instantiate it and link it into the target keyring */
882 ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc);
883 if (ret < 0) {
884 key_put(key);
885 key_ref = ERR_PTR(ret);
886 goto error_link_end;
887 }
888
889 key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
890
891 error_link_end:
892 __key_link_end(keyring, ktype, prealloc);
893 error_free_prep:
894 if (ktype->preparse)
895 ktype->free_preparse(&prep);
896 error_put_type:
897 key_type_put(ktype);
898 error:
899 return key_ref;
900
901 found_matching_key:
902 /* we found a matching key, so we're going to try to update it
903 * - we can drop the locks first as we have the key pinned
904 */
905 __key_link_end(keyring, ktype, prealloc);
906
907 key_ref = __key_update(key_ref, &prep);
908 goto error_free_prep;
909 }
910 EXPORT_SYMBOL(key_create_or_update);
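
A hedged sketch of a typical caller: add or refresh a "user" key in a keyring the caller already holds a reference to. make_key_ref()'s second argument asserts possession; the helper name, description string and quota flag choice are assumptions for illustration.

#include <linux/key.h>
#include <linux/err.h>

static int example_add_to_keyring(struct key *keyring,
				  const void *blob, size_t blen)
{
	key_ref_t kref;

	/* update the existing "user" key of this description, or create and
	 * link a new one into the keyring */
	kref = key_create_or_update(make_key_ref(keyring, true), "user",
				    "example:token", blob, blen,
				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(kref))
		return PTR_ERR(kref);

	key_ref_put(kref);
	return 0;
}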
911
912 /**
913 * key_update - Update a key's contents.
914 * @key_ref: The pointer (plus possession flag) to the key.
915 * @payload: The data to be used to update the key.
916 * @plen: The length of @payload.
917 *
918 * Attempt to update the contents of a key with the given payload data. The
919 * caller must be granted Write permission on the key. Negative keys can be
920 * instantiated by this method.
921 *
922 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
923 * type does not support updating. The key type may return other errors.
924 */
925 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
926 {
927 struct key_preparsed_payload prep;
928 struct key *key = key_ref_to_ptr(key_ref);
929 int ret;
930
931 key_check(key);
932
933 /* the key must be writable */
934 ret = key_permission(key_ref, KEY_WRITE);
935 if (ret < 0)
936 goto error;
937
938 /* attempt to update it if supported */
939 ret = -EOPNOTSUPP;
940 if (!key->type->update)
941 goto error;
942
943 memset(&prep, 0, sizeof(prep));
944 prep.data = payload;
945 prep.datalen = plen;
946 prep.quotalen = key->type->def_datalen;
947 if (key->type->preparse) {
948 ret = key->type->preparse(&prep);
949 if (ret < 0)
950 goto error;
951 }
952
953 down_write(&key->sem);
954
955 ret = key->type->update(key, &prep);
956 if (ret == 0)
957 /* updating a negative key instantiates it */
958 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
959
960 up_write(&key->sem);
961
962 if (key->type->preparse)
963 key->type->free_preparse(&prep);
964 error:
965 return ret;
966 }
967 EXPORT_SYMBOL(key_update);
968
969 /**
970 * key_revoke - Revoke a key.
971 * @key: The key to be revoked.
972 *
973 * Mark a key as being revoked and ask the type to free up its resources. The
974 * revocation timeout is set and the key and all its links will be
975 * automatically garbage collected after key_gc_delay amount of time if they
976 * are not manually dealt with first.
977 */
978 void key_revoke(struct key *key)
979 {
980 struct timespec now;
981 time_t time;
982
983 key_check(key);
984
985 /* make sure no one's trying to change or use the key when we mark it
986 * - we tell lockdep that we might nest because we might be revoking an
987 * authorisation key whilst holding the sem on a key we've just
988 * instantiated
989 */
990 down_write_nested(&key->sem, 1);
991 if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
992 key->type->revoke)
993 key->type->revoke(key);
994
995 /* set the death time to no more than the expiry time */
996 now = current_kernel_time();
997 time = now.tv_sec;
998 if (key->revoked_at == 0 || key->revoked_at > time) {
999 key->revoked_at = time;
1000 key_schedule_gc(key->revoked_at + key_gc_delay);
1001 }
1002
1003 up_write(&key->sem);
1004 }
1005 EXPORT_SYMBOL(key_revoke);
1006
1007 /**
1008 * key_invalidate - Invalidate a key.
1009 * @key: The key to be invalidated.
1010 *
1011 * Mark a key as being invalidated and have it cleaned up immediately. The key
1012 * is ignored by all searches and other operations from this point.
1013 */
1014 void key_invalidate(struct key *key)
1015 {
1016 kenter("%d", key_serial(key));
1017
1018 key_check(key);
1019
1020 if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
1021 down_write_nested(&key->sem, 1);
1022 if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
1023 key_schedule_gc_links();
1024 up_write(&key->sem);
1025 }
1026 }
1027 EXPORT_SYMBOL(key_invalidate);
1028
1029 /**
1030 * register_key_type - Register a type of key.
1031 * @ktype: The new key type.
1032 *
1033 * Register a new key type.
1034 *
1035 * Returns 0 on success or -EEXIST if a type of this name already exists.
1036 */
1037 int register_key_type(struct key_type *ktype)
1038 {
1039 struct key_type *p;
1040 int ret;
1041
1042 memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
1043
1044 ret = -EEXIST;
1045 down_write(&key_types_sem);
1046
1047 /* disallow key types with the same name */
1048 list_for_each_entry(p, &key_types_list, link) {
1049 if (strcmp(p->name, ktype->name) == 0)
1050 goto out;
1051 }
1052
1053 /* store the type */
1054 list_add(&ktype->link, &key_types_list);
1055
1056 pr_notice("Key type %s registered\n", ktype->name);
1057 ret = 0;
1058
1059 out:
1060 up_write(&key_types_sem);
1061 return ret;
1062 }
1063 EXPORT_SYMBOL(register_key_type);
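
Registration is typically done from module init, paired with unregister_key_type() on exit. The sketch below assumes the example_preparse/free_preparse/instantiate/update operations outlined earlier on this page; only ->match() and ->destroy() are filled in here, and every "example" identifier is hypothetical.

#include <linux/key-type.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_match(const struct key *key, const void *description)
{
	/* exact match on the description string */
	return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

static struct key_type key_type_example = {
	.name		= "example",
	.preparse	= example_preparse,
	.free_preparse	= example_free_preparse,
	.instantiate	= example_instantiate,
	.update		= example_update,
	.match		= example_match,
	.destroy	= example_destroy,
};

static int __init example_key_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_key_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_key_init);
module_exit(example_key_exit);
MODULE_LICENSE("GPL");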
1064
1065 /**
1066 * unregister_key_type - Unregister a type of key.
1067 * @ktype: The key type.
1068 *
1069 * Unregister a key type and mark all the extant keys of this type as dead.
1070 * Those keys of this type are then destroyed to get rid of their payloads and
1071 * they and their links will be garbage collected as soon as possible.
1072 */
1073 void unregister_key_type(struct key_type *ktype)
1074 {
1075 down_write(&key_types_sem);
1076 list_del_init(&ktype->link);
1077 downgrade_write(&key_types_sem);
1078 key_gc_keytype(ktype);
1079 pr_notice("Key type %s unregistered\n", ktype->name);
1080 up_read(&key_types_sem);
1081 }
1082 EXPORT_SYMBOL(unregister_key_type);
1083
1084 /*
1085 * Initialise the key management state.
1086 */
1087 void __init key_init(void)
1088 {
1089 /* allocate a slab in which we can store keys */
1090 key_jar = kmem_cache_create("key_jar", sizeof(struct key),
1091 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1092
1093 /* add the special key types */
1094 list_add_tail(&key_type_keyring.link, &key_types_list);
1095 list_add_tail(&key_type_dead.link, &key_types_list);
1096 list_add_tail(&key_type_user.link, &key_types_list);
1097 list_add_tail(&key_type_logon.link, &key_types_list);
1098
1099 /* record the root user tracking */
1100 rb_link_node(&root_key_user.node,
1101 NULL,
1102 &key_user_tree.rb_node);
1103
1104 rb_insert_color(&root_key_user.node,
1105 &key_user_tree);
1106 }