3 * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/security.h>
17 #include <linux/seq_file.h>
18 #include <linux/err.h>
19 #include <keys/keyring-type.h>
20 #include <keys/user-type.h>
21 #include <linux/assoc_array_priv.h>
22 #include <linux/uaccess.h>
26 * When plumbing the depths of the key tree, this sets a hard limit
27 * set on how deep we're willing to go.
29 #define KEYRING_SEARCH_MAX_DEPTH 6
32 * We keep all named keyrings in a hash to speed looking them up.
34 #define KEYRING_NAME_HASH_SIZE (1 << 5)
37 * We mark pointers we pass to the associative array with bit 1 set if
38 * they're keyrings and clear otherwise.
40 #define KEYRING_PTR_SUBTYPE 0x2UL
42 static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr
*x
)
44 return (unsigned long)x
& KEYRING_PTR_SUBTYPE
;
46 static inline struct key
*keyring_ptr_to_key(const struct assoc_array_ptr
*x
)
48 void *object
= assoc_array_ptr_to_leaf(x
);
49 return (struct key
*)((unsigned long)object
& ~KEYRING_PTR_SUBTYPE
);
51 static inline void *keyring_key_to_ptr(struct key
*key
)
53 if (key
->type
== &key_type_keyring
)
54 return (void *)((unsigned long)key
| KEYRING_PTR_SUBTYPE
);
58 static struct list_head keyring_name_hash
[KEYRING_NAME_HASH_SIZE
];
59 static DEFINE_RWLOCK(keyring_name_lock
);
61 static inline unsigned keyring_hash(const char *desc
)
66 bucket
+= (unsigned char)*desc
;
68 return bucket
& (KEYRING_NAME_HASH_SIZE
- 1);
72 * The keyring key type definition. Keyrings are simply keys of this type and
73 * can be treated as ordinary keys in addition to having their own special
76 static int keyring_preparse(struct key_preparsed_payload
*prep
);
77 static void keyring_free_preparse(struct key_preparsed_payload
*prep
);
78 static int keyring_instantiate(struct key
*keyring
,
79 struct key_preparsed_payload
*prep
);
80 static void keyring_revoke(struct key
*keyring
);
81 static void keyring_destroy(struct key
*keyring
);
82 static void keyring_describe(const struct key
*keyring
, struct seq_file
*m
);
83 static long keyring_read(const struct key
*keyring
,
84 char __user
*buffer
, size_t buflen
);
86 struct key_type key_type_keyring
= {
89 .preparse
= keyring_preparse
,
90 .free_preparse
= keyring_free_preparse
,
91 .instantiate
= keyring_instantiate
,
92 .revoke
= keyring_revoke
,
93 .destroy
= keyring_destroy
,
94 .describe
= keyring_describe
,
97 EXPORT_SYMBOL(key_type_keyring
);
100 * Semaphore to serialise link/link calls to prevent two link calls in parallel
101 * introducing a cycle.
103 static DECLARE_RWSEM(keyring_serialise_link_sem
);
106 * Publish the name of a keyring so that it can be found by name (if it has
109 static void keyring_publish_name(struct key
*keyring
)
113 if (keyring
->description
) {
114 bucket
= keyring_hash(keyring
->description
);
116 write_lock(&keyring_name_lock
);
118 if (!keyring_name_hash
[bucket
].next
)
119 INIT_LIST_HEAD(&keyring_name_hash
[bucket
]);
121 list_add_tail(&keyring
->name_link
,
122 &keyring_name_hash
[bucket
]);
124 write_unlock(&keyring_name_lock
);
129 * Preparse a keyring payload
131 static int keyring_preparse(struct key_preparsed_payload
*prep
)
133 return prep
->datalen
!= 0 ? -EINVAL
: 0;
137 * Free a preparse of a user defined key payload
static void keyring_free_preparse(struct key_preparsed_payload *prep)
{
	/* Nothing to do: keyring_preparse() allocates nothing. */
}
144 * Initialise a keyring.
146 * Returns 0 on success, -EINVAL if given any data.
148 static int keyring_instantiate(struct key
*keyring
,
149 struct key_preparsed_payload
*prep
)
151 assoc_array_init(&keyring
->keys
);
152 /* make the keyring available by name if it has one */
153 keyring_publish_name(keyring
);
158 * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd
159 * fold the carry back too, but that requires inline asm.
161 static u64
mult_64x32_and_fold(u64 x
, u32 y
)
163 u64 hi
= (u64
)(u32
)(x
>> 32) * y
;
164 u64 lo
= (u64
)(u32
)(x
) * y
;
165 return lo
+ ((u64
)(u32
)hi
<< 32) + (u32
)(hi
>> 32);
169 * Hash a key type and description.
171 static unsigned long hash_key_type_and_desc(const struct keyring_index_key
*index_key
)
173 const unsigned level_shift
= ASSOC_ARRAY_LEVEL_STEP
;
174 const unsigned long fan_mask
= ASSOC_ARRAY_FAN_MASK
;
175 const char *description
= index_key
->description
;
176 unsigned long hash
, type
;
179 int n
, desc_len
= index_key
->desc_len
;
181 type
= (unsigned long)index_key
->type
;
183 acc
= mult_64x32_and_fold(type
, desc_len
+ 13);
184 acc
= mult_64x32_and_fold(acc
, 9207);
192 memcpy(&piece
, description
, n
);
195 acc
= mult_64x32_and_fold(acc
, piece
);
196 acc
= mult_64x32_and_fold(acc
, 9207);
199 /* Fold the hash down to 32 bits if need be. */
201 if (ASSOC_ARRAY_KEY_CHUNK_SIZE
== 32)
204 /* Squidge all the keyrings into a separate part of the tree to
205 * ordinary keys by making sure the lowest level segment in the hash is
206 * zero for keyrings and non-zero otherwise.
208 if (index_key
->type
!= &key_type_keyring
&& (hash
& fan_mask
) == 0)
209 return hash
| (hash
>> (ASSOC_ARRAY_KEY_CHUNK_SIZE
- level_shift
)) | 1;
210 if (index_key
->type
== &key_type_keyring
&& (hash
& fan_mask
) != 0)
211 return (hash
+ (hash
<< level_shift
)) & ~fan_mask
;
216 * Build the next index key chunk.
218 * On 32-bit systems the index key is laid out as:
221 * hash desclen typeptr desc[]
226 * hash desclen typeptr desc[]
228 * We return it one word-sized chunk at a time.
230 static unsigned long keyring_get_key_chunk(const void *data
, int level
)
232 const struct keyring_index_key
*index_key
= data
;
233 unsigned long chunk
= 0;
235 int desc_len
= index_key
->desc_len
, n
= sizeof(chunk
);
237 level
/= ASSOC_ARRAY_KEY_CHUNK_SIZE
;
240 return hash_key_type_and_desc(index_key
);
242 return ((unsigned long)index_key
->type
<< 8) | desc_len
;
245 return (u8
)((unsigned long)index_key
->type
>>
246 (ASSOC_ARRAY_KEY_CHUNK_SIZE
- 8));
250 offset
+= sizeof(chunk
) - 1;
251 offset
+= (level
- 3) * sizeof(chunk
);
252 if (offset
>= desc_len
)
260 chunk
|= ((u8
*)index_key
->description
)[--offset
];
261 } while (--desc_len
> 0);
265 chunk
|= (u8
)((unsigned long)index_key
->type
>>
266 (ASSOC_ARRAY_KEY_CHUNK_SIZE
- 8));
272 static unsigned long keyring_get_object_key_chunk(const void *object
, int level
)
274 const struct key
*key
= keyring_ptr_to_key(object
);
275 return keyring_get_key_chunk(&key
->index_key
, level
);
278 static bool keyring_compare_object(const void *object
, const void *data
)
280 const struct keyring_index_key
*index_key
= data
;
281 const struct key
*key
= keyring_ptr_to_key(object
);
283 return key
->index_key
.type
== index_key
->type
&&
284 key
->index_key
.desc_len
== index_key
->desc_len
&&
285 memcmp(key
->index_key
.description
, index_key
->description
,
286 index_key
->desc_len
) == 0;
290 * Compare the index keys of a pair of objects and determine the bit position
291 * at which they differ - if they differ.
293 static int keyring_diff_objects(const void *object
, const void *data
)
295 const struct key
*key_a
= keyring_ptr_to_key(object
);
296 const struct keyring_index_key
*a
= &key_a
->index_key
;
297 const struct keyring_index_key
*b
= data
;
298 unsigned long seg_a
, seg_b
;
302 seg_a
= hash_key_type_and_desc(a
);
303 seg_b
= hash_key_type_and_desc(b
);
304 if ((seg_a
^ seg_b
) != 0)
307 /* The number of bits contributed by the hash is controlled by a
308 * constant in the assoc_array headers. Everything else thereafter we
309 * can deal with as being machine word-size dependent.
311 level
+= ASSOC_ARRAY_KEY_CHUNK_SIZE
/ 8;
314 if ((seg_a
^ seg_b
) != 0)
317 /* The next bit may not work on big endian */
319 seg_a
= (unsigned long)a
->type
;
320 seg_b
= (unsigned long)b
->type
;
321 if ((seg_a
^ seg_b
) != 0)
324 level
+= sizeof(unsigned long);
325 if (a
->desc_len
== 0)
329 if (((unsigned long)a
->description
| (unsigned long)b
->description
) &
330 (sizeof(unsigned long) - 1)) {
332 seg_a
= *(unsigned long *)(a
->description
+ i
);
333 seg_b
= *(unsigned long *)(b
->description
+ i
);
334 if ((seg_a
^ seg_b
) != 0)
336 i
+= sizeof(unsigned long);
337 } while (i
< (a
->desc_len
& (sizeof(unsigned long) - 1)));
340 for (; i
< a
->desc_len
; i
++) {
341 seg_a
= *(unsigned char *)(a
->description
+ i
);
342 seg_b
= *(unsigned char *)(b
->description
+ i
);
343 if ((seg_a
^ seg_b
) != 0)
353 i
= level
* 8 + __ffs(seg_a
^ seg_b
);
358 * Free an object after stripping the keyring flag off of the pointer.
/*
 * Free an object after stripping the keyring flag off of the pointer:
 * this just drops the reference the index tree held on the key.
 */
static void keyring_free_object(void *object)
{
	struct key *key = keyring_ptr_to_key(object);

	key_put(key);
}
366 * Operations for keyring management by the index-tree routines.
static const struct assoc_array_ops keyring_assoc_array_ops = {
	.get_key_chunk		= keyring_get_key_chunk,	/* chunk of a search index key */
	.get_object_key_chunk	= keyring_get_object_key_chunk,	/* chunk of a stored key's index key */
	.compare_object		= keyring_compare_object,	/* exact type+description match */
	.diff_objects		= keyring_diff_objects,		/* bit position at which index keys differ */
	.free_object		= keyring_free_object,		/* drop the tree's ref on a key */
};
377 * Clean up a keyring when it is destroyed. Unpublish its name if it had one
378 * and dispose of its data.
380 * The garbage collector detects the final key_put(), removes the keyring from
381 * the serial number tree and then does RCU synchronisation before coming here,
382 * so we shouldn't need to worry about code poking around here with the RCU
383 * readlock held by this time.
385 static void keyring_destroy(struct key
*keyring
)
387 if (keyring
->description
) {
388 write_lock(&keyring_name_lock
);
390 if (keyring
->name_link
.next
!= NULL
&&
391 !list_empty(&keyring
->name_link
))
392 list_del(&keyring
->name_link
);
394 write_unlock(&keyring_name_lock
);
397 if (keyring
->restrict_link
) {
398 struct key_restriction
*keyres
= keyring
->restrict_link
;
400 key_put(keyres
->key
);
404 assoc_array_destroy(&keyring
->keys
, &keyring_assoc_array_ops
);
408 * Describe a keyring for /proc.
410 static void keyring_describe(const struct key
*keyring
, struct seq_file
*m
)
412 if (keyring
->description
)
413 seq_puts(m
, keyring
->description
);
415 seq_puts(m
, "[anon]");
417 if (key_is_positive(keyring
)) {
418 if (keyring
->keys
.nr_leaves_on_tree
!= 0)
419 seq_printf(m
, ": %lu", keyring
->keys
.nr_leaves_on_tree
);
421 seq_puts(m
, ": empty");
425 struct keyring_read_iterator_context
{
428 key_serial_t __user
*buffer
;
431 static int keyring_read_iterator(const void *object
, void *data
)
433 struct keyring_read_iterator_context
*ctx
= data
;
434 const struct key
*key
= keyring_ptr_to_key(object
);
437 kenter("{%s,%d},,{%zu/%zu}",
438 key
->type
->name
, key
->serial
, ctx
->count
, ctx
->buflen
);
440 if (ctx
->count
>= ctx
->buflen
)
443 ret
= put_user(key
->serial
, ctx
->buffer
);
447 ctx
->count
+= sizeof(key
->serial
);
452 * Read a list of key IDs from the keyring's contents in binary form
454 * The keyring's semaphore is read-locked by the caller. This prevents someone
455 * from modifying it under us - which could cause us to read key IDs multiple
458 static long keyring_read(const struct key
*keyring
,
459 char __user
*buffer
, size_t buflen
)
461 struct keyring_read_iterator_context ctx
;
464 kenter("{%d},,%zu", key_serial(keyring
), buflen
);
466 if (buflen
& (sizeof(key_serial_t
) - 1))
469 /* Copy as many key IDs as fit into the buffer */
470 if (buffer
&& buflen
) {
471 ctx
.buffer
= (key_serial_t __user
*)buffer
;
474 ret
= assoc_array_iterate(&keyring
->keys
,
475 keyring_read_iterator
, &ctx
);
477 kleave(" = %ld [iterate]", ret
);
482 /* Return the size of the buffer needed */
483 ret
= keyring
->keys
.nr_leaves_on_tree
* sizeof(key_serial_t
);
485 kleave("= %ld [ok]", ret
);
487 kleave("= %ld [buffer too small]", ret
);
492 * Allocate a keyring and link into the destination keyring.
494 struct key
*keyring_alloc(const char *description
, kuid_t uid
, kgid_t gid
,
495 const struct cred
*cred
, key_perm_t perm
,
497 struct key_restriction
*restrict_link
,
503 keyring
= key_alloc(&key_type_keyring
, description
,
504 uid
, gid
, cred
, perm
, flags
, restrict_link
);
505 if (!IS_ERR(keyring
)) {
506 ret
= key_instantiate_and_link(keyring
, NULL
, 0, dest
, NULL
);
509 keyring
= ERR_PTR(ret
);
515 EXPORT_SYMBOL(keyring_alloc
);
518 * restrict_link_reject - Give -EPERM to restrict link
519 * @keyring: The keyring being added to.
520 * @type: The type of key being added.
521 * @payload: The payload of the key intended to be added.
522 * @data: Additional data for evaluating restriction.
524 * Reject the addition of any links to a keyring. It can be overridden by
525 * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
526 * adding a key to a keyring.
528 * This is meant to be stored in a key_restriction structure which is passed
529 * in the restrict_link parameter to keyring_alloc().
531 int restrict_link_reject(struct key
*keyring
,
532 const struct key_type
*type
,
533 const union key_payload
*payload
,
534 struct key
*restriction_key
)
540 * By default, we keys found by getting an exact match on their descriptions.
542 bool key_default_cmp(const struct key
*key
,
543 const struct key_match_data
*match_data
)
545 return strcmp(key
->description
, match_data
->raw_data
) == 0;
549 * Iteration function to consider each key found.
551 static int keyring_search_iterator(const void *object
, void *iterator_data
)
553 struct keyring_search_context
*ctx
= iterator_data
;
554 const struct key
*key
= keyring_ptr_to_key(object
);
555 unsigned long kflags
= READ_ONCE(key
->flags
);
556 short state
= READ_ONCE(key
->state
);
558 kenter("{%d}", key
->serial
);
560 /* ignore keys not of this type */
561 if (key
->type
!= ctx
->index_key
.type
) {
562 kleave(" = 0 [!type]");
566 /* skip invalidated, revoked and expired keys */
567 if (ctx
->flags
& KEYRING_SEARCH_DO_STATE_CHECK
) {
568 if (kflags
& ((1 << KEY_FLAG_INVALIDATED
) |
569 (1 << KEY_FLAG_REVOKED
))) {
570 ctx
->result
= ERR_PTR(-EKEYREVOKED
);
571 kleave(" = %d [invrev]", ctx
->skipped_ret
);
575 if (key
->expiry
&& ctx
->now
.tv_sec
>= key
->expiry
) {
576 if (!(ctx
->flags
& KEYRING_SEARCH_SKIP_EXPIRED
))
577 ctx
->result
= ERR_PTR(-EKEYEXPIRED
);
578 kleave(" = %d [expire]", ctx
->skipped_ret
);
583 /* keys that don't match */
584 if (!ctx
->match_data
.cmp(key
, &ctx
->match_data
)) {
585 kleave(" = 0 [!match]");
589 /* key must have search permissions */
590 if (!(ctx
->flags
& KEYRING_SEARCH_NO_CHECK_PERM
) &&
591 key_task_permission(make_key_ref(key
, ctx
->possessed
),
592 ctx
->cred
, KEY_NEED_SEARCH
) < 0) {
593 ctx
->result
= ERR_PTR(-EACCES
);
594 kleave(" = %d [!perm]", ctx
->skipped_ret
);
598 if (ctx
->flags
& KEYRING_SEARCH_DO_STATE_CHECK
) {
599 /* we set a different error code if we pass a negative key */
601 ctx
->result
= ERR_PTR(state
);
602 kleave(" = %d [neg]", ctx
->skipped_ret
);
608 ctx
->result
= make_key_ref(key
, ctx
->possessed
);
609 kleave(" = 1 [found]");
613 return ctx
->skipped_ret
;
617 * Search inside a keyring for a key. We can search by walking to it
618 * directly based on its index-key or we can iterate over the entire
619 * tree looking for it, based on the match function.
621 static int search_keyring(struct key
*keyring
, struct keyring_search_context
*ctx
)
623 if (ctx
->match_data
.lookup_type
== KEYRING_SEARCH_LOOKUP_DIRECT
) {
626 object
= assoc_array_find(&keyring
->keys
,
627 &keyring_assoc_array_ops
,
629 return object
? ctx
->iterator(object
, ctx
) : 0;
631 return assoc_array_iterate(&keyring
->keys
, ctx
->iterator
, ctx
);
635 * Search a tree of keyrings that point to other keyrings up to the maximum
638 static bool search_nested_keyrings(struct key
*keyring
,
639 struct keyring_search_context
*ctx
)
643 struct assoc_array_node
*node
;
645 } stack
[KEYRING_SEARCH_MAX_DEPTH
];
647 struct assoc_array_shortcut
*shortcut
;
648 struct assoc_array_node
*node
;
649 struct assoc_array_ptr
*ptr
;
653 kenter("{%d},{%s,%s}",
655 ctx
->index_key
.type
->name
,
656 ctx
->index_key
.description
);
658 #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
659 BUG_ON((ctx
->flags
& STATE_CHECKS
) == 0 ||
660 (ctx
->flags
& STATE_CHECKS
) == STATE_CHECKS
);
662 if (ctx
->index_key
.description
)
663 ctx
->index_key
.desc_len
= strlen(ctx
->index_key
.description
);
665 /* Check to see if this top-level keyring is what we are looking for
666 * and whether it is valid or not.
668 if (ctx
->match_data
.lookup_type
== KEYRING_SEARCH_LOOKUP_ITERATE
||
669 keyring_compare_object(keyring
, &ctx
->index_key
)) {
670 ctx
->skipped_ret
= 2;
671 switch (ctx
->iterator(keyring_key_to_ptr(keyring
), ctx
)) {
681 ctx
->skipped_ret
= 0;
683 /* Start processing a new keyring */
685 kdebug("descend to %d", keyring
->serial
);
686 if (keyring
->flags
& ((1 << KEY_FLAG_INVALIDATED
) |
687 (1 << KEY_FLAG_REVOKED
)))
688 goto not_this_keyring
;
690 /* Search through the keys in this keyring before its searching its
693 if (search_keyring(keyring
, ctx
))
696 /* Then manually iterate through the keyrings nested in this one.
698 * Start from the root node of the index tree. Because of the way the
699 * hash function has been set up, keyrings cluster on the leftmost
700 * branch of the root node (root slot 0) or in the root node itself.
701 * Non-keyrings avoid the leftmost branch of the root entirely (root
704 ptr
= READ_ONCE(keyring
->keys
.root
);
706 goto not_this_keyring
;
708 if (assoc_array_ptr_is_shortcut(ptr
)) {
709 /* If the root is a shortcut, either the keyring only contains
710 * keyring pointers (everything clusters behind root slot 0) or
711 * doesn't contain any keyring pointers.
713 shortcut
= assoc_array_ptr_to_shortcut(ptr
);
714 smp_read_barrier_depends();
715 if ((shortcut
->index_key
[0] & ASSOC_ARRAY_FAN_MASK
) != 0)
716 goto not_this_keyring
;
718 ptr
= READ_ONCE(shortcut
->next_node
);
719 node
= assoc_array_ptr_to_node(ptr
);
723 node
= assoc_array_ptr_to_node(ptr
);
724 smp_read_barrier_depends();
726 ptr
= node
->slots
[0];
727 if (!assoc_array_ptr_is_meta(ptr
))
731 /* Descend to a more distal node in this keyring's content tree and go
735 if (assoc_array_ptr_is_shortcut(ptr
)) {
736 shortcut
= assoc_array_ptr_to_shortcut(ptr
);
737 smp_read_barrier_depends();
738 ptr
= READ_ONCE(shortcut
->next_node
);
739 BUG_ON(!assoc_array_ptr_is_node(ptr
));
741 node
= assoc_array_ptr_to_node(ptr
);
744 kdebug("begin_node");
745 smp_read_barrier_depends();
748 /* Go through the slots in a node */
749 for (; slot
< ASSOC_ARRAY_FAN_OUT
; slot
++) {
750 ptr
= READ_ONCE(node
->slots
[slot
]);
752 if (assoc_array_ptr_is_meta(ptr
) && node
->back_pointer
)
753 goto descend_to_node
;
755 if (!keyring_ptr_is_keyring(ptr
))
758 key
= keyring_ptr_to_key(ptr
);
760 if (sp
>= KEYRING_SEARCH_MAX_DEPTH
) {
761 if (ctx
->flags
& KEYRING_SEARCH_DETECT_TOO_DEEP
) {
762 ctx
->result
= ERR_PTR(-ELOOP
);
765 goto not_this_keyring
;
768 /* Search a nested keyring */
769 if (!(ctx
->flags
& KEYRING_SEARCH_NO_CHECK_PERM
) &&
770 key_task_permission(make_key_ref(key
, ctx
->possessed
),
771 ctx
->cred
, KEY_NEED_SEARCH
) < 0)
774 /* stack the current position */
775 stack
[sp
].keyring
= keyring
;
776 stack
[sp
].node
= node
;
777 stack
[sp
].slot
= slot
;
780 /* begin again with the new keyring */
782 goto descend_to_keyring
;
785 /* We've dealt with all the slots in the current node, so now we need
786 * to ascend to the parent and continue processing there.
788 ptr
= READ_ONCE(node
->back_pointer
);
789 slot
= node
->parent_slot
;
791 if (ptr
&& assoc_array_ptr_is_shortcut(ptr
)) {
792 shortcut
= assoc_array_ptr_to_shortcut(ptr
);
793 smp_read_barrier_depends();
794 ptr
= READ_ONCE(shortcut
->back_pointer
);
795 slot
= shortcut
->parent_slot
;
798 goto not_this_keyring
;
799 node
= assoc_array_ptr_to_node(ptr
);
800 smp_read_barrier_depends();
803 /* If we've ascended to the root (zero backpointer), we must have just
804 * finished processing the leftmost branch rather than the root slots -
805 * so there can't be any more keyrings for us to find.
807 if (node
->back_pointer
) {
808 kdebug("ascend %d", slot
);
812 /* The keyring we're looking at was disqualified or didn't contain a
816 kdebug("not_this_keyring %d", sp
);
822 /* Resume the processing of a keyring higher up in the tree */
824 keyring
= stack
[sp
].keyring
;
825 node
= stack
[sp
].node
;
826 slot
= stack
[sp
].slot
+ 1;
827 kdebug("ascend to %d [%d]", keyring
->serial
, slot
);
830 /* We found a viable match */
832 key
= key_ref_to_ptr(ctx
->result
);
834 if (!(ctx
->flags
& KEYRING_SEARCH_NO_UPDATE_TIME
)) {
835 key
->last_used_at
= ctx
->now
.tv_sec
;
836 keyring
->last_used_at
= ctx
->now
.tv_sec
;
838 stack
[--sp
].keyring
->last_used_at
= ctx
->now
.tv_sec
;
845 * keyring_search_aux - Search a keyring tree for a key matching some criteria
846 * @keyring_ref: A pointer to the keyring with possession indicator.
847 * @ctx: The keyring search context.
849 * Search the supplied keyring tree for a key that matches the criteria given.
850 * The root keyring and any linked keyrings must grant Search permission to the
851 * caller to be searchable and keys can only be found if they too grant Search
852 * to the caller. The possession flag on the root keyring pointer controls use
853 * of the possessor bits in permissions checking of the entire tree. In
854 * addition, the LSM gets to forbid keyring searches and key matches.
856 * The search is performed as a breadth-then-depth search up to the prescribed
857 * limit (KEYRING_SEARCH_MAX_DEPTH).
859 * Keys are matched to the type provided and are then filtered by the match
860 * function, which is given the description to use in any way it sees fit. The
861 * match function may use any attributes of a key that it wishes to to
862 * determine the match. Normally the match function from the key type would be
865 * RCU can be used to prevent the keyring key lists from disappearing without
866 * the need to take lots of locks.
868 * Returns a pointer to the found key and increments the key usage count if
869 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
870 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
871 * specified keyring wasn't a keyring.
873 * In the case of a successful return, the possession attribute from
874 * @keyring_ref is propagated to the returned key reference.
876 key_ref_t
keyring_search_aux(key_ref_t keyring_ref
,
877 struct keyring_search_context
*ctx
)
882 ctx
->iterator
= keyring_search_iterator
;
883 ctx
->possessed
= is_key_possessed(keyring_ref
);
884 ctx
->result
= ERR_PTR(-EAGAIN
);
886 keyring
= key_ref_to_ptr(keyring_ref
);
889 if (keyring
->type
!= &key_type_keyring
)
890 return ERR_PTR(-ENOTDIR
);
892 if (!(ctx
->flags
& KEYRING_SEARCH_NO_CHECK_PERM
)) {
893 err
= key_task_permission(keyring_ref
, ctx
->cred
, KEY_NEED_SEARCH
);
899 ctx
->now
= current_kernel_time();
900 if (search_nested_keyrings(keyring
, ctx
))
901 __key_get(key_ref_to_ptr(ctx
->result
));
907 * keyring_search - Search the supplied keyring tree for a matching key
908 * @keyring: The root of the keyring tree to be searched.
909 * @type: The type of keyring we want to find.
910 * @description: The name of the keyring we want to find.
912 * As keyring_search_aux() above, but using the current task's credentials and
913 * type's default matching function and preferred search method.
915 key_ref_t
keyring_search(key_ref_t keyring
,
916 struct key_type
*type
,
917 const char *description
)
919 struct keyring_search_context ctx
= {
920 .index_key
.type
= type
,
921 .index_key
.description
= description
,
922 .cred
= current_cred(),
923 .match_data
.cmp
= key_default_cmp
,
924 .match_data
.raw_data
= description
,
925 .match_data
.lookup_type
= KEYRING_SEARCH_LOOKUP_DIRECT
,
926 .flags
= KEYRING_SEARCH_DO_STATE_CHECK
,
931 if (type
->match_preparse
) {
932 ret
= type
->match_preparse(&ctx
.match_data
);
937 key
= keyring_search_aux(keyring
, &ctx
);
939 if (type
->match_free
)
940 type
->match_free(&ctx
.match_data
);
943 EXPORT_SYMBOL(keyring_search
);
945 static struct key_restriction
*keyring_restriction_alloc(
946 key_restrict_link_func_t check
)
948 struct key_restriction
*keyres
=
949 kzalloc(sizeof(struct key_restriction
), GFP_KERNEL
);
952 return ERR_PTR(-ENOMEM
);
954 keyres
->check
= check
;
960 * Semaphore to serialise restriction setup to prevent reference count
961 * cycles through restriction key pointers.
963 static DECLARE_RWSEM(keyring_serialise_restrict_sem
);
966 * Check for restriction cycles that would prevent keyring garbage collection.
967 * keyring_serialise_restrict_sem must be held.
969 static bool keyring_detect_restriction_cycle(const struct key
*dest_keyring
,
970 struct key_restriction
*keyres
)
972 while (keyres
&& keyres
->key
&&
973 keyres
->key
->type
== &key_type_keyring
) {
974 if (keyres
->key
== dest_keyring
)
977 keyres
= keyres
->key
->restrict_link
;
984 * keyring_restrict - Look up and apply a restriction to a keyring
986 * @keyring: The keyring to be restricted
987 * @restriction: The restriction options to apply to the keyring
989 int keyring_restrict(key_ref_t keyring_ref
, const char *type
,
990 const char *restriction
)
993 struct key_type
*restrict_type
= NULL
;
994 struct key_restriction
*restrict_link
;
997 keyring
= key_ref_to_ptr(keyring_ref
);
1000 if (keyring
->type
!= &key_type_keyring
)
1004 restrict_link
= keyring_restriction_alloc(restrict_link_reject
);
1006 restrict_type
= key_type_lookup(type
);
1008 if (IS_ERR(restrict_type
))
1009 return PTR_ERR(restrict_type
);
1011 if (!restrict_type
->lookup_restriction
) {
1016 restrict_link
= restrict_type
->lookup_restriction(restriction
);
1019 if (IS_ERR(restrict_link
)) {
1020 ret
= PTR_ERR(restrict_link
);
1024 down_write(&keyring
->sem
);
1025 down_write(&keyring_serialise_restrict_sem
);
1027 if (keyring
->restrict_link
)
1029 else if (keyring_detect_restriction_cycle(keyring
, restrict_link
))
1032 keyring
->restrict_link
= restrict_link
;
1034 up_write(&keyring_serialise_restrict_sem
);
1035 up_write(&keyring
->sem
);
1038 key_put(restrict_link
->key
);
1039 kfree(restrict_link
);
1044 key_type_put(restrict_type
);
1048 EXPORT_SYMBOL(keyring_restrict
);
1051 * Search the given keyring for a key that might be updated.
1053 * The caller must guarantee that the keyring is a keyring and that the
1054 * permission is granted to modify the keyring as no check is made here. The
1055 * caller must also hold a lock on the keyring semaphore.
1057 * Returns a pointer to the found key with usage count incremented if
1058 * successful and returns NULL if not found. Revoked and invalidated keys are
1061 * If successful, the possession indicator is propagated from the keyring ref
1062 * to the returned key reference.
1064 key_ref_t
find_key_to_update(key_ref_t keyring_ref
,
1065 const struct keyring_index_key
*index_key
)
1067 struct key
*keyring
, *key
;
1070 keyring
= key_ref_to_ptr(keyring_ref
);
1072 kenter("{%d},{%s,%s}",
1073 keyring
->serial
, index_key
->type
->name
, index_key
->description
);
1075 object
= assoc_array_find(&keyring
->keys
, &keyring_assoc_array_ops
,
1085 key
= keyring_ptr_to_key(object
);
1086 if (key
->flags
& ((1 << KEY_FLAG_INVALIDATED
) |
1087 (1 << KEY_FLAG_REVOKED
))) {
1088 kleave(" = NULL [x]");
1092 kleave(" = {%d}", key
->serial
);
1093 return make_key_ref(key
, is_key_possessed(keyring_ref
));
1097 * Find a keyring with the specified name.
1099 * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
1100 * user in the current user namespace are considered. If @uid_keyring is %true,
1101 * the keyring additionally must have been allocated as a user or user session
1102 * keyring; otherwise, it must grant Search permission directly to the caller.
1104 * Returns a pointer to the keyring with the keyring's refcount having being
1105 * incremented on success. -ENOKEY is returned if a key could not be found.
1107 struct key
*find_keyring_by_name(const char *name
, bool uid_keyring
)
1109 struct key
*keyring
;
1113 return ERR_PTR(-EINVAL
);
1115 bucket
= keyring_hash(name
);
1117 read_lock(&keyring_name_lock
);
1119 if (keyring_name_hash
[bucket
].next
) {
1120 /* search this hash bucket for a keyring with a matching name
1121 * that's readable and that hasn't been revoked */
1122 list_for_each_entry(keyring
,
1123 &keyring_name_hash
[bucket
],
1126 if (!kuid_has_mapping(current_user_ns(), keyring
->user
->uid
))
1129 if (test_bit(KEY_FLAG_REVOKED
, &keyring
->flags
))
1132 if (strcmp(keyring
->description
, name
) != 0)
1136 if (!test_bit(KEY_FLAG_UID_KEYRING
,
1140 if (key_permission(make_key_ref(keyring
, 0),
1141 KEY_NEED_SEARCH
) < 0)
1145 /* we've got a match but we might end up racing with
1146 * key_cleanup() if the keyring is currently 'dead'
1147 * (ie. it has a zero usage count) */
1148 if (!refcount_inc_not_zero(&keyring
->usage
))
1150 keyring
->last_used_at
= current_kernel_time().tv_sec
;
1155 keyring
= ERR_PTR(-ENOKEY
);
1157 read_unlock(&keyring_name_lock
);
1161 static int keyring_detect_cycle_iterator(const void *object
,
1162 void *iterator_data
)
1164 struct keyring_search_context
*ctx
= iterator_data
;
1165 const struct key
*key
= keyring_ptr_to_key(object
);
1167 kenter("{%d}", key
->serial
);
1169 /* We might get a keyring with matching index-key that is nonetheless a
1170 * different keyring. */
1171 if (key
!= ctx
->match_data
.raw_data
)
1174 ctx
->result
= ERR_PTR(-EDEADLK
);
1179 * See if a cycle will will be created by inserting acyclic tree B in acyclic
1180 * tree A at the topmost level (ie: as a direct child of A).
1182 * Since we are adding B to A at the top level, checking for cycles should just
1183 * be a matter of seeing if node A is somewhere in tree B.
1185 static int keyring_detect_cycle(struct key
*A
, struct key
*B
)
1187 struct keyring_search_context ctx
= {
1188 .index_key
= A
->index_key
,
1189 .match_data
.raw_data
= A
,
1190 .match_data
.lookup_type
= KEYRING_SEARCH_LOOKUP_DIRECT
,
1191 .iterator
= keyring_detect_cycle_iterator
,
1192 .flags
= (KEYRING_SEARCH_NO_STATE_CHECK
|
1193 KEYRING_SEARCH_NO_UPDATE_TIME
|
1194 KEYRING_SEARCH_NO_CHECK_PERM
|
1195 KEYRING_SEARCH_DETECT_TOO_DEEP
),
1199 search_nested_keyrings(B
, &ctx
);
1201 return PTR_ERR(ctx
.result
) == -EAGAIN
? 0 : PTR_ERR(ctx
.result
);
1205 * Preallocate memory so that a key can be linked into to a keyring.
1207 int __key_link_begin(struct key
*keyring
,
1208 const struct keyring_index_key
*index_key
,
1209 struct assoc_array_edit
**_edit
)
1210 __acquires(&keyring
->sem
)
1211 __acquires(&keyring_serialise_link_sem
)
1213 struct assoc_array_edit
*edit
;
1217 keyring
->serial
, index_key
->type
->name
, index_key
->description
);
1219 BUG_ON(index_key
->desc_len
== 0);
1221 if (keyring
->type
!= &key_type_keyring
)
1224 down_write(&keyring
->sem
);
1227 if (test_bit(KEY_FLAG_REVOKED
, &keyring
->flags
))
1230 /* serialise link/link calls to prevent parallel calls causing a cycle
1231 * when linking two keyring in opposite orders */
1232 if (index_key
->type
== &key_type_keyring
)
1233 down_write(&keyring_serialise_link_sem
);
1235 /* Create an edit script that will insert/replace the key in the
1238 edit
= assoc_array_insert(&keyring
->keys
,
1239 &keyring_assoc_array_ops
,
1243 ret
= PTR_ERR(edit
);
1247 /* If we're not replacing a link in-place then we're going to need some
1250 if (!edit
->dead_leaf
) {
1251 ret
= key_payload_reserve(keyring
,
1252 keyring
->datalen
+ KEYQUOTA_LINK_BYTES
);
1262 assoc_array_cancel_edit(edit
);
1264 if (index_key
->type
== &key_type_keyring
)
1265 up_write(&keyring_serialise_link_sem
);
1267 up_write(&keyring
->sem
);
1268 kleave(" = %d", ret
);
1273 * Check already instantiated keys aren't going to be a problem.
1275 * The caller must have called __key_link_begin(). Don't need to call this for
1276 * keys that were created since __key_link_begin() was called.
1278 int __key_link_check_live_key(struct key
*keyring
, struct key
*key
)
1280 if (key
->type
== &key_type_keyring
)
1281 /* check that we aren't going to create a cycle by linking one
1282 * keyring to another */
1283 return keyring_detect_cycle(keyring
, key
);
1288 * Link a key into to a keyring.
1290 * Must be called with __key_link_begin() having being called. Discards any
1291 * already extant link to matching key if there is one, so that each keyring
1292 * holds at most one link to any given key of a particular type+description
1295 void __key_link(struct key
*key
, struct assoc_array_edit
**_edit
)
1298 assoc_array_insert_set_object(*_edit
, keyring_key_to_ptr(key
));
1299 assoc_array_apply_edit(*_edit
);
1304 * Finish linking a key into to a keyring.
1306 * Must be called with __key_link_begin() having being called.
1308 void __key_link_end(struct key
*keyring
,
1309 const struct keyring_index_key
*index_key
,
1310 struct assoc_array_edit
*edit
)
1311 __releases(&keyring
->sem
)
1312 __releases(&keyring_serialise_link_sem
)
1314 BUG_ON(index_key
->type
== NULL
);
1315 kenter("%d,%s,", keyring
->serial
, index_key
->type
->name
);
1317 if (index_key
->type
== &key_type_keyring
)
1318 up_write(&keyring_serialise_link_sem
);
1321 if (!edit
->dead_leaf
) {
1322 key_payload_reserve(keyring
,
1323 keyring
->datalen
- KEYQUOTA_LINK_BYTES
);
1325 assoc_array_cancel_edit(edit
);
1327 up_write(&keyring
->sem
);
1331 * Check addition of keys to restricted keyrings.
1333 static int __key_link_check_restriction(struct key
*keyring
, struct key
*key
)
1335 if (!keyring
->restrict_link
|| !keyring
->restrict_link
->check
)
1337 return keyring
->restrict_link
->check(keyring
, key
->type
, &key
->payload
,
1338 keyring
->restrict_link
->key
);
1342 * key_link - Link a key to a keyring
1343 * @keyring: The keyring to make the link in.
1344 * @key: The key to link to.
1346 * Make a link in a keyring to a key, such that the keyring holds a reference
1347 * on that key and the key can potentially be found by searching that keyring.
1349 * This function will write-lock the keyring's semaphore and will consume some
1350 * of the user's key data quota to hold the link.
1352 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
1353 * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
1354 * full, -EDQUOT if there is insufficient key data quota remaining to add
1355 * another link or -ENOMEM if there's insufficient memory.
1357 * It is assumed that the caller has checked that it is permitted for a link to
1358 * be made (the keyring should have Write permission and the key Link
1361 int key_link(struct key
*keyring
, struct key
*key
)
1363 struct assoc_array_edit
*edit
;
1366 kenter("{%d,%d}", keyring
->serial
, refcount_read(&keyring
->usage
));
1371 ret
= __key_link_begin(keyring
, &key
->index_key
, &edit
);
1373 kdebug("begun {%d,%d}", keyring
->serial
, refcount_read(&keyring
->usage
));
1374 ret
= __key_link_check_restriction(keyring
, key
);
1376 ret
= __key_link_check_live_key(keyring
, key
);
1378 __key_link(key
, &edit
);
1379 __key_link_end(keyring
, &key
->index_key
, edit
);
1382 kleave(" = %d {%d,%d}", ret
, keyring
->serial
, refcount_read(&keyring
->usage
));
1385 EXPORT_SYMBOL(key_link
);
1388 * key_unlink - Unlink the first link to a key from a keyring.
1389 * @keyring: The keyring to remove the link from.
1390 * @key: The key the link is to.
1392 * Remove a link from a keyring to a key.
1394 * This function will write-lock the keyring's semaphore.
1396 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
1397 * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
1400 * It is assumed that the caller has checked that it is permitted for a link to
1401 * be removed (the keyring should have Write permission; no permissions are
1402 * required on the key).
1404 int key_unlink(struct key
*keyring
, struct key
*key
)
1406 struct assoc_array_edit
*edit
;
1412 if (keyring
->type
!= &key_type_keyring
)
1415 down_write(&keyring
->sem
);
1417 edit
= assoc_array_delete(&keyring
->keys
, &keyring_assoc_array_ops
,
1420 ret
= PTR_ERR(edit
);
1427 assoc_array_apply_edit(edit
);
1428 key_payload_reserve(keyring
, keyring
->datalen
- KEYQUOTA_LINK_BYTES
);
1432 up_write(&keyring
->sem
);
1435 EXPORT_SYMBOL(key_unlink
);
1438 * keyring_clear - Clear a keyring
1439 * @keyring: The keyring to clear.
1441 * Clear the contents of the specified keyring.
1443 * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
1445 int keyring_clear(struct key
*keyring
)
1447 struct assoc_array_edit
*edit
;
1450 if (keyring
->type
!= &key_type_keyring
)
1453 down_write(&keyring
->sem
);
1455 edit
= assoc_array_clear(&keyring
->keys
, &keyring_assoc_array_ops
);
1457 ret
= PTR_ERR(edit
);
1460 assoc_array_apply_edit(edit
);
1461 key_payload_reserve(keyring
, 0);
1465 up_write(&keyring
->sem
);
1468 EXPORT_SYMBOL(keyring_clear
);
1471 * Dispose of the links from a revoked keyring.
1473 * This is called with the key sem write-locked.
1475 static void keyring_revoke(struct key
*keyring
)
1477 struct assoc_array_edit
*edit
;
1479 edit
= assoc_array_clear(&keyring
->keys
, &keyring_assoc_array_ops
);
1480 if (!IS_ERR(edit
)) {
1482 assoc_array_apply_edit(edit
);
1483 key_payload_reserve(keyring
, 0);
1487 static bool keyring_gc_select_iterator(void *object
, void *iterator_data
)
1489 struct key
*key
= keyring_ptr_to_key(object
);
1490 time_t *limit
= iterator_data
;
1492 if (key_is_dead(key
, *limit
))
1498 static int keyring_gc_check_iterator(const void *object
, void *iterator_data
)
1500 const struct key
*key
= keyring_ptr_to_key(object
);
1501 time_t *limit
= iterator_data
;
1504 return key_is_dead(key
, *limit
);
1508 * Garbage collect pointers from a keyring.
1510 * Not called with any locks held. The keyring's key struct will not be
1511 * deallocated under us as only our caller may deallocate it.
1513 void keyring_gc(struct key
*keyring
, time_t limit
)
1517 kenter("%x{%s}", keyring
->serial
, keyring
->description
?: "");
1519 if (keyring
->flags
& ((1 << KEY_FLAG_INVALIDATED
) |
1520 (1 << KEY_FLAG_REVOKED
)))
1523 /* scan the keyring looking for dead keys */
1525 result
= assoc_array_iterate(&keyring
->keys
,
1526 keyring_gc_check_iterator
, &limit
);
1536 down_write(&keyring
->sem
);
1537 assoc_array_gc(&keyring
->keys
, &keyring_assoc_array_ops
,
1538 keyring_gc_select_iterator
, &limit
);
1539 up_write(&keyring
->sem
);
1544 * Garbage collect restriction pointers from a keyring.
1546 * Keyring restrictions are associated with a key type, and must be cleaned
1547 * up if the key type is unregistered. The restriction is altered to always
1548 * reject additional keys so a keyring cannot be opened up by unregistering
1551 * Not called with any keyring locks held. The keyring's key struct will not
1552 * be deallocated under us as only our caller may deallocate it.
1554 * The caller is required to hold key_types_sem and dead_type->sem. This is
1555 * fulfilled by key_gc_keytype() holding the locks on behalf of
1556 * key_garbage_collector(), which it invokes on a workqueue.
1558 void keyring_restriction_gc(struct key
*keyring
, struct key_type
*dead_type
)
1560 struct key_restriction
*keyres
;
1562 kenter("%x{%s}", keyring
->serial
, keyring
->description
?: "");
1565 * keyring->restrict_link is only assigned at key allocation time
1566 * or with the key type locked, so the only values that could be
1567 * concurrently assigned to keyring->restrict_link are for key
1568 * types other than dead_type. Given this, it's ok to check
1569 * the key type before acquiring keyring->sem.
1571 if (!dead_type
|| !keyring
->restrict_link
||
1572 keyring
->restrict_link
->keytype
!= dead_type
) {
1573 kleave(" [no restriction gc]");
1577 /* Lock the keyring to ensure that a link is not in progress */
1578 down_write(&keyring
->sem
);
1580 keyres
= keyring
->restrict_link
;
1582 keyres
->check
= restrict_link_reject
;
1584 key_put(keyres
->key
);
1586 keyres
->keytype
= NULL
;
1588 up_write(&keyring
->sem
);
1590 kleave(" [restriction gc]");