/*
 * Implementation of the Domain-Based Mandatory Access Control.
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 */
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
19 /* Size of an element. */
20 static const u8 tomoyo_element_size
[TOMOYO_MAX_POLICY
] = {
21 [TOMOYO_ID_GROUP
] = sizeof(struct tomoyo_group
),
22 [TOMOYO_ID_PATH_GROUP
] = sizeof(struct tomoyo_path_group
),
23 [TOMOYO_ID_NUMBER_GROUP
] = sizeof(struct tomoyo_number_group
),
24 [TOMOYO_ID_AGGREGATOR
] = sizeof(struct tomoyo_aggregator
),
25 [TOMOYO_ID_TRANSITION_CONTROL
] =
26 sizeof(struct tomoyo_transition_control
),
27 [TOMOYO_ID_MANAGER
] = sizeof(struct tomoyo_manager
),
28 /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
30 tomoyo_acl_size["struct tomoyo_acl_info"->type], */
31 [TOMOYO_ID_DOMAIN
] = sizeof(struct tomoyo_domain_info
),
34 /* Size of a domain ACL element. */
35 static const u8 tomoyo_acl_size
[] = {
36 [TOMOYO_TYPE_PATH_ACL
] = sizeof(struct tomoyo_path_acl
),
37 [TOMOYO_TYPE_PATH2_ACL
] = sizeof(struct tomoyo_path2_acl
),
38 [TOMOYO_TYPE_PATH_NUMBER_ACL
] = sizeof(struct tomoyo_path_number_acl
),
39 [TOMOYO_TYPE_MKDEV_ACL
] = sizeof(struct tomoyo_mkdev_acl
),
40 [TOMOYO_TYPE_MOUNT_ACL
] = sizeof(struct tomoyo_mount_acl
),
44 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
46 * @element: Pointer to "struct list_head".
48 * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
51 static bool tomoyo_struct_used_by_io_buffer(const struct list_head
*element
)
53 struct tomoyo_io_buffer
*head
;
56 spin_lock(&tomoyo_io_buffer_list_lock
);
57 list_for_each_entry(head
, &tomoyo_io_buffer_list
, list
) {
59 spin_unlock(&tomoyo_io_buffer_list_lock
);
60 if (mutex_lock_interruptible(&head
->io_sem
)) {
64 if (head
->r
.domain
== element
|| head
->r
.group
== element
||
65 head
->r
.acl
== element
|| &head
->w
.domain
->list
== element
)
67 mutex_unlock(&head
->io_sem
);
69 spin_lock(&tomoyo_io_buffer_list_lock
);
74 spin_unlock(&tomoyo_io_buffer_list_lock
);
79 * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
81 * @string: String to check.
82 * @size: Memory allocated for @string .
84 * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
87 static bool tomoyo_name_used_by_io_buffer(const char *string
,
90 struct tomoyo_io_buffer
*head
;
93 spin_lock(&tomoyo_io_buffer_list_lock
);
94 list_for_each_entry(head
, &tomoyo_io_buffer_list
, list
) {
97 spin_unlock(&tomoyo_io_buffer_list_lock
);
98 if (mutex_lock_interruptible(&head
->io_sem
)) {
102 for (i
= 0; i
< TOMOYO_MAX_IO_READ_QUEUE
; i
++) {
103 const char *w
= head
->r
.w
[i
];
104 if (w
< string
|| w
> string
+ size
)
109 mutex_unlock(&head
->io_sem
);
111 spin_lock(&tomoyo_io_buffer_list_lock
);
116 spin_unlock(&tomoyo_io_buffer_list_lock
);
120 /* Structure for garbage collection. */
122 struct list_head list
;
123 enum tomoyo_policy_id type
;
125 struct list_head
*element
;
127 /* List of entries to be deleted. */
128 static LIST_HEAD(tomoyo_gc_list
);
129 /* Length of tomoyo_gc_list. */
130 static int tomoyo_gc_list_len
;
133 * tomoyo_add_to_gc - Add an entry to to be deleted list.
135 * @type: One of values in "enum tomoyo_policy_id".
136 * @element: Pointer to "struct list_head".
138 * Returns true on success, false otherwise.
140 * Caller holds tomoyo_policy_lock mutex.
142 * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
143 * entries at once, it will take too long time. Thus, do not add more than 128
144 * entries per a scan. But to be able to handle worst case where all entries
145 * are in-use, we accept one more entry per a scan.
147 * If we use singly linked list using "struct list_head"->prev (which is
148 * LIST_POISON2), we can avoid kmalloc().
150 static bool tomoyo_add_to_gc(const int type
, struct list_head
*element
)
152 struct tomoyo_gc
*entry
= kzalloc(sizeof(*entry
), GFP_ATOMIC
);
156 if (type
== TOMOYO_ID_ACL
)
157 entry
->size
= tomoyo_acl_size
[
158 container_of(element
,
159 typeof(struct tomoyo_acl_info
),
161 else if (type
== TOMOYO_ID_NAME
)
162 entry
->size
= strlen(container_of(element
,
163 typeof(struct tomoyo_name
),
164 head
.list
)->entry
.name
) + 1;
166 entry
->size
= tomoyo_element_size
[type
];
167 entry
->element
= element
;
168 list_add(&entry
->list
, &tomoyo_gc_list
);
169 list_del_rcu(element
);
170 return tomoyo_gc_list_len
++ < 128;
174 * tomoyo_element_linked_by_gc - Validate next element of an entry.
176 * @element: Pointer to an element.
177 * @size: Size of @element in byte.
179 * Returns true if @element is linked by other elements in the garbage
180 * collector's queue, false otherwise.
182 static bool tomoyo_element_linked_by_gc(const u8
*element
, const size_t size
)
185 list_for_each_entry(p
, &tomoyo_gc_list
, list
) {
186 const u8
*ptr
= (const u8
*) p
->element
->next
;
187 if (ptr
< element
|| element
+ size
< ptr
)
195 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
197 * @element: Pointer to "struct list_head".
201 static void tomoyo_del_transition_control(struct list_head
*element
)
203 struct tomoyo_transition_control
*ptr
=
204 container_of(element
, typeof(*ptr
), head
.list
);
205 tomoyo_put_name(ptr
->domainname
);
206 tomoyo_put_name(ptr
->program
);
210 * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
212 * @element: Pointer to "struct list_head".
216 static void tomoyo_del_aggregator(struct list_head
*element
)
218 struct tomoyo_aggregator
*ptr
=
219 container_of(element
, typeof(*ptr
), head
.list
);
220 tomoyo_put_name(ptr
->original_name
);
221 tomoyo_put_name(ptr
->aggregated_name
);
225 * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
227 * @element: Pointer to "struct list_head".
231 static void tomoyo_del_manager(struct list_head
*element
)
233 struct tomoyo_manager
*ptr
=
234 container_of(element
, typeof(*ptr
), head
.list
);
235 tomoyo_put_name(ptr
->manager
);
239 * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
241 * @element: Pointer to "struct list_head".
245 static void tomoyo_del_acl(struct list_head
*element
)
247 struct tomoyo_acl_info
*acl
=
248 container_of(element
, typeof(*acl
), list
);
250 case TOMOYO_TYPE_PATH_ACL
:
252 struct tomoyo_path_acl
*entry
253 = container_of(acl
, typeof(*entry
), head
);
254 tomoyo_put_name_union(&entry
->name
);
257 case TOMOYO_TYPE_PATH2_ACL
:
259 struct tomoyo_path2_acl
*entry
260 = container_of(acl
, typeof(*entry
), head
);
261 tomoyo_put_name_union(&entry
->name1
);
262 tomoyo_put_name_union(&entry
->name2
);
265 case TOMOYO_TYPE_PATH_NUMBER_ACL
:
267 struct tomoyo_path_number_acl
*entry
268 = container_of(acl
, typeof(*entry
), head
);
269 tomoyo_put_name_union(&entry
->name
);
270 tomoyo_put_number_union(&entry
->number
);
273 case TOMOYO_TYPE_MKDEV_ACL
:
275 struct tomoyo_mkdev_acl
*entry
276 = container_of(acl
, typeof(*entry
), head
);
277 tomoyo_put_name_union(&entry
->name
);
278 tomoyo_put_number_union(&entry
->mode
);
279 tomoyo_put_number_union(&entry
->major
);
280 tomoyo_put_number_union(&entry
->minor
);
283 case TOMOYO_TYPE_MOUNT_ACL
:
285 struct tomoyo_mount_acl
*entry
286 = container_of(acl
, typeof(*entry
), head
);
287 tomoyo_put_name_union(&entry
->dev_name
);
288 tomoyo_put_name_union(&entry
->dir_name
);
289 tomoyo_put_name_union(&entry
->fs_type
);
290 tomoyo_put_number_union(&entry
->flags
);
297 * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
299 * @element: Pointer to "struct list_head".
301 * Returns true if deleted, false otherwise.
303 static bool tomoyo_del_domain(struct list_head
*element
)
305 struct tomoyo_domain_info
*domain
=
306 container_of(element
, typeof(*domain
), list
);
307 struct tomoyo_acl_info
*acl
;
308 struct tomoyo_acl_info
*tmp
;
310 * Since we don't protect whole execve() operation using SRCU,
311 * we need to recheck domain->users at this point.
313 * (1) Reader starts SRCU section upon execve().
314 * (2) Reader traverses tomoyo_domain_list and finds this domain.
315 * (3) Writer marks this domain as deleted.
316 * (4) Garbage collector removes this domain from tomoyo_domain_list
317 * because this domain is marked as deleted and used by nobody.
318 * (5) Reader saves reference to this domain into
319 * "struct linux_binprm"->cred->security .
320 * (6) Reader finishes SRCU section, although execve() operation has
322 * (7) Garbage collector waits for SRCU synchronization.
323 * (8) Garbage collector kfree() this domain because this domain is
325 * (9) Reader finishes execve() operation and restores this domain from
326 * "struct linux_binprm"->cred->security.
328 * By updating domain->users at (5), we can solve this race problem
329 * by rechecking domain->users at (8).
331 if (atomic_read(&domain
->users
))
333 list_for_each_entry_safe(acl
, tmp
, &domain
->acl_info_list
, list
) {
334 tomoyo_del_acl(&acl
->list
);
335 tomoyo_memory_free(acl
);
337 tomoyo_put_name(domain
->domainname
);
343 * tomoyo_del_name - Delete members in "struct tomoyo_name".
345 * @element: Pointer to "struct list_head".
349 static void tomoyo_del_name(struct list_head
*element
)
351 const struct tomoyo_name
*ptr
=
352 container_of(element
, typeof(*ptr
), head
.list
);
356 * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
358 * @element: Pointer to "struct list_head".
362 static void tomoyo_del_path_group(struct list_head
*element
)
364 struct tomoyo_path_group
*member
=
365 container_of(element
, typeof(*member
), head
.list
);
366 tomoyo_put_name(member
->member_name
);
370 * tomoyo_del_group - Delete "struct tomoyo_group".
372 * @element: Pointer to "struct list_head".
376 static void tomoyo_del_group(struct list_head
*element
)
378 struct tomoyo_group
*group
=
379 container_of(element
, typeof(*group
), head
.list
);
380 tomoyo_put_name(group
->group_name
);
384 * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
386 * @element: Pointer to "struct list_head".
390 static void tomoyo_del_number_group(struct list_head
*element
)
392 struct tomoyo_number_group
*member
=
393 container_of(element
, typeof(*member
), head
.list
);
397 * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
399 * @id: One of values in "enum tomoyo_policy_id".
400 * @member_list: Pointer to "struct list_head".
402 * Returns true if some elements are deleted, false otherwise.
404 static bool tomoyo_collect_member(const enum tomoyo_policy_id id
,
405 struct list_head
*member_list
)
407 struct tomoyo_acl_head
*member
;
408 list_for_each_entry(member
, member_list
, list
) {
409 if (!member
->is_deleted
)
411 if (!tomoyo_add_to_gc(id
, &member
->list
))
418 * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
420 * @list: Pointer to "struct list_head".
422 * Returns true if some elements are deleted, false otherwise.
424 static bool tomoyo_collect_acl(struct list_head
*list
)
426 struct tomoyo_acl_info
*acl
;
427 list_for_each_entry(acl
, list
, list
) {
428 if (!acl
->is_deleted
)
430 if (!tomoyo_add_to_gc(TOMOYO_ID_ACL
, &acl
->list
))
437 * tomoyo_collect_entry - Scan lists for deleted elements.
441 static void tomoyo_collect_entry(void)
444 enum tomoyo_policy_id id
;
445 struct tomoyo_policy_namespace
*ns
;
447 if (mutex_lock_interruptible(&tomoyo_policy_lock
))
449 idx
= tomoyo_read_lock();
451 struct tomoyo_domain_info
*domain
;
452 list_for_each_entry_rcu(domain
, &tomoyo_domain_list
, list
) {
453 if (!tomoyo_collect_acl(&domain
->acl_info_list
))
455 if (!domain
->is_deleted
|| atomic_read(&domain
->users
))
458 * Nobody is referring this domain. But somebody may
459 * refer this domain after successful execve().
460 * We recheck domain->users after SRCU synchronization.
462 if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN
, &domain
->list
))
466 list_for_each_entry_rcu(ns
, &tomoyo_namespace_list
, namespace_list
) {
467 for (id
= 0; id
< TOMOYO_MAX_POLICY
; id
++)
468 if (!tomoyo_collect_member(id
, &ns
->policy_list
[id
]))
470 for (i
= 0; i
< TOMOYO_MAX_ACL_GROUPS
; i
++)
471 if (!tomoyo_collect_acl(&ns
->acl_group
[i
]))
473 for (i
= 0; i
< TOMOYO_MAX_GROUP
; i
++) {
474 struct list_head
*list
= &ns
->group_list
[i
];
475 struct tomoyo_group
*group
;
478 id
= TOMOYO_ID_PATH_GROUP
;
481 id
= TOMOYO_ID_NUMBER_GROUP
;
484 list_for_each_entry(group
, list
, head
.list
) {
485 if (!tomoyo_collect_member
486 (id
, &group
->member_list
))
488 if (!list_empty(&group
->member_list
) ||
489 atomic_read(&group
->head
.users
))
491 if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP
,
497 for (i
= 0; i
< TOMOYO_MAX_HASH
; i
++) {
498 struct list_head
*list
= &tomoyo_name_list
[i
];
499 struct tomoyo_shared_acl_head
*ptr
;
500 list_for_each_entry(ptr
, list
, list
) {
501 if (atomic_read(&ptr
->users
))
503 if (!tomoyo_add_to_gc(TOMOYO_ID_NAME
, &ptr
->list
))
508 tomoyo_read_unlock(idx
);
509 mutex_unlock(&tomoyo_policy_lock
);
513 * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
515 * Returns true if some entries were kfree()d, false otherwise.
517 static bool tomoyo_kfree_entry(void)
520 struct tomoyo_gc
*tmp
;
523 list_for_each_entry_safe(p
, tmp
, &tomoyo_gc_list
, list
) {
524 struct list_head
*element
= p
->element
;
527 * list_del_rcu() in tomoyo_add_to_gc() guarantees that the
528 * list element became no longer reachable from the list which
529 * the element was originally on (e.g. tomoyo_domain_list).
530 * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
531 * that the list element became no longer referenced by syscall
534 * However, there are three users which may still be using the
535 * list element. We need to defer until all of these users
536 * forget the list element.
538 * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
539 * group,acl} and "struct tomoyo_io_buffer"->w.domain forget
542 if (tomoyo_struct_used_by_io_buffer(element
))
545 * Secondly, defer until all other elements in the
546 * tomoyo_gc_list list forget the list element.
548 if (tomoyo_element_linked_by_gc((const u8
*) element
, p
->size
))
551 case TOMOYO_ID_TRANSITION_CONTROL
:
552 tomoyo_del_transition_control(element
);
554 case TOMOYO_ID_AGGREGATOR
:
555 tomoyo_del_aggregator(element
);
557 case TOMOYO_ID_MANAGER
:
558 tomoyo_del_manager(element
);
562 * Thirdly, defer until all "struct tomoyo_io_buffer"
563 * ->r.w[] forget the list element.
565 if (tomoyo_name_used_by_io_buffer(
566 container_of(element
, typeof(struct tomoyo_name
),
567 head
.list
)->entry
.name
, p
->size
))
569 tomoyo_del_name(element
);
572 tomoyo_del_acl(element
);
574 case TOMOYO_ID_DOMAIN
:
575 if (!tomoyo_del_domain(element
))
578 case TOMOYO_ID_PATH_GROUP
:
579 tomoyo_del_path_group(element
);
581 case TOMOYO_ID_GROUP
:
582 tomoyo_del_group(element
);
584 case TOMOYO_ID_NUMBER_GROUP
:
585 tomoyo_del_number_group(element
);
587 case TOMOYO_MAX_POLICY
:
590 tomoyo_memory_free(element
);
593 tomoyo_gc_list_len
--;
600 * tomoyo_gc_thread - Garbage collector thread function.
604 * In case OOM-killer choose this thread for termination, we create this thread
605 * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was
610 static int tomoyo_gc_thread(void *unused
)
612 /* Garbage collector thread is exclusive. */
613 static DEFINE_MUTEX(tomoyo_gc_mutex
);
614 if (!mutex_trylock(&tomoyo_gc_mutex
))
616 daemonize("GC for TOMOYO");
618 tomoyo_collect_entry();
619 if (list_empty(&tomoyo_gc_list
))
621 synchronize_srcu(&tomoyo_ss
);
622 } while (tomoyo_kfree_entry());
624 struct tomoyo_io_buffer
*head
;
625 struct tomoyo_io_buffer
*tmp
;
627 spin_lock(&tomoyo_io_buffer_list_lock
);
628 list_for_each_entry_safe(head
, tmp
, &tomoyo_io_buffer_list
,
632 list_del(&head
->list
);
633 kfree(head
->read_buf
);
634 kfree(head
->write_buf
);
637 spin_unlock(&tomoyo_io_buffer_list_lock
);
639 mutex_unlock(&tomoyo_gc_mutex
);
641 /* This acts as do_exit(0). */
646 * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
648 * @head: Pointer to "struct tomoyo_io_buffer".
649 * @is_register: True if register, false if unregister.
653 void tomoyo_notify_gc(struct tomoyo_io_buffer
*head
, const bool is_register
)
655 bool is_write
= false;
657 spin_lock(&tomoyo_io_buffer_list_lock
);
660 list_add(&head
->list
, &tomoyo_io_buffer_list
);
662 is_write
= head
->write_buf
!= NULL
;
663 if (!--head
->users
) {
664 list_del(&head
->list
);
665 kfree(head
->read_buf
);
666 kfree(head
->write_buf
);
670 spin_unlock(&tomoyo_io_buffer_list_lock
);
672 struct task_struct
*task
= kthread_create(tomoyo_gc_thread
,
676 wake_up_process(task
);