Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

TOMOYO: Simplify garbage collector.

When TOMOYO started using garbage collector at commit 847b173e "TOMOYO: Add
garbage collector.", we waited for close() before kfree(). Thus, elements to be
kfree()d were queued up using tomoyo_gc_list list.

But it turned out that tomoyo_element_linked_by_gc() tends to choke garbage
collector when certain pattern of entries are queued.

Since garbage collector is no longer waiting for close() since commit 2e503bbb
"TOMOYO: Fix lockdep warning.", we can remove tomoyo_gc_list list and
tomoyo_element_linked_by_gc() by doing sequential processing.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: James Morris <jmorris@namei.org>

Authored by Tetsuo Handa; committed by James Morris.
f9732ea1 778c4a4d

+186 -319
+5 -2
security/tomoyo/common.h
··· 52 52 53 53 #define TOMOYO_EXEC_TMPSIZE 4096 54 54 55 + /* Garbage collector is trying to kfree() this element. */ 56 + #define TOMOYO_GC_IN_PROGRESS -1 57 + 55 58 /* Profile number is an integer between 0 and 255. */ 56 59 #define TOMOYO_MAX_PROFILES 256 57 60 ··· 401 398 /* Common header for holding ACL entries. */ 402 399 struct tomoyo_acl_head { 403 400 struct list_head list; 404 - bool is_deleted; 401 + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ 405 402 } __packed; 406 403 407 404 /* Common header for shared entries. */ ··· 668 665 struct tomoyo_acl_info { 669 666 struct list_head list; 670 667 struct tomoyo_condition *cond; /* Maybe NULL. */ 671 - bool is_deleted; 668 + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ 672 669 u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */ 673 670 } __packed; 674 671
+4 -4
security/tomoyo/condition.c
··· 400 400 found = true; 401 401 goto out; 402 402 } 403 - list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) { 404 - if (!tomoyo_same_condition(ptr, entry)) 403 + list_for_each_entry(ptr, &tomoyo_condition_list, head.list) { 404 + if (!tomoyo_same_condition(ptr, entry) || 405 + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) 405 406 continue; 406 407 /* Same entry found. Share this entry. */ 407 408 atomic_inc(&ptr->head.users); ··· 412 411 if (!found) { 413 412 if (tomoyo_memory_ok(entry)) { 414 413 atomic_set(&entry->head.users, 1); 415 - list_add_rcu(&entry->head.list, 416 - &tomoyo_condition_list); 414 + list_add(&entry->head.list, &tomoyo_condition_list); 417 415 } else { 418 416 found = true; 419 417 ptr = NULL;
+4
security/tomoyo/domain.c
··· 39 39 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 40 40 return -ENOMEM; 41 41 list_for_each_entry_rcu(entry, list, list) { 42 + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) 43 + continue; 42 44 if (!check_duplicate(entry, new_entry)) 43 45 continue; 44 46 entry->is_deleted = param->is_delete; ··· 117 115 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 118 116 goto out; 119 117 list_for_each_entry_rcu(entry, list, list) { 118 + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) 119 + continue; 120 120 if (!tomoyo_same_acl_head(entry, new_entry) || 121 121 !check_duplicate(entry, new_entry)) 122 122 continue;
+169 -311
security/tomoyo/gc.c
··· 13 13 /* Lock for protecting tomoyo_io_buffer_list. */ 14 14 static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock); 15 15 16 - /* Size of an element. */ 17 - static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = { 18 - [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group), 19 - [TOMOYO_ID_ADDRESS_GROUP] = sizeof(struct tomoyo_address_group), 20 - [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group), 21 - [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group), 22 - [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator), 23 - [TOMOYO_ID_TRANSITION_CONTROL] = 24 - sizeof(struct tomoyo_transition_control), 25 - [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager), 26 - /* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */ 27 - /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */ 28 - /* [TOMOYO_ID_ACL] = 29 - tomoyo_acl_size["struct tomoyo_acl_info"->type], */ 30 - [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info), 31 - }; 32 - 33 - /* Size of a domain ACL element. */ 34 - static const u8 tomoyo_acl_size[] = { 35 - [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl), 36 - [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl), 37 - [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl), 38 - [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl), 39 - [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl), 40 - [TOMOYO_TYPE_INET_ACL] = sizeof(struct tomoyo_inet_acl), 41 - [TOMOYO_TYPE_UNIX_ACL] = sizeof(struct tomoyo_unix_acl), 42 - [TOMOYO_TYPE_ENV_ACL] = sizeof(struct tomoyo_env_acl), 43 - }; 44 - 45 16 /** 46 17 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not. 
47 18 * ··· 30 59 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { 31 60 head->users++; 32 61 spin_unlock(&tomoyo_io_buffer_list_lock); 33 - if (mutex_lock_interruptible(&head->io_sem)) { 34 - in_use = true; 35 - goto out; 36 - } 62 + mutex_lock(&head->io_sem); 37 63 if (head->r.domain == element || head->r.group == element || 38 64 head->r.acl == element || &head->w.domain->list == element) 39 65 in_use = true; 40 66 mutex_unlock(&head->io_sem); 41 - out: 42 67 spin_lock(&tomoyo_io_buffer_list_lock); 43 68 head->users--; 44 69 if (in_use) ··· 48 81 * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not. 49 82 * 50 83 * @string: String to check. 51 - * @size: Memory allocated for @string . 52 84 * 53 85 * Returns true if @string is used by /sys/kernel/security/tomoyo/ users, 54 86 * false otherwise. 55 87 */ 56 - static bool tomoyo_name_used_by_io_buffer(const char *string, 57 - const size_t size) 88 + static bool tomoyo_name_used_by_io_buffer(const char *string) 58 89 { 59 90 struct tomoyo_io_buffer *head; 91 + const size_t size = strlen(string) + 1; 60 92 bool in_use = false; 61 93 62 94 spin_lock(&tomoyo_io_buffer_list_lock); ··· 63 97 int i; 64 98 head->users++; 65 99 spin_unlock(&tomoyo_io_buffer_list_lock); 66 - if (mutex_lock_interruptible(&head->io_sem)) { 67 - in_use = true; 68 - goto out; 69 - } 100 + mutex_lock(&head->io_sem); 70 101 for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) { 71 102 const char *w = head->r.w[i]; 72 103 if (w < string || w > string + size) ··· 72 109 break; 73 110 } 74 111 mutex_unlock(&head->io_sem); 75 - out: 76 112 spin_lock(&tomoyo_io_buffer_list_lock); 77 113 head->users--; 78 114 if (in_use) ··· 81 119 return in_use; 82 120 } 83 121 84 - /* Structure for garbage collection. */ 85 - struct tomoyo_gc { 86 - struct list_head list; 87 - enum tomoyo_policy_id type; 88 - size_t size; 89 - struct list_head *element; 90 - }; 91 - /* List of entries to be deleted. 
*/ 92 - static LIST_HEAD(tomoyo_gc_list); 93 - /* Length of tomoyo_gc_list. */ 94 - static int tomoyo_gc_list_len; 95 - 96 - /** 97 - * tomoyo_add_to_gc - Add an entry to to be deleted list. 98 - * 99 - * @type: One of values in "enum tomoyo_policy_id". 100 - * @element: Pointer to "struct list_head". 101 - * 102 - * Returns true on success, false otherwise. 103 - * 104 - * Caller holds tomoyo_policy_lock mutex. 105 - * 106 - * Adding an entry needs kmalloc(). Thus, if we try to add thousands of 107 - * entries at once, it will take too long time. Thus, do not add more than 128 108 - * entries per a scan. But to be able to handle worst case where all entries 109 - * are in-use, we accept one more entry per a scan. 110 - * 111 - * If we use singly linked list using "struct list_head"->prev (which is 112 - * LIST_POISON2), we can avoid kmalloc(). 113 - */ 114 - static bool tomoyo_add_to_gc(const int type, struct list_head *element) 115 - { 116 - struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 117 - if (!entry) 118 - return false; 119 - entry->type = type; 120 - if (type == TOMOYO_ID_ACL) 121 - entry->size = tomoyo_acl_size[ 122 - container_of(element, 123 - typeof(struct tomoyo_acl_info), 124 - list)->type]; 125 - else if (type == TOMOYO_ID_NAME) 126 - entry->size = strlen(container_of(element, 127 - typeof(struct tomoyo_name), 128 - head.list)->entry.name) + 1; 129 - else if (type == TOMOYO_ID_CONDITION) 130 - entry->size = 131 - container_of(element, typeof(struct tomoyo_condition), 132 - head.list)->size; 133 - else 134 - entry->size = tomoyo_element_size[type]; 135 - entry->element = element; 136 - list_add(&entry->list, &tomoyo_gc_list); 137 - list_del_rcu(element); 138 - return tomoyo_gc_list_len++ < 128; 139 - } 140 - 141 - /** 142 - * tomoyo_element_linked_by_gc - Validate next element of an entry. 143 - * 144 - * @element: Pointer to an element. 145 - * @size: Size of @element in byte. 
146 - * 147 - * Returns true if @element is linked by other elements in the garbage 148 - * collector's queue, false otherwise. 149 - */ 150 - static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size) 151 - { 152 - struct tomoyo_gc *p; 153 - list_for_each_entry(p, &tomoyo_gc_list, list) { 154 - const u8 *ptr = (const u8 *) p->element->next; 155 - if (ptr < element || element + size < ptr) 156 - continue; 157 - return true; 158 - } 159 - return false; 160 - } 161 - 162 122 /** 163 123 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control". 164 124 * ··· 88 204 * 89 205 * Returns nothing. 90 206 */ 91 - static void tomoyo_del_transition_control(struct list_head *element) 207 + static inline void tomoyo_del_transition_control(struct list_head *element) 92 208 { 93 209 struct tomoyo_transition_control *ptr = 94 210 container_of(element, typeof(*ptr), head.list); ··· 103 219 * 104 220 * Returns nothing. 105 221 */ 106 - static void tomoyo_del_aggregator(struct list_head *element) 222 + static inline void tomoyo_del_aggregator(struct list_head *element) 107 223 { 108 224 struct tomoyo_aggregator *ptr = 109 225 container_of(element, typeof(*ptr), head.list); ··· 118 234 * 119 235 * Returns nothing. 120 236 */ 121 - static void tomoyo_del_manager(struct list_head *element) 237 + static inline void tomoyo_del_manager(struct list_head *element) 122 238 { 123 239 struct tomoyo_manager *ptr = 124 240 container_of(element, typeof(*ptr), head.list); ··· 214 330 * 215 331 * @element: Pointer to "struct list_head". 216 332 * 217 - * Returns true if deleted, false otherwise. 333 + * Returns nothing. 
218 334 */ 219 - static bool tomoyo_del_domain(struct list_head *element) 335 + static inline void tomoyo_del_domain(struct list_head *element) 220 336 { 221 337 struct tomoyo_domain_info *domain = 222 338 container_of(element, typeof(*domain), list); 223 339 struct tomoyo_acl_info *acl; 224 340 struct tomoyo_acl_info *tmp; 225 341 /* 226 - * Since we don't protect whole execve() operation using SRCU, 227 - * we need to recheck domain->users at this point. 228 - * 229 - * (1) Reader starts SRCU section upon execve(). 230 - * (2) Reader traverses tomoyo_domain_list and finds this domain. 231 - * (3) Writer marks this domain as deleted. 232 - * (4) Garbage collector removes this domain from tomoyo_domain_list 233 - * because this domain is marked as deleted and used by nobody. 234 - * (5) Reader saves reference to this domain into 235 - * "struct linux_binprm"->cred->security . 236 - * (6) Reader finishes SRCU section, although execve() operation has 237 - * not finished yet. 238 - * (7) Garbage collector waits for SRCU synchronization. 239 - * (8) Garbage collector kfree() this domain because this domain is 240 - * used by nobody. 241 - * (9) Reader finishes execve() operation and restores this domain from 242 - * "struct linux_binprm"->cred->security. 243 - * 244 - * By updating domain->users at (5), we can solve this race problem 245 - * by rechecking domain->users at (8). 342 + * Since this domain is referenced from neither 343 + * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete 344 + * elements without checking for is_deleted flag. 246 345 */ 247 - if (atomic_read(&domain->users)) 248 - return false; 249 346 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { 250 347 tomoyo_del_acl(&acl->list); 251 348 tomoyo_memory_free(acl); 252 349 } 253 350 tomoyo_put_name(domain->domainname); 254 - return true; 255 351 } 256 352 257 353 /** ··· 280 416 * 281 417 * Returns nothing. 
282 418 */ 283 - static void tomoyo_del_name(struct list_head *element) 419 + static inline void tomoyo_del_name(struct list_head *element) 284 420 { 285 - const struct tomoyo_name *ptr = 286 - container_of(element, typeof(*ptr), head.list); 421 + /* Nothing to do. */ 287 422 } 288 423 289 424 /** ··· 292 429 * 293 430 * Returns nothing. 294 431 */ 295 - static void tomoyo_del_path_group(struct list_head *element) 432 + static inline void tomoyo_del_path_group(struct list_head *element) 296 433 { 297 434 struct tomoyo_path_group *member = 298 435 container_of(element, typeof(*member), head.list); ··· 306 443 * 307 444 * Returns nothing. 308 445 */ 309 - static void tomoyo_del_group(struct list_head *element) 446 + static inline void tomoyo_del_group(struct list_head *element) 310 447 { 311 448 struct tomoyo_group *group = 312 449 container_of(element, typeof(*group), head.list); ··· 332 469 * 333 470 * Returns nothing. 334 471 */ 335 - static void tomoyo_del_number_group(struct list_head *element) 472 + static inline void tomoyo_del_number_group(struct list_head *element) 336 473 { 337 - struct tomoyo_number_group *member = 338 - container_of(element, typeof(*member), head.list); 474 + /* Nothing to do. */ 475 + } 476 + 477 + /** 478 + * tomoyo_try_to_gc - Try to kfree() an entry. 479 + * 480 + * @type: One of values in "enum tomoyo_policy_id". 481 + * @element: Pointer to "struct list_head". 482 + * 483 + * Returns nothing. 484 + * 485 + * Caller holds tomoyo_policy_lock mutex. 486 + */ 487 + static void tomoyo_try_to_gc(const enum tomoyo_policy_id type, 488 + struct list_head *element) 489 + { 490 + /* 491 + * __list_del_entry() guarantees that the list element became no longer 492 + * reachable from the list which the element was originally on (e.g. 493 + * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the 494 + * list element became no longer referenced by syscall users. 
495 + */ 496 + __list_del_entry(element); 497 + mutex_unlock(&tomoyo_policy_lock); 498 + synchronize_srcu(&tomoyo_ss); 499 + /* 500 + * However, there are two users which may still be using the list 501 + * element. We need to defer until both users forget this element. 502 + * 503 + * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} 504 + * and "struct tomoyo_io_buffer"->w.domain forget this element. 505 + */ 506 + if (tomoyo_struct_used_by_io_buffer(element)) 507 + goto reinject; 508 + switch (type) { 509 + case TOMOYO_ID_TRANSITION_CONTROL: 510 + tomoyo_del_transition_control(element); 511 + break; 512 + case TOMOYO_ID_MANAGER: 513 + tomoyo_del_manager(element); 514 + break; 515 + case TOMOYO_ID_AGGREGATOR: 516 + tomoyo_del_aggregator(element); 517 + break; 518 + case TOMOYO_ID_GROUP: 519 + tomoyo_del_group(element); 520 + break; 521 + case TOMOYO_ID_PATH_GROUP: 522 + tomoyo_del_path_group(element); 523 + break; 524 + case TOMOYO_ID_ADDRESS_GROUP: 525 + tomoyo_del_address_group(element); 526 + break; 527 + case TOMOYO_ID_NUMBER_GROUP: 528 + tomoyo_del_number_group(element); 529 + break; 530 + case TOMOYO_ID_CONDITION: 531 + tomoyo_del_condition(element); 532 + break; 533 + case TOMOYO_ID_NAME: 534 + /* 535 + * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[] 536 + * forget this element. 537 + */ 538 + if (tomoyo_name_used_by_io_buffer 539 + (container_of(element, typeof(struct tomoyo_name), 540 + head.list)->entry.name)) 541 + goto reinject; 542 + tomoyo_del_name(element); 543 + break; 544 + case TOMOYO_ID_ACL: 545 + tomoyo_del_acl(element); 546 + break; 547 + case TOMOYO_ID_DOMAIN: 548 + /* 549 + * Don't kfree() until all "struct cred"->security forget this 550 + * element. 
551 + */ 552 + if (atomic_read(&container_of 553 + (element, typeof(struct tomoyo_domain_info), 554 + list)->users)) 555 + goto reinject; 556 + tomoyo_del_domain(element); 557 + break; 558 + case TOMOYO_MAX_POLICY: 559 + break; 560 + } 561 + mutex_lock(&tomoyo_policy_lock); 562 + tomoyo_memory_free(element); 563 + return; 564 + reinject: 565 + /* 566 + * We can safely reinject this element here bacause 567 + * (1) Appending list elements and removing list elements are protected 568 + * by tomoyo_policy_lock mutex. 569 + * (2) Only this function removes list elements and this function is 570 + * exclusively executed by tomoyo_gc_mutex mutex. 571 + * are true. 572 + */ 573 + mutex_lock(&tomoyo_policy_lock); 574 + list_add_rcu(element, element->prev); 339 575 } 340 576 341 577 /** ··· 443 481 * @id: One of values in "enum tomoyo_policy_id". 444 482 * @member_list: Pointer to "struct list_head". 445 483 * 446 - * Returns true if some elements are deleted, false otherwise. 484 + * Returns nothing. 447 485 */ 448 - static bool tomoyo_collect_member(const enum tomoyo_policy_id id, 486 + static void tomoyo_collect_member(const enum tomoyo_policy_id id, 449 487 struct list_head *member_list) 450 488 { 451 489 struct tomoyo_acl_head *member; 452 - list_for_each_entry(member, member_list, list) { 490 + struct tomoyo_acl_head *tmp; 491 + list_for_each_entry_safe(member, tmp, member_list, list) { 453 492 if (!member->is_deleted) 454 493 continue; 455 - if (!tomoyo_add_to_gc(id, &member->list)) 456 - return false; 494 + member->is_deleted = TOMOYO_GC_IN_PROGRESS; 495 + tomoyo_try_to_gc(id, &member->list); 457 496 } 458 - return true; 459 497 } 460 498 461 499 /** ··· 463 501 * 464 502 * @list: Pointer to "struct list_head". 465 503 * 466 - * Returns true if some elements are deleted, false otherwise. 504 + * Returns nothing. 
467 505 */ 468 - static bool tomoyo_collect_acl(struct list_head *list) 506 + static void tomoyo_collect_acl(struct list_head *list) 469 507 { 470 508 struct tomoyo_acl_info *acl; 471 - list_for_each_entry(acl, list, list) { 509 + struct tomoyo_acl_info *tmp; 510 + list_for_each_entry_safe(acl, tmp, list, list) { 472 511 if (!acl->is_deleted) 473 512 continue; 474 - if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list)) 475 - return false; 513 + acl->is_deleted = TOMOYO_GC_IN_PROGRESS; 514 + tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list); 476 515 } 477 - return true; 478 516 } 479 517 480 518 /** 481 - * tomoyo_collect_entry - Scan lists for deleted elements. 519 + * tomoyo_collect_entry - Try to kfree() deleted elements. 482 520 * 483 521 * Returns nothing. 484 522 */ ··· 487 525 int i; 488 526 enum tomoyo_policy_id id; 489 527 struct tomoyo_policy_namespace *ns; 490 - int idx; 491 - if (mutex_lock_interruptible(&tomoyo_policy_lock)) 492 - return; 493 - idx = tomoyo_read_lock(); 528 + mutex_lock(&tomoyo_policy_lock); 494 529 { 495 530 struct tomoyo_domain_info *domain; 496 - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { 497 - if (!tomoyo_collect_acl(&domain->acl_info_list)) 498 - goto unlock; 531 + struct tomoyo_domain_info *tmp; 532 + list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, 533 + list) { 534 + tomoyo_collect_acl(&domain->acl_info_list); 499 535 if (!domain->is_deleted || atomic_read(&domain->users)) 500 536 continue; 501 - /* 502 - * Nobody is referring this domain. But somebody may 503 - * refer this domain after successful execve(). 504 - * We recheck domain->users after SRCU synchronization. 
505 - */ 506 - if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list)) 507 - goto unlock; 537 + tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list); 508 538 } 509 539 } 510 - list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) { 540 + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { 511 541 for (id = 0; id < TOMOYO_MAX_POLICY; id++) 512 - if (!tomoyo_collect_member(id, &ns->policy_list[id])) 513 - goto unlock; 542 + tomoyo_collect_member(id, &ns->policy_list[id]); 514 543 for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) 515 - if (!tomoyo_collect_acl(&ns->acl_group[i])) 516 - goto unlock; 544 + tomoyo_collect_acl(&ns->acl_group[i]); 545 + } 546 + { 547 + struct tomoyo_shared_acl_head *ptr; 548 + struct tomoyo_shared_acl_head *tmp; 549 + list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list, 550 + list) { 551 + if (atomic_read(&ptr->users) > 0) 552 + continue; 553 + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); 554 + tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list); 555 + } 556 + } 557 + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { 517 558 for (i = 0; i < TOMOYO_MAX_GROUP; i++) { 518 559 struct list_head *list = &ns->group_list[i]; 519 560 struct tomoyo_group *group; 561 + struct tomoyo_group *tmp; 520 562 switch (i) { 521 563 case 0: 522 564 id = TOMOYO_ID_PATH_GROUP; ··· 532 566 id = TOMOYO_ID_ADDRESS_GROUP; 533 567 break; 534 568 } 535 - list_for_each_entry(group, list, head.list) { 536 - if (!tomoyo_collect_member 537 - (id, &group->member_list)) 538 - goto unlock; 569 + list_for_each_entry_safe(group, tmp, list, head.list) { 570 + tomoyo_collect_member(id, &group->member_list); 539 571 if (!list_empty(&group->member_list) || 540 - atomic_read(&group->head.users)) 572 + atomic_read(&group->head.users) > 0) 541 573 continue; 542 - if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, 543 - &group->head.list)) 544 - goto unlock; 574 + atomic_set(&group->head.users, 575 + TOMOYO_GC_IN_PROGRESS); 576 + 
tomoyo_try_to_gc(TOMOYO_ID_GROUP, 577 + &group->head.list); 545 578 } 546 579 } 547 580 } 548 - id = TOMOYO_ID_CONDITION; 549 - for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) { 550 - struct list_head *list = !i ? 551 - &tomoyo_condition_list : &tomoyo_name_list[i - 1]; 581 + for (i = 0; i < TOMOYO_MAX_HASH; i++) { 582 + struct list_head *list = &tomoyo_name_list[i]; 552 583 struct tomoyo_shared_acl_head *ptr; 553 - list_for_each_entry(ptr, list, list) { 554 - if (atomic_read(&ptr->users)) 584 + struct tomoyo_shared_acl_head *tmp; 585 + list_for_each_entry_safe(ptr, tmp, list, list) { 586 + if (atomic_read(&ptr->users) > 0) 555 587 continue; 556 - if (!tomoyo_add_to_gc(id, &ptr->list)) 557 - goto unlock; 588 + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); 589 + tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list); 558 590 } 559 - id = TOMOYO_ID_NAME; 560 591 } 561 - unlock: 562 - tomoyo_read_unlock(idx); 563 592 mutex_unlock(&tomoyo_policy_lock); 564 - } 565 - 566 - /** 567 - * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list. 568 - * 569 - * Returns true if some entries were kfree()d, false otherwise. 570 - */ 571 - static bool tomoyo_kfree_entry(void) 572 - { 573 - struct tomoyo_gc *p; 574 - struct tomoyo_gc *tmp; 575 - bool result = false; 576 - 577 - list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) { 578 - struct list_head *element = p->element; 579 - 580 - /* 581 - * list_del_rcu() in tomoyo_add_to_gc() guarantees that the 582 - * list element became no longer reachable from the list which 583 - * the element was originally on (e.g. tomoyo_domain_list). 584 - * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees 585 - * that the list element became no longer referenced by syscall 586 - * users. 587 - * 588 - * However, there are three users which may still be using the 589 - * list element. We need to defer until all of these users 590 - * forget the list element. 
591 - * 592 - * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain, 593 - * group,acl} and "struct tomoyo_io_buffer"->w.domain forget 594 - * the list element. 595 - */ 596 - if (tomoyo_struct_used_by_io_buffer(element)) 597 - continue; 598 - /* 599 - * Secondly, defer until all other elements in the 600 - * tomoyo_gc_list list forget the list element. 601 - */ 602 - if (tomoyo_element_linked_by_gc((const u8 *) element, p->size)) 603 - continue; 604 - switch (p->type) { 605 - case TOMOYO_ID_TRANSITION_CONTROL: 606 - tomoyo_del_transition_control(element); 607 - break; 608 - case TOMOYO_ID_AGGREGATOR: 609 - tomoyo_del_aggregator(element); 610 - break; 611 - case TOMOYO_ID_MANAGER: 612 - tomoyo_del_manager(element); 613 - break; 614 - case TOMOYO_ID_CONDITION: 615 - tomoyo_del_condition(element); 616 - break; 617 - case TOMOYO_ID_NAME: 618 - /* 619 - * Thirdly, defer until all "struct tomoyo_io_buffer" 620 - * ->r.w[] forget the list element. 621 - */ 622 - if (tomoyo_name_used_by_io_buffer( 623 - container_of(element, typeof(struct tomoyo_name), 624 - head.list)->entry.name, p->size)) 625 - continue; 626 - tomoyo_del_name(element); 627 - break; 628 - case TOMOYO_ID_ACL: 629 - tomoyo_del_acl(element); 630 - break; 631 - case TOMOYO_ID_DOMAIN: 632 - if (!tomoyo_del_domain(element)) 633 - continue; 634 - break; 635 - case TOMOYO_ID_PATH_GROUP: 636 - tomoyo_del_path_group(element); 637 - break; 638 - case TOMOYO_ID_ADDRESS_GROUP: 639 - tomoyo_del_address_group(element); 640 - break; 641 - case TOMOYO_ID_GROUP: 642 - tomoyo_del_group(element); 643 - break; 644 - case TOMOYO_ID_NUMBER_GROUP: 645 - tomoyo_del_number_group(element); 646 - break; 647 - case TOMOYO_MAX_POLICY: 648 - break; 649 - } 650 - tomoyo_memory_free(element); 651 - list_del(&p->list); 652 - kfree(p); 653 - tomoyo_gc_list_len--; 654 - result = true; 655 - } 656 - return result; 657 593 } 658 594 659 595 /** 660 596 * tomoyo_gc_thread - Garbage collector thread function. 
661 597 * 662 598 * @unused: Unused. 663 - * 664 - * In case OOM-killer choose this thread for termination, we create this thread 665 - * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was 666 - * close()d. 667 599 * 668 600 * Returns 0. 669 601 */ ··· 571 707 static DEFINE_MUTEX(tomoyo_gc_mutex); 572 708 if (!mutex_trylock(&tomoyo_gc_mutex)) 573 709 goto out; 574 - 575 - do { 576 - tomoyo_collect_entry(); 577 - if (list_empty(&tomoyo_gc_list)) 578 - break; 579 - synchronize_srcu(&tomoyo_ss); 580 - } while (tomoyo_kfree_entry()); 710 + tomoyo_collect_entry(); 581 711 { 582 712 struct tomoyo_io_buffer *head; 583 713 struct tomoyo_io_buffer *tmp;
+4 -2
security/tomoyo/memory.c
··· 123 123 goto out; 124 124 list = &param->ns->group_list[idx]; 125 125 list_for_each_entry(group, list, head.list) { 126 - if (e.group_name != group->group_name) 126 + if (e.group_name != group->group_name || 127 + atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) 127 128 continue; 128 129 atomic_inc(&group->head.users); 129 130 found = true; ··· 176 175 if (mutex_lock_interruptible(&tomoyo_policy_lock)) 177 176 return NULL; 178 177 list_for_each_entry(ptr, head, head.list) { 179 - if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name)) 178 + if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || 179 + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) 180 180 continue; 181 181 atomic_inc(&ptr->head.users); 182 182 goto out;