Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing/user_events: Rename link fields for clarity

Currently most list_head fields of various structs within user_events
are simply named link. This causes folks to keep additional context in
their head when working with the code, which can be confusing.

Instead of using link, describe what the actual link is, for example:
list_del_rcu(&mm->link);

Changes into:
list_del_rcu(&mm->mms_link);

The reader is now given a hint that the link is to the mms global list,
instead of having to remember or spot-check within the code.

Link: https://lkml.kernel.org/r/20230519230741.669-4-beaub@linux.microsoft.com
Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wicngggxVpbnrYHjRTwGE0WYscPRM+L2HO2BF8ia1EXgQ@mail.gmail.com/

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Beau Belgrave and committed by
Steven Rostedt (Google)
dcbd1ac2 aaecdaf9

+23 -19
+1 -1
include/linux/user_events.h
··· 17 17 18 18 #ifdef CONFIG_USER_EVENTS 19 19 struct user_event_mm { 20 - struct list_head link; 20 + struct list_head mms_link; 21 21 struct list_head enablers; 22 22 struct mm_struct *mm; 23 23 struct user_event_mm *next;
+22 -18
kernel/trace/trace_events_user.c
··· 96 96 * these to track enablement sites that are tied to an event. 97 97 */ 98 98 struct user_event_enabler { 99 - struct list_head link; 99 + struct list_head mm_enablers_link; 100 100 struct user_event *event; 101 101 unsigned long addr; 102 102 ··· 155 155 #define VALIDATOR_REL (1 << 1) 156 156 157 157 struct user_event_validator { 158 - struct list_head link; 158 + struct list_head user_event_link; 159 159 int offset; 160 160 int flags; 161 161 }; ··· 261 261 262 262 static void user_event_enabler_destroy(struct user_event_enabler *enabler) 263 263 { 264 - list_del_rcu(&enabler->link); 264 + list_del_rcu(&enabler->mm_enablers_link); 265 265 266 266 /* No longer tracking the event via the enabler */ 267 267 refcount_dec(&enabler->event->refcnt); ··· 440 440 { 441 441 struct user_event_enabler *enabler; 442 442 443 - list_for_each_entry(enabler, &mm->enablers, link) { 443 + list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { 444 444 if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit) 445 445 return true; 446 446 } ··· 461 461 next = mm->next; 462 462 mmap_read_lock(mm->mm); 463 463 464 - list_for_each_entry(enabler, &mm->enablers, link) { 464 + list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { 465 465 if (enabler->event == user) { 466 466 attempt = 0; 467 467 user_event_enabler_write(mm, enabler, true, &attempt); ··· 497 497 refcount_inc(&enabler->event->refcnt); 498 498 499 499 /* Enablers not exposed yet, RCU not required */ 500 - list_add(&enabler->link, &mm->enablers); 500 + list_add(&enabler->mm_enablers_link, &mm->enablers); 501 501 502 502 return true; 503 503 } ··· 527 527 */ 528 528 rcu_read_lock(); 529 529 530 - list_for_each_entry_rcu(mm, &user_event_mms, link) 531 - list_for_each_entry_rcu(enabler, &mm->enablers, link) 530 + list_for_each_entry_rcu(mm, &user_event_mms, mms_link) { 531 + list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) { 532 532 if (enabler->event == user) { 533 533 mm->next = found; 
534 534 found = user_event_mm_get(mm); 535 535 break; 536 536 } 537 + } 538 + } 537 539 538 540 rcu_read_unlock(); 539 541 ··· 574 572 unsigned long flags; 575 573 576 574 spin_lock_irqsave(&user_event_mms_lock, flags); 577 - list_add_rcu(&user_mm->link, &user_event_mms); 575 + list_add_rcu(&user_mm->mms_link, &user_event_mms); 578 576 spin_unlock_irqrestore(&user_event_mms_lock, flags); 579 577 580 578 t->user_event_mm = user_mm; ··· 603 601 { 604 602 struct user_event_enabler *enabler, *next; 605 603 606 - list_for_each_entry_safe(enabler, next, &mm->enablers, link) 604 + list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) 607 605 user_event_enabler_destroy(enabler); 608 606 609 607 mmdrop(mm->mm); ··· 640 638 641 639 /* Remove the mm from the list, so it can no longer be enabled */ 642 640 spin_lock_irqsave(&user_event_mms_lock, flags); 643 - list_del_rcu(&mm->link); 641 + list_del_rcu(&mm->mms_link); 644 642 spin_unlock_irqrestore(&user_event_mms_lock, flags); 645 643 646 644 /* ··· 688 686 689 687 rcu_read_lock(); 690 688 691 - list_for_each_entry_rcu(enabler, &old_mm->enablers, link) 689 + list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) { 692 690 if (!user_event_enabler_dup(enabler, mm)) 693 691 goto error; 692 + } 694 693 695 694 rcu_read_unlock(); 696 695 ··· 760 757 */ 761 758 if (!*write_result) { 762 759 refcount_inc(&enabler->event->refcnt); 763 - list_add_rcu(&enabler->link, &user_mm->enablers); 760 + list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers); 764 761 } 765 762 766 763 mutex_unlock(&event_mutex); ··· 916 913 struct user_event_validator *validator, *next; 917 914 struct list_head *head = &user->validators; 918 915 919 - list_for_each_entry_safe(validator, next, head, link) { 920 - list_del(&validator->link); 916 + list_for_each_entry_safe(validator, next, head, user_event_link) { 917 + list_del(&validator->user_event_link); 921 918 kfree(validator); 922 919 } 923 920 } ··· 971 968 
validator->offset = offset; 972 969 973 970 /* Want sequential access when validating */ 974 - list_add_tail(&validator->link, &user->validators); 971 + list_add_tail(&validator->user_event_link, &user->validators); 975 972 976 973 add_field: 977 974 field->type = type; ··· 1361 1358 void *pos, *end = data + len; 1362 1359 u32 loc, offset, size; 1363 1360 1364 - list_for_each_entry(validator, head, link) { 1361 + list_for_each_entry(validator, head, user_event_link) { 1365 1362 pos = data + validator->offset; 1366 1363 1367 1364 /* Already done min_size check, no bounds check here */ ··· 2282 2279 */ 2283 2280 mutex_lock(&event_mutex); 2284 2281 2285 - list_for_each_entry_safe(enabler, next, &mm->enablers, link) 2282 + list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) { 2286 2283 if (enabler->addr == reg.disable_addr && 2287 2284 ENABLE_BIT(enabler) == reg.disable_bit) { 2288 2285 set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); ··· 2293 2290 /* Removed at least one */ 2294 2291 ret = 0; 2295 2292 } 2293 + } 2296 2294 2297 2295 mutex_unlock(&event_mutex); 2298 2296