Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'trace-v6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

- Make sure 32-bit applications using user events have aligned access
when running on a 64-bit kernel.

- Add cond_resched in the loop that handles converting enums in
print_fmt strings in trace events.

- Fix premature wake ups of polling processes in the tracing ring
buffer. When a task polls waiting for a percentage of the ring buffer
to be filled, the writer still will wake it up at every event. Add
the polling's percentage to the "shortest_full" list to tell the
writer when to wake it up.

- For eventfs dir lookups on dynamic events, an event system's only
event could be removed, leaving its dentry with no children. This is
totally legitimate. But in eventfs_release() it must not access the
children array, as it is only allocated when the dentry has children.

* tag 'trace-v6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
eventfs: Test for dentries array allocated in eventfs_release()
tracing/user_events: Align set_bit() address for all archs
tracing: relax trace_event_eval_update() execution with cond_resched()
ring-buffer: Update "shortest_full" in polling

+56 -8
+1 -1
fs/tracefs/event_inode.c
··· 421 421 if (WARN_ON_ONCE(!dlist)) 422 422 return -EINVAL; 423 423 424 - for (i = 0; dlist->dentries[i]; i++) { 424 + for (i = 0; dlist->dentries && dlist->dentries[i]; i++) { 425 425 dput(dlist->dentries[i]); 426 426 } 427 427
+3
kernel/trace/ring_buffer.c
··· 1137 1137 if (full) { 1138 1138 poll_wait(filp, &work->full_waiters, poll_table); 1139 1139 work->full_waiters_pending = true; 1140 + if (!cpu_buffer->shortest_full || 1141 + cpu_buffer->shortest_full > full) 1142 + cpu_buffer->shortest_full = full; 1140 1143 } else { 1141 1144 poll_wait(filp, &work->waiters, poll_table); 1142 1145 work->waiters_pending = true;
+1
kernel/trace/trace_events.c
··· 2770 2770 update_event_fields(call, map[i]); 2771 2771 } 2772 2772 } 2773 + cond_resched(); 2773 2774 } 2774 2775 up_write(&trace_event_sem); 2775 2776 }
+51 -7
kernel/trace/trace_events_user.c
··· 127 127 /* Bit 7 is for freeing status of enablement */ 128 128 #define ENABLE_VAL_FREEING_BIT 7 129 129 130 - /* Only duplicate the bit value */ 131 - #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK 130 + /* Bit 8 is for marking 32-bit on 64-bit */ 131 + #define ENABLE_VAL_32_ON_64_BIT 8 132 + 133 + #define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT) 134 + 135 + /* Only duplicate the bit and compat values */ 136 + #define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK) 132 137 133 138 #define ENABLE_BITOPS(e) (&(e)->values) 134 139 ··· 178 173 int offset; 179 174 int flags; 180 175 }; 176 + 177 + static inline void align_addr_bit(unsigned long *addr, int *bit, 178 + unsigned long *flags) 179 + { 180 + if (IS_ALIGNED(*addr, sizeof(long))) { 181 + #ifdef __BIG_ENDIAN 182 + /* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */ 183 + if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags)) 184 + *bit += 32; 185 + #endif 186 + return; 187 + } 188 + 189 + *addr = ALIGN_DOWN(*addr, sizeof(long)); 190 + 191 + /* 192 + * We only support 32 and 64 bit values. The only time we need 193 + * to align is a 32 bit value on a 64 bit kernel, which on LE 194 + * is always 32 bits, and on BE requires no change when unaligned. 
195 + */ 196 + #ifdef __LITTLE_ENDIAN 197 + *bit += 32; 198 + #endif 199 + } 181 200 182 201 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i, 183 202 void *tpdata, bool *faulted); ··· 511 482 unsigned long *ptr; 512 483 struct page *page; 513 484 void *kaddr; 485 + int bit = ENABLE_BIT(enabler); 514 486 int ret; 515 487 516 488 lockdep_assert_held(&event_mutex); ··· 526 496 if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) || 527 497 test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)))) 528 498 return -EBUSY; 499 + 500 + align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler)); 529 501 530 502 ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT, 531 503 &page, NULL); ··· 547 515 548 516 /* Update bit atomically, user tracers must be atomic as well */ 549 517 if (enabler->event && enabler->event->status) 550 - set_bit(ENABLE_BIT(enabler), ptr); 518 + set_bit(bit, ptr); 551 519 else 552 - clear_bit(ENABLE_BIT(enabler), ptr); 520 + clear_bit(bit, ptr); 553 521 554 522 kunmap_local(kaddr); 555 523 unpin_user_pages_dirty_lock(&page, 1, true); ··· 881 849 enabler->event = user; 882 850 enabler->addr = uaddr; 883 851 enabler->values = reg->enable_bit; 852 + 853 + #if BITS_PER_LONG >= 64 854 + if (reg->enable_size == 4) 855 + set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler)); 856 + #endif 857 + 884 858 retry: 885 859 /* Prevents state changes from racing with new enablers */ 886 860 mutex_lock(&event_mutex); ··· 2415 2377 } 2416 2378 2417 2379 static int user_event_mm_clear_bit(struct user_event_mm *user_mm, 2418 - unsigned long uaddr, unsigned char bit) 2380 + unsigned long uaddr, unsigned char bit, 2381 + unsigned long flags) 2419 2382 { 2420 2383 struct user_event_enabler enabler; 2421 2384 int result; ··· 2424 2385 2425 2386 memset(&enabler, 0, sizeof(enabler)); 2426 2387 enabler.addr = uaddr; 2427 - enabler.values = bit; 2388 + enabler.values = bit | flags; 2428 2389 retry: 2429 2390 /* 
Prevents state changes from racing with new enablers */ 2430 2391 mutex_lock(&event_mutex); ··· 2454 2415 struct user_event_mm *mm = current->user_event_mm; 2455 2416 struct user_event_enabler *enabler, *next; 2456 2417 struct user_unreg reg; 2418 + unsigned long flags; 2457 2419 long ret; 2458 2420 2459 2421 ret = user_unreg_get(ureg, &reg); ··· 2465 2425 if (!mm) 2466 2426 return -ENOENT; 2467 2427 2428 + flags = 0; 2468 2429 ret = -ENOENT; 2469 2430 2470 2431 /* ··· 2482 2441 ENABLE_BIT(enabler) == reg.disable_bit) { 2483 2442 set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); 2484 2443 2444 + /* We must keep compat flags for the clear */ 2445 + flags |= enabler->values & ENABLE_VAL_COMPAT_MASK; 2446 + 2485 2447 if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler))) 2486 2448 user_event_enabler_destroy(enabler, true); 2487 2449 ··· 2498 2454 /* Ensure bit is now cleared for user, regardless of event status */ 2499 2455 if (!ret) 2500 2456 ret = user_event_mm_clear_bit(mm, reg.disable_addr, 2501 - reg.disable_bit); 2457 + reg.disable_bit, flags); 2502 2458 2503 2459 return ret; 2504 2460 }