Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull vsprintf fix from Steven Rostedt:
"The memory barrier usage in updating the random ptr hash for %p in
vsprintf is incorrect.

Instead of adding the read memory barrier into vsprintf() which will
cause a slight degradation to a commonly used function in the kernel
just to solve a very unlikely race condition that can only happen at
boot up, change the code from using a variable branch to a
static_branch.

Not only does this solve the race condition, it actually will improve
the performance of vsprintf() by removing the conditional branch that
is only needed at boot"

* tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
vsprintf: Replace memory barrier with static_key for random_ptr_key update

+15 -11
+15 -11
lib/vsprintf.c
··· 1669 1669 return number(buf, end, (unsigned long int)ptr, spec); 1670 1670 } 1671 1671 1672 - static bool have_filled_random_ptr_key __read_mostly; 1672 + static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); 1673 1673 static siphash_key_t ptr_key __read_mostly; 1674 + 1675 + static void enable_ptr_key_workfn(struct work_struct *work) 1676 + { 1677 + get_random_bytes(&ptr_key, sizeof(ptr_key)); 1678 + /* Needs to run from preemptible context */ 1679 + static_branch_disable(&not_filled_random_ptr_key); 1680 + } 1681 + 1682 + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); 1674 1683 1675 1684 static void fill_random_ptr_key(struct random_ready_callback *unused) 1676 1685 { 1677 - get_random_bytes(&ptr_key, sizeof(ptr_key)); 1678 - /* 1679 - * have_filled_random_ptr_key==true is dependent on get_random_bytes(). 1680 - * ptr_to_id() needs to see have_filled_random_ptr_key==true 1681 - * after get_random_bytes() returns. 1682 - */ 1683 - smp_mb(); 1684 - WRITE_ONCE(have_filled_random_ptr_key, true); 1686 + /* This may be in an interrupt handler. */ 1687 + queue_work(system_unbound_wq, &enable_ptr_key_work); 1685 1688 } 1686 1689 1687 1690 static struct random_ready_callback random_ready = { ··· 1698 1695 if (!ret) { 1699 1696 return 0; 1700 1697 } else if (ret == -EALREADY) { 1701 - fill_random_ptr_key(&random_ready); 1698 + /* This is in preemptible context */ 1699 + enable_ptr_key_workfn(&enable_ptr_key_work); 1702 1700 return 0; 1703 1701 } 1704 1702 ··· 1713 1709 unsigned long hashval; 1714 1710 const int default_width = 2 * sizeof(ptr); 1715 1711 1716 - if (unlikely(!have_filled_random_ptr_key)) { 1712 + if (static_branch_unlikely(&not_filled_random_ptr_key)) { 1717 1713 spec.field_width = default_width; 1718 1714 /* string length must be less than default_width */ 1719 1715 return string(buf, end, "(ptrval)", spec);