Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

maple_tree: fix tracepoint string pointers

maple_tree tracepoints contain pointers to function names. Such a pointer
is saved when a tracepoint logs an event. There's no guarantee that it's
still valid when the event is parsed later and the pointer is dereferenced.

The kernel warns about these unsafe pointers.

event 'ma_read' has unsafe pointer field 'fn'
WARNING: kernel/trace/trace.c:3779 at ignore_event+0x1da/0x1e4

Mark the function names as tracepoint_string() to fix the events.

One case that does not work without this patch is using trace-cmd record
to save the binary ring buffer and then trace-cmd report to parse it in
userspace. The address of __func__ cannot be dereferenced from
userspace, but tracepoint_string() adds an entry to
/sys/kernel/tracing/printk_formats that userspace tools can use to
resolve the pointer.

Link: https://lkml.kernel.org/r/20251030155537.87972-1-martin@kaiser.cx
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Martin Kaiser <martin@kaiser.cx>
Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Martin Kaiser; committed by Andrew Morton.
91a54090 1abbdf3d

+16 -14
+16 -14
lib/maple_tree.c
··· 64 64 #define CREATE_TRACE_POINTS 65 65 #include <trace/events/maple_tree.h> 66 66 67 + #define TP_FCT tracepoint_string(__func__) 68 + 67 69 /* 68 70 * Kernel pointer hashing renders much of the maple tree dump useless as tagged 69 71 * pointers get hashed to arbitrary values. ··· 2758 2756 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 2759 2757 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 2760 2758 2761 - trace_ma_op(__func__, mas); 2759 + trace_ma_op(TP_FCT, mas); 2762 2760 2763 2761 /* 2764 2762 * Rebalancing occurs if a node is insufficient. Data is rebalanced ··· 2999 2997 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3000 2998 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3001 2999 3002 - trace_ma_op(__func__, mas); 3000 + trace_ma_op(TP_FCT, mas); 3003 3001 3004 3002 mast.l = &l_mas; 3005 3003 mast.r = &r_mas; ··· 3174 3172 return false; 3175 3173 } 3176 3174 3177 - trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); 3175 + trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry); 3178 3176 return true; 3179 3177 } 3180 3178 ··· 3418 3416 * of data may happen. 3419 3417 */ 3420 3418 mas = wr_mas->mas; 3421 - trace_ma_op(__func__, mas); 3419 + trace_ma_op(TP_FCT, mas); 3422 3420 3423 3421 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3424 3422 return mas_new_root(mas, wr_mas->entry); ··· 3554 3552 } else { 3555 3553 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 3556 3554 } 3557 - trace_ma_write(__func__, mas, 0, wr_mas->entry); 3555 + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); 3558 3556 mas_update_gap(mas); 3559 3557 mas->end = new_end; 3560 3558 return; ··· 3598 3596 mas->offset++; /* Keep mas accurate. */ 3599 3597 } 3600 3598 3601 - trace_ma_write(__func__, mas, 0, wr_mas->entry); 3599 + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); 3602 3600 /* 3603 3601 * Only update gap when the new entry is empty or there is an empty 3604 3602 * entry in the original two ranges. 
··· 3719 3717 mas_update_gap(mas); 3720 3718 3721 3719 mas->end = new_end; 3722 - trace_ma_write(__func__, mas, new_end, wr_mas->entry); 3720 + trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry); 3723 3721 return; 3724 3722 } 3725 3723 ··· 3733 3731 { 3734 3732 struct maple_big_node b_node; 3735 3733 3736 - trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); 3734 + trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry); 3737 3735 memset(&b_node, 0, sizeof(struct maple_big_node)); 3738 3736 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); 3739 3737 mas_commit_b_node(wr_mas, &b_node); ··· 5064 5062 { 5065 5063 MA_WR_STATE(wr_mas, mas, entry); 5066 5064 5067 - trace_ma_write(__func__, mas, 0, entry); 5065 + trace_ma_write(TP_FCT, mas, 0, entry); 5068 5066 #ifdef CONFIG_DEBUG_MAPLE_TREE 5069 5067 if (MAS_WARN_ON(mas, mas->index > mas->last)) 5070 5068 pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last, ··· 5165 5163 } 5166 5164 5167 5165 store: 5168 - trace_ma_write(__func__, mas, 0, entry); 5166 + trace_ma_write(TP_FCT, mas, 0, entry); 5169 5167 mas_wr_store_entry(&wr_mas); 5170 5168 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); 5171 5169 mas_destroy(mas); ··· 5884 5882 MA_STATE(mas, mt, index, index); 5885 5883 void *entry; 5886 5884 5887 - trace_ma_read(__func__, &mas); 5885 + trace_ma_read(TP_FCT, &mas); 5888 5886 rcu_read_lock(); 5889 5887 retry: 5890 5888 entry = mas_start(&mas); ··· 5927 5925 MA_STATE(mas, mt, index, last); 5928 5926 int ret = 0; 5929 5927 5930 - trace_ma_write(__func__, &mas, 0, entry); 5928 + trace_ma_write(TP_FCT, &mas, 0, entry); 5931 5929 if (WARN_ON_ONCE(xa_is_advanced(entry))) 5932 5930 return -EINVAL; 5933 5931 ··· 6150 6148 void *entry = NULL; 6151 6149 6152 6150 MA_STATE(mas, mt, index, index); 6153 - trace_ma_op(__func__, &mas); 6151 + trace_ma_op(TP_FCT, &mas); 6154 6152 6155 6153 mtree_lock(mt); 6156 6154 entry = mas_erase(&mas); ··· 6487 6485 unsigned long copy = *index; 6488 6486 #endif 6489 6487 6490 - 
trace_ma_read(__func__, &mas); 6488 + trace_ma_read(TP_FCT, &mas); 6491 6489 6492 6490 if ((*index) > max) 6493 6491 return NULL;