Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing/user_events: Use refcount instead of atomic for ref tracking

User processes could open up enough event references to cause rollovers.
These could cause use-after-free scenarios, which we do not want.
Switching to the refcount APIs prevents this, but will leak memory once
saturated.

Once saturated, user processes can still use the events. This prevents
a bad user process from stopping existing telemetry from being emitted.

Link: https://lkml.kernel.org/r/20220728233309.1896-5-beaub@linux.microsoft.com
Link: https://lore.kernel.org/all/2059213643.196683.1648499088753.JavaMail.zimbra@efficios.com/

Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Beau Belgrave and committed by
Steven Rostedt (Google)
d401b724 e6f89a14

+24 -29
+24 -29
kernel/trace/trace_events_user.c
··· 14 14 #include <linux/uio.h> 15 15 #include <linux/ioctl.h> 16 16 #include <linux/jhash.h> 17 + #include <linux/refcount.h> 17 18 #include <linux/trace_events.h> 18 19 #include <linux/tracefs.h> 19 20 #include <linux/types.h> ··· 58 57 * within a file a user_event might be created if it does not 59 58 * already exist. These are globally used and their lifetime 60 59 * is tied to the refcnt member. These cannot go away until the 61 - * refcnt reaches zero. 60 + * refcnt reaches one. 62 61 */ 63 62 struct user_event { 64 63 struct tracepoint tracepoint; ··· 68 67 struct hlist_node node; 69 68 struct list_head fields; 70 69 struct list_head validators; 71 - atomic_t refcnt; 70 + refcount_t refcnt; 72 71 int index; 73 72 int flags; 74 73 int min_size; ··· 104 103 static u32 user_event_key(char *name) 105 104 { 106 105 return jhash(name, strlen(name), 0); 106 + } 107 + 108 + static __always_inline __must_check 109 + bool user_event_last_ref(struct user_event *user) 110 + { 111 + return refcount_read(&user->refcnt) == 1; 107 112 } 108 113 109 114 static __always_inline __must_check ··· 669 662 670 663 hash_for_each_possible(register_table, user, node, key) 671 664 if (!strcmp(EVENT_NAME(user), name)) { 672 - atomic_inc(&user->refcnt); 665 + refcount_inc(&user->refcnt); 673 666 return user; 674 667 } 675 668 ··· 883 876 884 877 return ret; 885 878 inc: 886 - atomic_inc(&user->refcnt); 879 + refcount_inc(&user->refcnt); 887 880 update_reg_page_for(user); 888 881 return 0; 889 882 dec: 890 883 update_reg_page_for(user); 891 - atomic_dec(&user->refcnt); 884 + refcount_dec(&user->refcnt); 892 885 return 0; 893 886 } 894 887 ··· 914 907 ret = user_event_parse_cmd(name, &user); 915 908 916 909 if (!ret) 917 - atomic_dec(&user->refcnt); 910 + refcount_dec(&user->refcnt); 918 911 919 912 mutex_unlock(&reg_mutex); 920 913 ··· 958 951 { 959 952 struct user_event *user = container_of(ev, struct user_event, devent); 960 953 961 - return atomic_read(&user->refcnt) != 0; 954 + 
return !user_event_last_ref(user); 962 955 } 963 956 964 957 static int user_event_free(struct dyn_event *ev) 965 958 { 966 959 struct user_event *user = container_of(ev, struct user_event, devent); 967 960 968 - if (atomic_read(&user->refcnt) != 0) 961 + if (!user_event_last_ref(user)) 969 962 return -EBUSY; 970 963 971 964 return destroy_user_event(user); ··· 1144 1137 1145 1138 user->index = index; 1146 1139 1147 - /* Ensure we track ref */ 1148 - atomic_inc(&user->refcnt); 1140 + /* Ensure we track self ref and caller ref (2) */ 1141 + refcount_set(&user->refcnt, 2); 1149 1142 1150 1143 dyn_event_init(&user->devent, &user_event_dops); 1151 1144 dyn_event_add(&user->devent, &user->call); ··· 1171 1164 static int delete_user_event(char *name) 1172 1165 { 1173 1166 u32 key; 1174 - int ret; 1175 1167 struct user_event *user = find_user_event(name, &key); 1176 1168 1177 1169 if (!user) 1178 1170 return -ENOENT; 1179 1171 1180 - /* Ensure we are the last ref */ 1181 - if (atomic_read(&user->refcnt) != 1) { 1182 - ret = -EBUSY; 1183 - goto put_ref; 1184 - } 1172 + refcount_dec(&user->refcnt); 1185 1173 1186 - ret = destroy_user_event(user); 1174 + if (!user_event_last_ref(user)) 1175 + return -EBUSY; 1187 1176 1188 - if (ret) 1189 - goto put_ref; 1190 - 1191 - return ret; 1192 - put_ref: 1193 - /* No longer have this ref */ 1194 - atomic_dec(&user->refcnt); 1195 - 1196 - return ret; 1177 + return destroy_user_event(user); 1197 1178 } 1198 1179 1199 1180 /* ··· 1309 1314 1310 1315 new_refs->events[i] = user; 1311 1316 1312 - atomic_inc(&user->refcnt); 1317 + refcount_inc(&user->refcnt); 1313 1318 1314 1319 rcu_assign_pointer(file->private_data, new_refs); 1315 1320 ··· 1369 1374 ret = user_events_ref_add(file, user); 1370 1375 1371 1376 /* No longer need parse ref, ref_add either worked or not */ 1372 - atomic_dec(&user->refcnt); 1377 + refcount_dec(&user->refcnt); 1373 1378 1374 1379 /* Positive number is index and valid */ 1375 1380 if (ret < 0) ··· 1459 1464 user = 
refs->events[i]; 1460 1465 1461 1466 if (user) 1462 - atomic_dec(&user->refcnt); 1467 + refcount_dec(&user->refcnt); 1463 1468 } 1464 1469 out: 1465 1470 file->private_data = NULL;