Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ntsync: Introduce alertable waits.

NT waits can optionally be made "alertable". This is a special channel for
thread wakeup that is mildly similar to SIGIO. A thread has an internal single
bit of "alerted" state, and if a thread is alerted during an alertable wait, the
wait will return a special value, consume the "alerted" state, and will not
consume any of its objects.

Alerts are implemented using events; the user-space NT emulator is expected to
create an internal ntsync event for each thread and pass that event to wait
functions.

Signed-off-by: Elizabeth Figura <zfigura@codeweavers.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20241213193511.457338-16-zfigura@codeweavers.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Elizabeth Figura and committed by
Greg Kroah-Hartman
a138179a e864071a

+63 -10
+61 -9
drivers/misc/ntsync.c
··· 869 869 const struct ntsync_wait_args *args, bool all, 870 870 struct ntsync_q **ret_q) 871 871 { 872 + int fds[NTSYNC_MAX_WAIT_COUNT + 1]; 872 873 const __u32 count = args->count; 873 - int fds[NTSYNC_MAX_WAIT_COUNT]; 874 874 struct ntsync_q *q; 875 + __u32 total_count; 875 876 __u32 i, j; 876 877 877 - if (args->pad[0] || args->pad[1] || (args->flags & ~NTSYNC_WAIT_REALTIME)) 878 + if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME)) 878 879 return -EINVAL; 879 880 880 881 if (args->count > NTSYNC_MAX_WAIT_COUNT) 881 882 return -EINVAL; 882 883 884 + total_count = count; 885 + if (args->alert) 886 + total_count++; 887 + 883 888 if (copy_from_user(fds, u64_to_user_ptr(args->objs), 884 889 array_size(count, sizeof(*fds)))) 885 890 return -EFAULT; 891 + if (args->alert) 892 + fds[count] = args->alert; 886 893 887 - q = kmalloc(struct_size(q, entries, count), GFP_KERNEL); 894 + q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL); 888 895 if (!q) 889 896 return -ENOMEM; 890 897 q->task = current; ··· 901 894 q->ownerdead = false; 902 895 q->count = count; 903 896 904 - for (i = 0; i < count; i++) { 897 + for (i = 0; i < total_count; i++) { 905 898 struct ntsync_q_entry *entry = &q->entries[i]; 906 899 struct ntsync_obj *obj = get_obj(dev, fds[i]); 907 900 ··· 951 944 static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp) 952 945 { 953 946 struct ntsync_wait_args args; 947 + __u32 i, total_count; 954 948 struct ntsync_q *q; 955 949 int signaled; 956 950 bool all; 957 - __u32 i; 958 951 int ret; 959 952 960 953 if (copy_from_user(&args, argp, sizeof(args))) ··· 964 957 if (ret < 0) 965 958 return ret; 966 959 960 + total_count = args.count; 961 + if (args.alert) 962 + total_count++; 963 + 967 964 /* queue ourselves */ 968 965 969 - for (i = 0; i < args.count; i++) { 966 + for (i = 0; i < total_count; i++) { 970 967 struct ntsync_q_entry *entry = &q->entries[i]; 971 968 struct ntsync_obj *obj = entry->obj; 972 969 ··· 979 968 
ntsync_unlock_obj(dev, obj, all); 980 969 } 981 970 982 - /* check if we are already signaled */ 971 + /* 972 + * Check if we are already signaled. 973 + * 974 + * Note that the API requires that normal objects are checked before 975 + * the alert event. Hence we queue the alert event last, and check 976 + * objects in order. 977 + */ 983 978 984 - for (i = 0; i < args.count; i++) { 979 + for (i = 0; i < total_count; i++) { 985 980 struct ntsync_obj *obj = q->entries[i].obj; 986 981 987 982 if (atomic_read(&q->signaled) != -1) ··· 1004 987 1005 988 /* and finally, unqueue */ 1006 989 1007 - for (i = 0; i < args.count; i++) { 990 + for (i = 0; i < total_count; i++) { 1008 991 struct ntsync_q_entry *entry = &q->entries[i]; 1009 992 struct ntsync_obj *obj = entry->obj; 1010 993 ··· 1064 1047 */ 1065 1048 list_add_tail(&entry->node, &obj->all_waiters); 1066 1049 } 1050 + if (args.alert) { 1051 + struct ntsync_q_entry *entry = &q->entries[args.count]; 1052 + struct ntsync_obj *obj = entry->obj; 1053 + 1054 + dev_lock_obj(dev, obj); 1055 + list_add_tail(&entry->node, &obj->any_waiters); 1056 + dev_unlock_obj(dev, obj); 1057 + } 1067 1058 1068 1059 /* check if we are already signaled */ 1069 1060 1070 1061 try_wake_all(dev, q, NULL); 1071 1062 1072 1063 mutex_unlock(&dev->wait_all_lock); 1064 + 1065 + /* 1066 + * Check if the alert event is signaled, making sure to do so only 1067 + * after checking if the other objects are signaled. 
1068 + */ 1069 + 1070 + if (args.alert) { 1071 + struct ntsync_obj *obj = q->entries[args.count].obj; 1072 + 1073 + if (atomic_read(&q->signaled) == -1) { 1074 + bool all = ntsync_lock_obj(dev, obj); 1075 + try_wake_any_obj(obj); 1076 + ntsync_unlock_obj(dev, obj, all); 1077 + } 1078 + } 1073 1079 1074 1080 /* sleep */ 1075 1081 ··· 1118 1078 } 1119 1079 1120 1080 mutex_unlock(&dev->wait_all_lock); 1081 + 1082 + if (args.alert) { 1083 + struct ntsync_q_entry *entry = &q->entries[args.count]; 1084 + struct ntsync_obj *obj = entry->obj; 1085 + bool all; 1086 + 1087 + all = ntsync_lock_obj(dev, obj); 1088 + list_del(&entry->node); 1089 + ntsync_unlock_obj(dev, obj, all); 1090 + 1091 + put_obj(obj); 1092 + } 1121 1093 1122 1094 signaled = atomic_read(&q->signaled); 1123 1095 if (signaled != -1) {
+2 -1
include/uapi/linux/ntsync.h
··· 34 34 __u32 index; 35 35 __u32 flags; 36 36 __u32 owner; 37 - __u32 pad[2]; 37 + __u32 alert; 38 + __u32 pad; 38 39 }; 39 40 40 41 #define NTSYNC_MAX_WAIT_COUNT 64