bpf: Convert queue_stack map to rqspinlock

Replace all usage of raw_spinlock_t in queue_stack_maps.c with
rqspinlock. This map type has a set of open syzbot reports that
reproduce possible deadlocks. A prior attempt to fix these issues was
made at [0], but it was dropped in favor of this approach.

Return -EBUSY when a possible deadlock or timeout is detected, so that
user space or BPF programs relying on this error code to detect
problems do not break.

With these changes, the map should be safe to access in any context,
including NMIs.

[0]: https://lore.kernel.org/all/20240429165658.1305969-1-sidchintamaneni@gmail.com

Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/0000000000004c3fc90615f37756@google.com
Reported-by: syzbot+252bc5c744d0bba917e1@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/000000000000c80abd0616517df9@google.com
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250410153142.2064340-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>


+12 -23
kernel/bpf/queue_stack_maps.c
···
 #include <linux/slab.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
+#include <asm/rqspinlock.h>
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
 struct bpf_queue_stack {
 	struct bpf_map map;
-	raw_spinlock_t lock;
+	rqspinlock_t lock;
 	u32 head, tail;
 	u32 size; /* max_entries + 1 */
···
 
 	qs->size = size;
 
-	raw_spin_lock_init(&qs->lock);
+	raw_res_spin_lock_init(&qs->lock);
 
 	return &qs->map;
 }
···
 	int err = 0;
 	void *ptr;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
···
 	}
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
 	return err;
 }
···
 	void *ptr;
 	u32 index;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
···
 	qs->head = index;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
 	return err;
 }
···
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, irq_flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, irq_flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {
···
 	qs->head = 0;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, irq_flags);
 	return err;
 }
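
For context, here is a condensed sketch of how the pop/peek path
(__queue_map_get()) reads once the hunks above are applied. The code
between the lock and unlock calls is an illustration of the pattern in
kernel/bpf/queue_stack_maps.c rather than a verbatim copy of the
upstream function; the key point is that the error returned by the
resilient lock acquisition is what surfaces as the -EBUSY mentioned in
the commit message.

/*
 * Sketch of the converted pop/peek path: the resilient lock returns a
 * non-zero error on timeout or detected deadlock, which is reported to
 * callers as -EBUSY, replacing the old in_nmi()/trylock special case.
 * Body between lock and unlock shown for illustration only.
 */
static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
		return -EBUSY;

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

Because raw_res_spin_lock_irqsave() can fail instead of spinning
forever, the same -EBUSY path now covers NMI, IRQ, and task context
uniformly, which is what makes the old in_nmi()/trylock special case
unnecessary and lets the map be accessed safely in any context.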