Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Introduce support for bpf_local_irq_{save,restore}

Teach the verifier about IRQ-disabled sections through the introduction
of two new kfuncs, bpf_local_irq_save, to save IRQ state and disable
them, and bpf_local_irq_restore, to restore IRQ state and enable them
back again.

For the purposes of tracking the saved IRQ state, the verifier is taught
about a new special object on the stack of type STACK_IRQ_FLAG. This is
a 8 byte value which saves the IRQ flags which are to be passed back to
the IRQ restore kfunc.

Renumber the enums for REF_TYPE_* to simplify the check in
find_lock_state; filtering out non-lock types will become cumbersome
as they grow, and is unnecessary.

To track a dynamic number of IRQ-disabled regions and their associated
saved states, a new resource type RES_TYPE_IRQ is introduced, with its
state management functions: acquire_irq_state and release_irq_state,
taking advantage of the refactoring and clean ups made in earlier
commits.

One notable requirement of the kernel's IRQ save and restore API is that
they cannot happen out of order. For this purpose, when releasing a
reference state we keep track of the prev_id we saw with REF_TYPE_IRQ. Since reference
states are inserted in increasing order of the index, this is used to
remember the ordering of acquisitions of IRQ saved states, so that we
maintain a logical stack in acquisition order of resource identities,
and can enforce LIFO ordering when restoring IRQ state. The top of the
stack is maintained using bpf_verifier_state's active_irq_id.

To maintain the stack property when releasing reference states, we need
to modify release_reference_state to instead shift the remaining array
left using memmove, rather than swapping the deleted element with the
last one, which might break the ordering. A selftest covering this
subtle behavior is added in later patches.

The logic to detect initialized and uninitialized irq flag slots, marking
and unmarking is similar to how it's done for iterators. No additional
checks are needed in refsafe for REF_TYPE_IRQ, apart from the usual
check_id satisfiability check on the ref[i].id. We have to perform the
same check_ids check on state->active_irq_id as well.

To ensure we don't get assigned REF_TYPE_PTR by default after
acquire_reference_state, if someone forgets to assign the type, let's
also renumber the enum ref_state_type. This way any unassigned types
get caught by refsafe's default switch statement, instead of being
assumed to be REF_TYPE_PTR by default.

The kfuncs themselves are plain wrappers over local_irq_save and
local_irq_restore macros.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20241204030400.208005-5-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Kumar Kartikeya Dwivedi and committed by
Alexei Starovoitov
c8e2ee1f b79f5f54

+320 -4
+5 -2
include/linux/bpf_verifier.h
··· 233 233 */ 234 234 STACK_DYNPTR, 235 235 STACK_ITER, 236 + STACK_IRQ_FLAG, 236 237 }; 237 238 238 239 #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ ··· 255 254 * default to pointer reference on zero initialization of a state. 256 255 */ 257 256 enum ref_state_type { 258 - REF_TYPE_PTR = 0, 259 - REF_TYPE_LOCK, 257 + REF_TYPE_PTR = 1, 258 + REF_TYPE_IRQ = 2, 259 + REF_TYPE_LOCK = 3, 260 260 } type; 261 261 /* Track each reference created with a unique id, even if the same 262 262 * instruction creates the reference multiple times (eg, via CALL). ··· 423 421 u32 acquired_refs; 424 422 u32 active_locks; 425 423 u32 active_preempt_locks; 424 + u32 active_irq_id; 426 425 bool active_rcu_lock; 427 426 428 427 bool speculative;
+17
kernel/bpf/helpers.c
··· 3057 3057 return ret + 1; 3058 3058 } 3059 3059 3060 + /* Keep unsinged long in prototype so that kfunc is usable when emitted to 3061 + * vmlinux.h in BPF programs directly, but note that while in BPF prog, the 3062 + * unsigned long always points to 8-byte region on stack, the kernel may only 3063 + * read and write the 4-bytes on 32-bit. 3064 + */ 3065 + __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag) 3066 + { 3067 + local_irq_save(*flags__irq_flag); 3068 + } 3069 + 3070 + __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag) 3071 + { 3072 + local_irq_restore(*flags__irq_flag); 3073 + } 3074 + 3060 3075 __bpf_kfunc_end_defs(); 3061 3076 3062 3077 BTF_KFUNCS_START(generic_btf_ids) ··· 3164 3149 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) 3165 3150 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3166 3151 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3152 + BTF_ID_FLAGS(func, bpf_local_irq_save) 3153 + BTF_ID_FLAGS(func, bpf_local_irq_restore) 3167 3154 BTF_KFUNCS_END(common_btf_ids) 3168 3155 3169 3156 static const struct btf_kfunc_id_set common_kfunc_set = {
+1
kernel/bpf/log.c
··· 537 537 [STACK_ZERO] = '0', 538 538 [STACK_DYNPTR] = 'd', 539 539 [STACK_ITER] = 'i', 540 + [STACK_IRQ_FLAG] = 'f' 540 541 }; 541 542 542 543 static void print_liveness(struct bpf_verifier_env *env,
+297 -2
kernel/bpf/verifier.c
··· 661 661 return stack_slot_obj_get_spi(env, reg, "iter", nr_slots); 662 662 } 663 663 664 + static int irq_flag_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 665 + { 666 + return stack_slot_obj_get_spi(env, reg, "irq_flag", 1); 667 + } 668 + 664 669 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) 665 670 { 666 671 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { ··· 1161 1156 return 0; 1162 1157 } 1163 1158 1159 + static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx); 1160 + static int release_irq_state(struct bpf_verifier_state *state, int id); 1161 + 1162 + static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env, 1163 + struct bpf_kfunc_call_arg_meta *meta, 1164 + struct bpf_reg_state *reg, int insn_idx) 1165 + { 1166 + struct bpf_func_state *state = func(env, reg); 1167 + struct bpf_stack_state *slot; 1168 + struct bpf_reg_state *st; 1169 + int spi, i, id; 1170 + 1171 + spi = irq_flag_get_spi(env, reg); 1172 + if (spi < 0) 1173 + return spi; 1174 + 1175 + id = acquire_irq_state(env, insn_idx); 1176 + if (id < 0) 1177 + return id; 1178 + 1179 + slot = &state->stack[spi]; 1180 + st = &slot->spilled_ptr; 1181 + 1182 + __mark_reg_known_zero(st); 1183 + st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ 1184 + st->live |= REG_LIVE_WRITTEN; 1185 + st->ref_obj_id = id; 1186 + 1187 + for (i = 0; i < BPF_REG_SIZE; i++) 1188 + slot->slot_type[i] = STACK_IRQ_FLAG; 1189 + 1190 + mark_stack_slot_scratched(env, spi); 1191 + return 0; 1192 + } 1193 + 1194 + static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1195 + { 1196 + struct bpf_func_state *state = func(env, reg); 1197 + struct bpf_stack_state *slot; 1198 + struct bpf_reg_state *st; 1199 + int spi, i, err; 1200 + 1201 + spi = irq_flag_get_spi(env, reg); 1202 + if (spi < 0) 1203 + return spi; 1204 + 1205 + slot = &state->stack[spi]; 1206 + st = &slot->spilled_ptr; 1207 + 1208 + err = 
release_irq_state(env->cur_state, st->ref_obj_id); 1209 + WARN_ON_ONCE(err && err != -EACCES); 1210 + if (err) { 1211 + int insn_idx = 0; 1212 + 1213 + for (int i = 0; i < env->cur_state->acquired_refs; i++) { 1214 + if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) { 1215 + insn_idx = env->cur_state->refs[i].insn_idx; 1216 + break; 1217 + } 1218 + } 1219 + 1220 + verbose(env, "cannot restore irq state out of order, expected id=%d acquired at insn_idx=%d\n", 1221 + env->cur_state->active_irq_id, insn_idx); 1222 + return err; 1223 + } 1224 + 1225 + __mark_reg_not_init(env, st); 1226 + 1227 + /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */ 1228 + st->live |= REG_LIVE_WRITTEN; 1229 + 1230 + for (i = 0; i < BPF_REG_SIZE; i++) 1231 + slot->slot_type[i] = STACK_INVALID; 1232 + 1233 + mark_stack_slot_scratched(env, spi); 1234 + return 0; 1235 + } 1236 + 1237 + static bool is_irq_flag_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1238 + { 1239 + struct bpf_func_state *state = func(env, reg); 1240 + struct bpf_stack_state *slot; 1241 + int spi, i; 1242 + 1243 + /* For -ERANGE (i.e. spi not falling into allocated stack slots), we 1244 + * will do check_mem_access to check and update stack bounds later, so 1245 + * return true for that case. 
1246 + */ 1247 + spi = irq_flag_get_spi(env, reg); 1248 + if (spi == -ERANGE) 1249 + return true; 1250 + if (spi < 0) 1251 + return false; 1252 + 1253 + slot = &state->stack[spi]; 1254 + 1255 + for (i = 0; i < BPF_REG_SIZE; i++) 1256 + if (slot->slot_type[i] == STACK_IRQ_FLAG) 1257 + return false; 1258 + return true; 1259 + } 1260 + 1261 + static int is_irq_flag_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1262 + { 1263 + struct bpf_func_state *state = func(env, reg); 1264 + struct bpf_stack_state *slot; 1265 + struct bpf_reg_state *st; 1266 + int spi, i; 1267 + 1268 + spi = irq_flag_get_spi(env, reg); 1269 + if (spi < 0) 1270 + return -EINVAL; 1271 + 1272 + slot = &state->stack[spi]; 1273 + st = &slot->spilled_ptr; 1274 + 1275 + if (!st->ref_obj_id) 1276 + return -EINVAL; 1277 + 1278 + for (i = 0; i < BPF_REG_SIZE; i++) 1279 + if (slot->slot_type[i] != STACK_IRQ_FLAG) 1280 + return -EINVAL; 1281 + return 0; 1282 + } 1283 + 1164 1284 /* Check if given stack slot is "special": 1165 1285 * - spilled register state (STACK_SPILL); 1166 1286 * - dynptr state (STACK_DYNPTR); 1167 1287 * - iter state (STACK_ITER). 
1288 + * - irq flag state (STACK_IRQ_FLAG) 1168 1289 */ 1169 1290 static bool is_stack_slot_special(const struct bpf_stack_state *stack) 1170 1291 { ··· 1300 1169 case STACK_SPILL: 1301 1170 case STACK_DYNPTR: 1302 1171 case STACK_ITER: 1172 + case STACK_IRQ_FLAG: 1303 1173 return true; 1304 1174 case STACK_INVALID: 1305 1175 case STACK_MISC: ··· 1423 1291 dst->active_locks = src->active_locks; 1424 1292 dst->active_preempt_locks = src->active_preempt_locks; 1425 1293 dst->active_rcu_lock = src->active_rcu_lock; 1294 + dst->active_irq_id = src->active_irq_id; 1426 1295 return 0; 1427 1296 } 1428 1297 ··· 1524 1391 return 0; 1525 1392 } 1526 1393 1394 + static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx) 1395 + { 1396 + struct bpf_verifier_state *state = env->cur_state; 1397 + struct bpf_reference_state *s; 1398 + 1399 + s = acquire_reference_state(env, insn_idx); 1400 + if (!s) 1401 + return -ENOMEM; 1402 + s->type = REF_TYPE_IRQ; 1403 + s->id = ++env->id_gen; 1404 + 1405 + state->active_irq_id = s->id; 1406 + return s->id; 1407 + } 1408 + 1527 1409 static void release_reference_state(struct bpf_verifier_state *state, int idx) 1528 1410 { 1529 1411 int last_idx; 1412 + size_t rem; 1530 1413 1414 + /* IRQ state requires the relative ordering of elements remaining the 1415 + * same, since it relies on the refs array to behave as a stack, so that 1416 + * it can detect out-of-order IRQ restore. Hence use memmove to shift 1417 + * the array instead of swapping the final element into the deleted idx. 
1418 + */ 1531 1419 last_idx = state->acquired_refs - 1; 1420 + rem = state->acquired_refs - idx - 1; 1532 1421 if (last_idx && idx != last_idx) 1533 - memcpy(&state->refs[idx], &state->refs[last_idx], sizeof(*state->refs)); 1422 + memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem); 1534 1423 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 1535 1424 state->acquired_refs--; 1536 1425 return; ··· 1574 1419 return -EINVAL; 1575 1420 } 1576 1421 1422 + static int release_irq_state(struct bpf_verifier_state *state, int id) 1423 + { 1424 + u32 prev_id = 0; 1425 + int i; 1426 + 1427 + if (id != state->active_irq_id) 1428 + return -EACCES; 1429 + 1430 + for (i = 0; i < state->acquired_refs; i++) { 1431 + if (state->refs[i].type != REF_TYPE_IRQ) 1432 + continue; 1433 + if (state->refs[i].id == id) { 1434 + release_reference_state(state, i); 1435 + state->active_irq_id = prev_id; 1436 + return 0; 1437 + } else { 1438 + prev_id = state->refs[i].id; 1439 + } 1440 + } 1441 + return -EINVAL; 1442 + } 1443 + 1577 1444 static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type, 1578 1445 int id, void *ptr) 1579 1446 { ··· 1604 1427 for (i = 0; i < state->acquired_refs; i++) { 1605 1428 struct bpf_reference_state *s = &state->refs[i]; 1606 1429 1607 - if (s->type == REF_TYPE_PTR || s->type != type) 1430 + if (s->type != type) 1608 1431 continue; 1609 1432 1610 1433 if (s->id == id && s->ptr == ptr) ··· 3410 3233 int spi, int nr_slots) 3411 3234 { 3412 3235 return mark_stack_slot_obj_read(env, reg, spi, nr_slots); 3236 + } 3237 + 3238 + static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 3239 + { 3240 + int spi; 3241 + 3242 + spi = irq_flag_get_spi(env, reg); 3243 + if (spi < 0) 3244 + return spi; 3245 + return mark_stack_slot_obj_read(env, reg, spi, 1); 3413 3246 } 3414 3247 3415 3248 /* This function is supposed to be used by the following 32-bit optimization 
··· 10195 10008 return -EINVAL; 10196 10009 } 10197 10010 10011 + if (env->cur_state->active_irq_id) { 10012 + verbose(env, "global function calls are not allowed with IRQs disabled,\n" 10013 + "use static function instead\n"); 10014 + return -EINVAL; 10015 + } 10016 + 10198 10017 if (err) { 10199 10018 verbose(env, "Caller passes invalid args into func#%d ('%s')\n", 10200 10019 subprog, sub_name); ··· 10725 10532 return err; 10726 10533 } 10727 10534 10535 + if (check_lock && env->cur_state->active_irq_id) { 10536 + verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix); 10537 + return -EINVAL; 10538 + } 10539 + 10728 10540 if (check_lock && env->cur_state->active_rcu_lock) { 10729 10541 verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix); 10730 10542 return -EINVAL; ··· 10926 10728 if (env->cur_state->active_preempt_locks) { 10927 10729 if (fn->might_sleep) { 10928 10730 verbose(env, "sleepable helper %s#%d in non-preemptible region\n", 10731 + func_id_name(func_id), func_id); 10732 + return -EINVAL; 10733 + } 10734 + 10735 + if (in_sleepable(env) && is_storage_get_function(func_id)) 10736 + env->insn_aux_data[insn_idx].storage_get_func_atomic = true; 10737 + } 10738 + 10739 + if (env->cur_state->active_irq_id) { 10740 + if (fn->might_sleep) { 10741 + verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n", 10929 10742 func_id_name(func_id), func_id); 10930 10743 return -EINVAL; 10931 10744 } ··· 11506 11297 return btf_param_match_suffix(btf, arg, "__str"); 11507 11298 } 11508 11299 11300 + static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param *arg) 11301 + { 11302 + return btf_param_match_suffix(btf, arg, "__irq_flag"); 11303 + } 11304 + 11509 11305 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, 11510 11306 const struct btf_param *arg, 11511 11307 const char *name) ··· 11664 11450 KF_ARG_PTR_TO_CONST_STR, 11665 11451 KF_ARG_PTR_TO_MAP, 11666 11452 
KF_ARG_PTR_TO_WORKQUEUE, 11453 + KF_ARG_PTR_TO_IRQ_FLAG, 11667 11454 }; 11668 11455 11669 11456 enum special_kfunc_type { ··· 11696 11481 KF_bpf_iter_css_task_new, 11697 11482 KF_bpf_session_cookie, 11698 11483 KF_bpf_get_kmem_cache, 11484 + KF_bpf_local_irq_save, 11485 + KF_bpf_local_irq_restore, 11699 11486 }; 11700 11487 11701 11488 BTF_SET_START(special_kfunc_set) ··· 11764 11547 BTF_ID_UNUSED 11765 11548 #endif 11766 11549 BTF_ID(func, bpf_get_kmem_cache) 11550 + BTF_ID(func, bpf_local_irq_save) 11551 + BTF_ID(func, bpf_local_irq_restore) 11767 11552 11768 11553 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) 11769 11554 { ··· 11855 11636 11856 11637 if (is_kfunc_arg_wq(meta->btf, &args[argno])) 11857 11638 return KF_ARG_PTR_TO_WORKQUEUE; 11639 + 11640 + if (is_kfunc_arg_irq_flag(meta->btf, &args[argno])) 11641 + return KF_ARG_PTR_TO_IRQ_FLAG; 11858 11642 11859 11643 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { 11860 11644 if (!btf_type_is_struct(ref_t)) { ··· 11961 11739 } 11962 11740 return 0; 11963 11741 } 11742 + 11743 + static int process_irq_flag(struct bpf_verifier_env *env, int regno, 11744 + struct bpf_kfunc_call_arg_meta *meta) 11745 + { 11746 + struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 11747 + bool irq_save; 11748 + int err; 11749 + 11750 + if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save]) { 11751 + irq_save = true; 11752 + } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore]) { 11753 + irq_save = false; 11754 + } else { 11755 + verbose(env, "verifier internal error: unknown irq flags kfunc\n"); 11756 + return -EFAULT; 11757 + } 11758 + 11759 + if (irq_save) { 11760 + if (!is_irq_flag_reg_valid_uninit(env, reg)) { 11761 + verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1); 11762 + return -EINVAL; 11763 + } 11764 + 11765 + err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false); 
11766 + if (err) 11767 + return err; 11768 + 11769 + err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx); 11770 + if (err) 11771 + return err; 11772 + } else { 11773 + err = is_irq_flag_reg_valid_init(env, reg); 11774 + if (err) { 11775 + verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1); 11776 + return err; 11777 + } 11778 + 11779 + err = mark_irq_flag_read(env, reg); 11780 + if (err) 11781 + return err; 11782 + 11783 + err = unmark_stack_slot_irq_flag(env, reg); 11784 + if (err) 11785 + return err; 11786 + } 11787 + return 0; 11788 + } 11789 + 11964 11790 11965 11791 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 11966 11792 { ··· 12598 12328 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 12599 12329 case KF_ARG_PTR_TO_CONST_STR: 12600 12330 case KF_ARG_PTR_TO_WORKQUEUE: 12331 + case KF_ARG_PTR_TO_IRQ_FLAG: 12601 12332 break; 12602 12333 default: 12603 12334 WARN_ON_ONCE(1); ··· 12893 12622 if (ret < 0) 12894 12623 return ret; 12895 12624 break; 12625 + case KF_ARG_PTR_TO_IRQ_FLAG: 12626 + if (reg->type != PTR_TO_STACK) { 12627 + verbose(env, "arg#%d doesn't point to an irq flag on stack\n", i); 12628 + return -EINVAL; 12629 + } 12630 + ret = process_irq_flag(env, regno, meta); 12631 + if (ret < 0) 12632 + return ret; 12633 + break; 12896 12634 } 12897 12635 } 12898 12636 ··· 13080 12800 } else if (preempt_enable) { 13081 12801 verbose(env, "unmatched attempt to enable preemption (kernel function %s)\n", func_name); 13082 12802 return -EINVAL; 12803 + } 12804 + 12805 + if (env->cur_state->active_irq_id && sleepable) { 12806 + verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name); 12807 + return -EACCES; 13083 12808 } 13084 12809 13085 12810 /* In case of release function, we get register number of refcounted ··· 18020 17735 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 18021 17736 return false; 18022 17737 break; 17738 + case STACK_IRQ_FLAG: 17739 + old_reg = 
&old->stack[spi].spilled_ptr; 17740 + cur_reg = &cur->stack[spi].spilled_ptr; 17741 + if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 17742 + return false; 17743 + break; 18023 17744 case STACK_MISC: 18024 17745 case STACK_ZERO: 18025 17746 case STACK_INVALID: ··· 18055 17764 if (old->active_rcu_lock != cur->active_rcu_lock) 18056 17765 return false; 18057 17766 17767 + if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap)) 17768 + return false; 17769 + 18058 17770 for (i = 0; i < old->acquired_refs; i++) { 18059 17771 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) || 18060 17772 old->refs[i].type != cur->refs[i].type) 18061 17773 return false; 18062 17774 switch (old->refs[i].type) { 18063 17775 case REF_TYPE_PTR: 17776 + case REF_TYPE_IRQ: 18064 17777 break; 18065 17778 case REF_TYPE_LOCK: 18066 17779 if (old->refs[i].ptr != cur->refs[i].ptr)