Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add success stats to rqspinlock stress test

Add stats to observe the success and failure rate of lock acquisition
attempts in various contexts.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20251128232802.1031906-7-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Kumar Kartikeya Dwivedi and committed by Alexei Starovoitov.
3448375e 087849cc

+43 -12
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
··· 33 33 }; 34 34 #define RQSL_NR_HIST_BUCKETS ARRAY_SIZE(rqsl_hist_ms) 35 35 36 + enum rqsl_context { 37 + RQSL_CTX_NORMAL = 0, 38 + RQSL_CTX_NMI, 39 + RQSL_CTX_MAX, 40 + }; 41 + 36 42 struct rqsl_cpu_hist { 37 - atomic64_t normal[RQSL_NR_HIST_BUCKETS]; 38 - atomic64_t nmi[RQSL_NR_HIST_BUCKETS]; 43 + atomic64_t hist[RQSL_CTX_MAX][RQSL_NR_HIST_BUCKETS]; 44 + atomic64_t success[RQSL_CTX_MAX]; 45 + atomic64_t failure[RQSL_CTX_MAX]; 39 46 }; 40 47 41 48 static DEFINE_PER_CPU(struct rqsl_cpu_hist, rqsl_cpu_hists); ··· 124 117 return RQSL_NR_HIST_BUCKETS - 1; 125 118 } 126 119 127 - static void rqsl_record_lock_time(u64 delta_ns, bool is_nmi) 120 + static void rqsl_record_lock_result(u64 delta_ns, enum rqsl_context ctx, int ret) 128 121 { 129 122 struct rqsl_cpu_hist *hist = this_cpu_ptr(&rqsl_cpu_hists); 130 123 u32 delta_ms = DIV_ROUND_UP_ULL(delta_ns, NSEC_PER_MSEC); 131 124 u32 bucket = rqsl_hist_bucket_idx(delta_ms); 132 - atomic64_t *buckets = is_nmi ? hist->nmi : hist->normal; 125 + atomic64_t *buckets = hist->hist[ctx]; 133 126 134 127 atomic64_inc(&buckets[bucket]); 128 + if (!ret) 129 + atomic64_inc(&hist->success[ctx]); 130 + else 131 + atomic64_inc(&hist->failure[ctx]); 135 132 } 136 133 137 134 static int rqspinlock_worker_fn(void *arg) ··· 158 147 } 159 148 start_ns = ktime_get_mono_fast_ns(); 160 149 ret = raw_res_spin_lock_irqsave(worker_lock, flags); 161 - rqsl_record_lock_time(ktime_get_mono_fast_ns() - start_ns, false); 150 + rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns, 151 + RQSL_CTX_NORMAL, ret); 162 152 mdelay(normal_delay); 163 153 if (!ret) 164 154 raw_res_spin_unlock_irqrestore(worker_lock, flags); ··· 202 190 locks = rqsl_get_lock_pair(cpu); 203 191 start_ns = ktime_get_mono_fast_ns(); 204 192 ret = raw_res_spin_lock_irqsave(locks.nmi_lock, flags); 205 - rqsl_record_lock_time(ktime_get_mono_fast_ns() - start_ns, true); 193 + rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns, 194 + RQSL_CTX_NMI, ret); 206 195 207 196 
mdelay(nmi_delay); 208 197 ··· 313 300 u64 norm_counts[RQSL_NR_HIST_BUCKETS]; 314 301 u64 nmi_counts[RQSL_NR_HIST_BUCKETS]; 315 302 u64 total_counts[RQSL_NR_HIST_BUCKETS]; 303 + u64 norm_success, nmi_success, success_total; 304 + u64 norm_failure, nmi_failure, failure_total; 316 305 u64 norm_total = 0, nmi_total = 0, total = 0; 317 306 bool has_slow = false; 318 307 319 308 for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) { 320 - norm_counts[i] = atomic64_read(&hist->normal[i]); 321 - nmi_counts[i] = atomic64_read(&hist->nmi[i]); 309 + norm_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NORMAL][i]); 310 + nmi_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NMI][i]); 322 311 total_counts[i] = norm_counts[i] + nmi_counts[i]; 323 312 norm_total += norm_counts[i]; 324 313 nmi_total += nmi_counts[i]; ··· 330 315 has_slow = true; 331 316 } 332 317 318 + norm_success = atomic64_read(&hist->success[RQSL_CTX_NORMAL]); 319 + nmi_success = atomic64_read(&hist->success[RQSL_CTX_NMI]); 320 + norm_failure = atomic64_read(&hist->failure[RQSL_CTX_NORMAL]); 321 + nmi_failure = atomic64_read(&hist->failure[RQSL_CTX_NMI]); 322 + success_total = norm_success + nmi_success; 323 + failure_total = norm_failure + nmi_failure; 324 + 333 325 if (!total) 334 326 continue; 335 327 336 328 if (!has_slow) { 337 - pr_err(" cpu%d: total %llu (normal %llu, nmi %llu), all within 0-%ums\n", 338 - cpu, total, norm_total, nmi_total, RQSL_SLOW_THRESHOLD_MS); 329 + pr_err(" cpu%d: total %llu (normal %llu, nmi %llu) | " 330 + "success %llu (normal %llu, nmi %llu) | " 331 + "failure %llu (normal %llu, nmi %llu), all within 0-%ums\n", 332 + cpu, total, norm_total, nmi_total, 333 + success_total, norm_success, nmi_success, 334 + failure_total, norm_failure, nmi_failure, 335 + RQSL_SLOW_THRESHOLD_MS); 339 336 continue; 340 337 } 341 338 342 - pr_err(" cpu%d: total %llu (normal %llu, nmi %llu)\n", 343 - cpu, total, norm_total, nmi_total); 339 + pr_err(" cpu%d: total %llu (normal %llu, nmi %llu) | " 340 + "success 
%llu (normal %llu, nmi %llu) | " 341 + "failure %llu (normal %llu, nmi %llu)\n", 342 + cpu, total, norm_total, nmi_total, 343 + success_total, norm_success, nmi_success, 344 + failure_total, norm_failure, nmi_failure); 344 345 for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) { 345 346 unsigned int start_ms; 346 347