Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/ww_mutex: Add kselftests for resolving ww_mutex cyclic deadlocks

Check that ww_mutexes can detect cyclic deadlocks (generalised ABBA
cycles) and resolve them by lock reordering.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Nicolai Hähnle <nhaehnle@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161201114711.28697-7-chris@chris-wilson.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Chris Wilson; committed by Ingo Molnar.
d1b42b80 70207686

+115
+115
kernel/locking/test-ww_mutex.c
··· 21 21 #include <linux/completion.h> 22 22 #include <linux/kthread.h> 23 23 #include <linux/module.h> 24 + #include <linux/slab.h> 24 25 #include <linux/ww_mutex.h> 25 26 26 27 static DEFINE_WW_CLASS(ww_class); 28 + struct workqueue_struct *wq; 27 29 28 30 struct test_mutex { 29 31 struct work_struct work; ··· 245 243 return ret; 246 244 } 247 245 246 + struct test_cycle { 247 + struct work_struct work; 248 + struct ww_mutex a_mutex; 249 + struct ww_mutex *b_mutex; 250 + struct completion *a_signal; 251 + struct completion b_signal; 252 + int result; 253 + }; 254 + 255 + static void test_cycle_work(struct work_struct *work) 256 + { 257 + struct test_cycle *cycle = container_of(work, typeof(*cycle), work); 258 + struct ww_acquire_ctx ctx; 259 + int err; 260 + 261 + ww_acquire_init(&ctx, &ww_class); 262 + ww_mutex_lock(&cycle->a_mutex, &ctx); 263 + 264 + complete(cycle->a_signal); 265 + wait_for_completion(&cycle->b_signal); 266 + 267 + err = ww_mutex_lock(cycle->b_mutex, &ctx); 268 + if (err == -EDEADLK) { 269 + ww_mutex_unlock(&cycle->a_mutex); 270 + ww_mutex_lock_slow(cycle->b_mutex, &ctx); 271 + err = ww_mutex_lock(&cycle->a_mutex, &ctx); 272 + } 273 + 274 + if (!err) 275 + ww_mutex_unlock(cycle->b_mutex); 276 + ww_mutex_unlock(&cycle->a_mutex); 277 + ww_acquire_fini(&ctx); 278 + 279 + cycle->result = err; 280 + } 281 + 282 + static int __test_cycle(unsigned int nthreads) 283 + { 284 + struct test_cycle *cycles; 285 + unsigned int n, last = nthreads - 1; 286 + int ret; 287 + 288 + cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL); 289 + if (!cycles) 290 + return -ENOMEM; 291 + 292 + for (n = 0; n < nthreads; n++) { 293 + struct test_cycle *cycle = &cycles[n]; 294 + 295 + ww_mutex_init(&cycle->a_mutex, &ww_class); 296 + if (n == last) 297 + cycle->b_mutex = &cycles[0].a_mutex; 298 + else 299 + cycle->b_mutex = &cycles[n + 1].a_mutex; 300 + 301 + if (n == 0) 302 + cycle->a_signal = &cycles[last].b_signal; 303 + else 304 + cycle->a_signal = &cycles[n 
- 1].b_signal; 305 + init_completion(&cycle->b_signal); 306 + 307 + INIT_WORK(&cycle->work, test_cycle_work); 308 + cycle->result = 0; 309 + } 310 + 311 + for (n = 0; n < nthreads; n++) 312 + queue_work(wq, &cycles[n].work); 313 + 314 + flush_workqueue(wq); 315 + 316 + ret = 0; 317 + for (n = 0; n < nthreads; n++) { 318 + struct test_cycle *cycle = &cycles[n]; 319 + 320 + if (!cycle->result) 321 + continue; 322 + 323 + pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n", 324 + n, nthreads, cycle->result); 325 + ret = -EINVAL; 326 + break; 327 + } 328 + 329 + for (n = 0; n < nthreads; n++) 330 + ww_mutex_destroy(&cycles[n].a_mutex); 331 + kfree(cycles); 332 + return ret; 333 + } 334 + 335 + static int test_cycle(unsigned int ncpus) 336 + { 337 + unsigned int n; 338 + int ret; 339 + 340 + for (n = 2; n <= ncpus + 1; n++) { 341 + ret = __test_cycle(n); 342 + if (ret) 343 + return ret; 344 + } 345 + 346 + return 0; 347 + } 348 + 248 349 static int __init test_ww_mutex_init(void) 249 350 { 351 + int ncpus = num_online_cpus(); 250 352 int ret; 353 + 354 + wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0); 355 + if (!wq) 356 + return -ENOMEM; 251 357 252 358 ret = test_mutex(); 253 359 if (ret) ··· 373 263 if (ret) 374 264 return ret; 375 265 266 + ret = test_cycle(ncpus); 267 + if (ret) 268 + return ret; 269 + 376 270 return 0; 377 271 } 378 272 379 273 static void __exit test_ww_mutex_exit(void) 380 274 { 275 + destroy_workqueue(wq); 381 276 } 382 277 383 278 module_init(test_ww_mutex_init);