Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxl: Drop cxl_device_lock()

Now that all CXL subsystem locking is validated with custom lock
classes, there is no need for the custom usage of the lockdep_mutex.

Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Ben Widawsky <ben.widawsky@intel.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: https://lore.kernel.org/r/165055520383.3745911.53447786039115271.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

6 files changed, 33 insertions(+), 126 deletions(-)
+2 -2
drivers/cxl/core/pmem.c
··· 124 124 * work to flush. Once the state has been changed to 'dead' then no new 125 125 * work can be queued by user-triggered bind. 126 126 */ 127 - cxl_device_lock(&cxl_nvb->dev); 127 + device_lock(&cxl_nvb->dev); 128 128 flush = cxl_nvb->state != CXL_NVB_NEW; 129 129 cxl_nvb->state = CXL_NVB_DEAD; 130 - cxl_device_unlock(&cxl_nvb->dev); 130 + device_unlock(&cxl_nvb->dev); 131 131 132 132 /* 133 133 * Even though the device core will trigger device_release_driver()
+23 -32
drivers/cxl/core/port.c
··· 312 312 struct cxl_port *port = to_cxl_port(dev); 313 313 struct cxl_ep *ep, *_e; 314 314 315 - cxl_device_lock(dev); 315 + device_lock(dev); 316 316 list_for_each_entry_safe(ep, _e, &port->endpoints, list) 317 317 cxl_ep_release(ep); 318 - cxl_device_unlock(dev); 318 + device_unlock(dev); 319 319 ida_free(&cxl_port_ida, port->id); 320 320 kfree(port); 321 321 } ··· 556 556 return 0; 557 557 558 558 port = to_cxl_port(dev); 559 - cxl_device_lock(dev); 559 + device_lock(dev); 560 560 list_for_each_entry(dport, &port->dports, list) { 561 561 iter = match; 562 562 while (iter) { ··· 566 566 } 567 567 } 568 568 out: 569 - cxl_device_unlock(dev); 569 + device_unlock(dev); 570 570 571 571 return !!iter; 572 572 } ··· 625 625 static void cond_cxl_root_lock(struct cxl_port *port) 626 626 { 627 627 if (is_cxl_root(port)) 628 - cxl_device_lock(&port->dev); 628 + device_lock(&port->dev); 629 629 } 630 630 631 631 static void cond_cxl_root_unlock(struct cxl_port *port) 632 632 { 633 633 if (is_cxl_root(port)) 634 - cxl_device_unlock(&port->dev); 634 + device_unlock(&port->dev); 635 635 } 636 636 637 637 static void cxl_dport_remove(void *data) ··· 738 738 { 739 739 struct cxl_ep *dup; 740 740 741 - cxl_device_lock(&port->dev); 741 + device_lock(&port->dev); 742 742 if (port->dead) { 743 - cxl_device_unlock(&port->dev); 743 + device_unlock(&port->dev); 744 744 return -ENXIO; 745 745 } 746 746 dup = find_ep(port, new->ep); 747 747 if (!dup) 748 748 list_add_tail(&new->list, &port->endpoints); 749 - cxl_device_unlock(&port->dev); 749 + device_unlock(&port->dev); 750 750 751 751 return dup ? 
-EEXIST : 0; 752 752 } ··· 856 856 goto out; 857 857 parent = &parent_port->dev; 858 858 859 - cxl_device_lock(parent); 859 + device_lock(parent); 860 860 if (parent->driver && endpoint->uport) { 861 861 devm_release_action(parent, cxl_unlink_uport, endpoint); 862 862 devm_release_action(parent, unregister_port, endpoint); 863 863 } 864 - cxl_device_unlock(parent); 864 + device_unlock(parent); 865 865 put_device(parent); 866 866 out: 867 867 put_device(&endpoint->dev); ··· 922 922 } 923 923 924 924 parent_port = to_cxl_port(port->dev.parent); 925 - cxl_device_lock(&parent_port->dev); 925 + device_lock(&parent_port->dev); 926 926 if (!parent_port->dev.driver) { 927 927 /* 928 928 * The bottom-up race to delete the port lost to a ··· 930 930 * parent_port ->remove() will have cleaned up all 931 931 * descendants. 932 932 */ 933 - cxl_device_unlock(&parent_port->dev); 933 + device_unlock(&parent_port->dev); 934 934 put_device(&port->dev); 935 935 continue; 936 936 } 937 937 938 - cxl_device_lock(&port->dev); 938 + device_lock(&port->dev); 939 939 ep = find_ep(port, &cxlmd->dev); 940 940 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", 941 941 ep ? 
dev_name(ep->ep) : "", dev_name(&port->dev)); ··· 950 950 port->dead = true; 951 951 list_splice_init(&port->dports, &reap_dports); 952 952 } 953 - cxl_device_unlock(&port->dev); 953 + device_unlock(&port->dev); 954 954 955 955 if (!list_empty(&reap_dports)) { 956 956 dev_dbg(&cxlmd->dev, "delete %s\n", ··· 958 958 delete_switch_port(port, &reap_dports); 959 959 } 960 960 put_device(&port->dev); 961 - cxl_device_unlock(&parent_port->dev); 961 + device_unlock(&parent_port->dev); 962 962 } 963 963 } 964 964 ··· 1006 1006 return -EAGAIN; 1007 1007 } 1008 1008 1009 - cxl_device_lock(&parent_port->dev); 1009 + device_lock(&parent_port->dev); 1010 1010 if (!parent_port->dev.driver) { 1011 1011 dev_warn(&cxlmd->dev, 1012 1012 "port %s:%s disabled, failed to enumerate CXL.mem\n", ··· 1024 1024 get_device(&port->dev); 1025 1025 } 1026 1026 out: 1027 - cxl_device_unlock(&parent_port->dev); 1027 + device_unlock(&parent_port->dev); 1028 1028 1029 1029 if (IS_ERR(port)) 1030 1030 rc = PTR_ERR(port); ··· 1135 1135 { 1136 1136 struct cxl_dport *dport; 1137 1137 1138 - cxl_device_lock(&port->dev); 1138 + device_lock(&port->dev); 1139 1139 list_for_each_entry(dport, &port->dports, list) 1140 1140 if (dport->dport == dev) { 1141 - cxl_device_unlock(&port->dev); 1141 + device_unlock(&port->dev); 1142 1142 return dport; 1143 1143 } 1144 1144 1145 - cxl_device_unlock(&port->dev); 1145 + device_unlock(&port->dev); 1146 1146 return NULL; 1147 1147 } 1148 1148 EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL); ··· 1384 1384 1385 1385 port = to_cxl_port(cxld->dev.parent); 1386 1386 1387 - cxl_device_lock(&port->dev); 1387 + device_lock(&port->dev); 1388 1388 rc = cxl_decoder_add_locked(cxld, target_map); 1389 - cxl_device_unlock(&port->dev); 1389 + device_unlock(&port->dev); 1390 1390 1391 1391 return rc; 1392 1392 } ··· 1457 1457 { 1458 1458 int rc; 1459 1459 1460 - /* 1461 - * Take the CXL nested lock since the driver core only holds 1462 - * @dev->mutex and not @dev->lockdep_mutex. 
1463 - */ 1464 - cxl_nested_lock(dev); 1465 1460 rc = to_cxl_drv(dev->driver)->probe(dev); 1466 - cxl_nested_unlock(dev); 1467 - 1468 1461 dev_dbg(dev, "probe: %d\n", rc); 1469 1462 return rc; 1470 1463 } ··· 1466 1473 { 1467 1474 struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); 1468 1475 1469 - cxl_nested_lock(dev); 1470 1476 if (cxl_drv->remove) 1471 1477 cxl_drv->remove(dev); 1472 - cxl_nested_unlock(dev); 1473 1478 } 1474 1479 1475 1480 static struct workqueue_struct *cxl_bus_wq;
-78
drivers/cxl/cxl.h
··· 405 405 #define __mock static 406 406 #endif 407 407 408 - #ifdef CONFIG_PROVE_CXL_LOCKING 409 - enum cxl_lock_class { 410 - CXL_ANON_LOCK, 411 - CXL_NVDIMM_LOCK, 412 - CXL_NVDIMM_BRIDGE_LOCK, 413 - CXL_PORT_LOCK, 414 - /* 415 - * Be careful to add new lock classes here, CXL_PORT_LOCK is 416 - * extended by the port depth, so a maximum CXL port topology 417 - * depth would need to be defined first. 418 - */ 419 - }; 420 - 421 - static inline void cxl_nested_lock(struct device *dev) 422 - { 423 - if (is_cxl_port(dev)) { 424 - struct cxl_port *port = to_cxl_port(dev); 425 - 426 - mutex_lock_nested(&dev->lockdep_mutex, 427 - CXL_PORT_LOCK + port->depth); 428 - } else if (is_cxl_decoder(dev)) { 429 - struct cxl_port *port = to_cxl_port(dev->parent); 430 - 431 - /* 432 - * A decoder is the immediate child of a port, so set 433 - * its lock class equal to other child device siblings. 434 - */ 435 - mutex_lock_nested(&dev->lockdep_mutex, 436 - CXL_PORT_LOCK + port->depth + 1); 437 - } else if (is_cxl_nvdimm_bridge(dev)) 438 - mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_BRIDGE_LOCK); 439 - else if (is_cxl_nvdimm(dev)) 440 - mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_LOCK); 441 - else 442 - mutex_lock_nested(&dev->lockdep_mutex, CXL_ANON_LOCK); 443 - } 444 - 445 - static inline void cxl_nested_unlock(struct device *dev) 446 - { 447 - mutex_unlock(&dev->lockdep_mutex); 448 - } 449 - 450 - static inline void cxl_device_lock(struct device *dev) 451 - { 452 - /* 453 - * For double lock errors the lockup will happen before lockdep 454 - * warns at cxl_nested_lock(), so assert explicitly. 
455 - */ 456 - lockdep_assert_not_held(&dev->lockdep_mutex); 457 - 458 - device_lock(dev); 459 - cxl_nested_lock(dev); 460 - } 461 - 462 - static inline void cxl_device_unlock(struct device *dev) 463 - { 464 - cxl_nested_unlock(dev); 465 - device_unlock(dev); 466 - } 467 - #else 468 - static inline void cxl_nested_lock(struct device *dev) 469 - { 470 - } 471 - 472 - static inline void cxl_nested_unlock(struct device *dev) 473 - { 474 - } 475 - 476 - static inline void cxl_device_lock(struct device *dev) 477 - { 478 - device_lock(dev); 479 - } 480 - 481 - static inline void cxl_device_unlock(struct device *dev) 482 - { 483 - device_unlock(dev); 484 - } 485 - #endif 486 408 #endif /* __CXL_H__ */
+2 -2
drivers/cxl/mem.c
··· 187 187 return -ENXIO; 188 188 } 189 189 190 - cxl_device_lock(&parent_port->dev); 190 + device_lock(&parent_port->dev); 191 191 if (!parent_port->dev.driver) { 192 192 dev_err(dev, "CXL port topology %s not enabled\n", 193 193 dev_name(&parent_port->dev)); ··· 197 197 198 198 rc = create_endpoint(cxlmd, parent_port); 199 199 out: 200 - cxl_device_unlock(&parent_port->dev); 200 + device_unlock(&parent_port->dev); 201 201 put_device(&parent_port->dev); 202 202 203 203 /*
+6 -6
drivers/cxl/pmem.c
··· 43 43 if (!cxl_nvb) 44 44 return -ENXIO; 45 45 46 - cxl_device_lock(&cxl_nvb->dev); 46 + device_lock(&cxl_nvb->dev); 47 47 if (!cxl_nvb->nvdimm_bus) { 48 48 rc = -ENXIO; 49 49 goto out; ··· 68 68 dev_set_drvdata(dev, nvdimm); 69 69 rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm); 70 70 out: 71 - cxl_device_unlock(&cxl_nvb->dev); 71 + device_unlock(&cxl_nvb->dev); 72 72 put_device(&cxl_nvb->dev); 73 73 74 74 return rc; ··· 233 233 struct nvdimm_bus *victim_bus = NULL; 234 234 bool release = false, rescan = false; 235 235 236 - cxl_device_lock(&cxl_nvb->dev); 236 + device_lock(&cxl_nvb->dev); 237 237 switch (cxl_nvb->state) { 238 238 case CXL_NVB_ONLINE: 239 239 if (!online_nvdimm_bus(cxl_nvb)) { ··· 251 251 default: 252 252 break; 253 253 } 254 - cxl_device_unlock(&cxl_nvb->dev); 254 + device_unlock(&cxl_nvb->dev); 255 255 256 256 if (release) 257 257 device_release_driver(&cxl_nvb->dev); ··· 327 327 return 0; 328 328 329 329 cxl_nvb = to_cxl_nvdimm_bridge(dev); 330 - cxl_device_lock(dev); 330 + device_lock(dev); 331 331 cxl_nvb->state = CXL_NVB_NEW; 332 - cxl_device_unlock(dev); 332 + device_unlock(dev); 333 333 334 334 return 0; 335 335 }
-6
lib/Kconfig.debug
··· 1559 1559 help 1560 1560 Enable lockdep to validate nd_device_lock() usage. 1561 1561 1562 - config PROVE_CXL_LOCKING 1563 - bool "CXL" 1564 - depends on CXL_BUS 1565 - help 1566 - Enable lockdep to validate cxl_device_lock() usage. 1567 - 1568 1562 endchoice 1569 1563 1570 1564 endmenu # lock debugging