···
 	cxlrd = to_cxl_root_decoder(dev);
 	cxlsd = &cxlrd->cxlsd;

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	for (int i = 0; i < cxlsd->nr_targets; i++) {
 		if (host_bridge == cxlsd->target[i]->dport_dev)
 			return 1;
···
 	bool is_root;
 	int rc;

-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_rwsem.dpa);

 	struct xarray *usp_xa __free(free_perf_xa) =
 		kzalloc(sizeof(*usp_xa), GFP_KERNEL);
···
 {
 	struct cxl_dpa_perf *perf;

-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_rwsem.dpa);

 	perf = cxled_get_dpa_perf(cxled);
 	if (IS_ERR(perf))
+28-4
drivers/cxl/core/core.h
···
 #define __CXL_CORE_H__

 #include <cxl/mailbox.h>
+#include <linux/rwsem.h>

 extern const struct device_type cxl_nvdimm_bridge_type;
 extern const struct device_type cxl_nvdimm_type;
 extern const struct device_type cxl_pmu_type;

 extern struct attribute_group cxl_base_attribute_group;
+
+enum cxl_detach_mode {
+	DETACH_ONLY,
+	DETACH_INVALIDATE,
+};

 #ifdef CONFIG_CXL_REGION
 extern struct device_attribute dev_attr_create_pmem_region;
···
 extern const struct device_type cxl_pmem_region_type;
 extern const struct device_type cxl_dax_region_type;
 extern const struct device_type cxl_region_type;
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+
+int cxl_decoder_detach(struct cxl_region *cxlr,
+		       struct cxl_endpoint_decoder *cxled, int pos,
+		       enum cxl_detach_mode mode);
+
 #define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
 #define CXL_REGION_TYPE(x) (&cxl_region_type)
 #define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
···
 {
 	return 0;
 }
-static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static inline int cxl_decoder_detach(struct cxl_region *cxlr,
+				     struct cxl_endpoint_decoder *cxled,
+				     int pos, enum cxl_detach_mode mode)
 {
 }
 static inline int cxl_region_init(void)
···
 #define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
 #define PCI_CAP_EXP_SIZEOF 0x3c

-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+struct cxl_rwsem {
+	/*
+	 * All changes to HPA (interleave configuration) occur with this
+	 * lock held for write.
+	 */
+	struct rw_semaphore region;
+	/*
+	 * All changes to a device DPA space occur with this lock held
+	 * for write.
+	 */
+	struct rw_semaphore dpa;
+};
+
+extern struct cxl_rwsem cxl_rwsem;

 int cxl_memdev_init(void);
 void cxl_memdev_exit(void);
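For orientation, a minimal sketch (not part of the patch) of the nesting order the new struct documents and that the conversion below follows throughout: cxl_rwsem.region is the outer lock, cxl_rwsem.dpa the inner one. The function name is hypothetical.

/* Sketch only: read-side nesting order used throughout the series */
static int cxl_example_snapshot(void)
{
	guard(rwsem_read)(&cxl_rwsem.region);	/* outer: HPA/interleave state */
	guard(rwsem_read)(&cxl_rwsem.dpa);	/* inner: device DPA state */

	/* both semaphores held here; released in reverse order on return */
	return 0;
}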
+20-24
drivers/cxl/core/edac.c
···
 			flags, min_cycle);
 	}

-	struct rw_semaphore *region_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_region_rwsem);
-	if (!region_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+		return ret;

 	cxlr = cxl_ps_ctx->cxlr;
 	p = &cxlr->params;
···
 	struct cxl_region *cxlr;
 	int ret, i;

-	struct rw_semaphore *region_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_region_rwsem);
-	if (!region_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+		return ret;

 	cxlr = cxl_ps_ctx->cxlr;
 	p = &cxlr->params;
···
 	struct cxl_memdev_sparing_in_payload sparing_pi;
 	struct cxl_event_dram *rec = NULL;
 	u16 validity_flags = 0;
+	int ret;

-	struct rw_semaphore *region_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_region_rwsem);
-	if (!region_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+		return ret;

-	struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_dpa_rwsem);
-	if (!dpa_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+		return ret;

 	if (!cxl_sparing_ctx->cap_safe_when_in_use) {
 		/* Memory to repair must be offline */
···
 	struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
 	struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
 	struct cxl_mem_repair_attrbs attrbs = { 0 };
+	int ret;

-	struct rw_semaphore *region_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_region_rwsem);
-	if (!region_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+		return ret;

-	struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
-		rwsem_read_intr_acquire(&cxl_dpa_rwsem);
-	if (!dpa_lock)
-		return -EINTR;
+	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+		return ret;

 	if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
 		/* Memory to repair must be offline */
+62-56
drivers/cxl/core/hdm.c
···
  * for enumerating these registers and capabilities.
  */

-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+	.region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+	.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};

 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			   int *target_map)
···
 {
 	struct resource *p1, *p2;

-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.dpa);
 	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
 		__cxl_dpa_debug(file, p1, 0);
 		for (p2 = p1->child; p2; p2 = p2->sibling)
···
 	struct resource *res = cxled->dpa_res;
 	resource_size_t skip_start;

-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.dpa);

 	/* save @skip_start, before @res is released */
 	skip_start = res->start - cxled->skip;
···

 static void cxl_dpa_release(void *cxled)
 {
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.dpa);
 	__cxl_dpa_release(cxled);
 }
···
 {
 	struct cxl_port *port = cxled_to_port(cxled);

-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.dpa);
 	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
 	__cxl_dpa_release(cxled);
 }
···
 	struct resource *res;
 	int rc;

-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.dpa);

 	if (!len) {
 		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
···
 {
 	struct device *dev = cxlds->dev;

-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.dpa);

 	if (cxlds->nr_partitions)
 		return -EBUSY;
···
 	struct cxl_port *port = cxled_to_port(cxled);
 	int rc;

-	down_write(&cxl_dpa_rwsem);
-	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
-	up_write(&cxl_dpa_rwsem);
+	scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+		rc = __cxl_dpa_reserve(cxled, base, len, skipped);

 	if (rc)
 		return rc;
···

 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
 {
-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.dpa);
 	if (cxled->dpa_res)
 		return resource_size(cxled->dpa_res);
···
 {
 	resource_size_t base = -1;

-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_rwsem.dpa);
 	if (cxled->dpa_res)
 		base = cxled->dpa_res->start;
···
 	struct cxl_port *port = cxled_to_port(cxled);
 	struct device *dev = &cxled->cxld.dev;

-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.dpa);
 	if (!cxled->dpa_res)
 		return 0;
 	if (cxled->cxld.region) {
···
 	struct device *dev = &cxled->cxld.dev;
 	int part;

-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.dpa);
 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
 		return -EBUSY;
···
 	struct resource *p, *last;
 	int part;

-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.dpa);
 	if (cxled->cxld.region) {
 		dev_dbg(dev, "decoder attached to %s\n",
 			dev_name(&cxled->cxld.region->dev));
···
 	return -ETIMEDOUT;
 }

-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
 {
-	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
-	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
-	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
-	int id = cxld->id, rc;
+	int id = cxld->id;
 	u64 base, size;
 	u32 ctrl;

-	if (cxld->flags & CXL_DECODER_F_ENABLE)
-		return 0;
-
-	if (cxl_num_decoders_committed(port) != id) {
-		dev_dbg(&port->dev,
-			"%s: out of order commit, expected decoder%d.%d\n",
-			dev_name(&cxld->dev), port->id,
-			cxl_num_decoders_committed(port));
-		return -EBUSY;
-	}
-
-	/*
-	 * For endpoint decoders hosted on CXL memory devices that
-	 * support the sanitize operation, make sure sanitize is not in-flight.
-	 */
-	if (is_endpoint_decoder(&cxld->dev)) {
-		struct cxl_endpoint_decoder *cxled =
-			to_cxl_endpoint_decoder(&cxld->dev);
-		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-		struct cxl_memdev_state *mds =
-			to_cxl_memdev_state(cxlmd->cxlds);
-
-		if (mds && mds->security.sanitize_active) {
-			dev_dbg(&cxlmd->dev,
-				"attempted to commit %s during sanitize\n",
-				dev_name(&cxld->dev));
-			return -EBUSY;
-		}
-	}
-
-	down_read(&cxl_dpa_rwsem);
 	/* common decoder settings */
 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
 	cxld_set_interleave(cxld, &ctrl);
···
 	}

 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
-	up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+	int id = cxld->id, rc;
+
+	if (cxld->flags & CXL_DECODER_F_ENABLE)
+		return 0;
+
+	if (cxl_num_decoders_committed(port) != id) {
+		dev_dbg(&port->dev,
+			"%s: out of order commit, expected decoder%d.%d\n",
+			dev_name(&cxld->dev), port->id,
+			cxl_num_decoders_committed(port));
+		return -EBUSY;
+	}
+
+	/*
+	 * For endpoint decoders hosted on CXL memory devices that
+	 * support the sanitize operation, make sure sanitize is not in-flight.
+	 */
+	if (is_endpoint_decoder(&cxld->dev)) {
+		struct cxl_endpoint_decoder *cxled =
+			to_cxl_endpoint_decoder(&cxld->dev);
+		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+		struct cxl_memdev_state *mds =
+			to_cxl_memdev_state(cxlmd->cxlds);
+
+		if (mds && mds->security.sanitize_active) {
+			dev_dbg(&cxlmd->dev,
+				"attempted to commit %s during sanitize\n",
+				dev_name(&cxld->dev));
+			return -EBUSY;
+		}
+	}
+
+	scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+		setup_hw_decoder(cxld, hdm);

 	port->commit_end++;
 	rc = cxld_await_commit(hdm, cxld->id);
···
 {
 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

 	/*
 	 * Once the highest committed decoder is disabled, free any other
···
 		"%s: out of order reset, expected decoder%d.%d\n",
 		dev_name(&cxld->dev), port->id, port->commit_end);

-	down_read(&cxl_dpa_rwsem);
 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
 	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
···
 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
 	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
 	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
-	up_read(&cxl_dpa_rwsem);

 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
···
 	else
 		cxld->target_type = CXL_DECODER_DEVMEM;

-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.region);
 	if (cxld->id != cxl_num_decoders_committed(port)) {
 		dev_warn(&port->dev,
 			 "decoder%d.%d: Committed out of order\n",
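For reference, a minimal sketch (not part of the patch) of the scoped_guard() shape cxl_decoder_commit() now relies on: cxl_rwsem.dpa is read-held only around the hardware programming helper, so the subsequent commit poll runs unlocked. Only the function name example_commit() is hypothetical; setup_hw_decoder() and cxld_await_commit() are the helpers shown in the hunk above.

/* Sketch only: DPA lock scoped to the single guarded statement */
static int example_commit(struct cxl_decoder *cxld, void __iomem *hdm)
{
	scoped_guard(rwsem_read, &cxl_rwsem.dpa)
		setup_hw_decoder(cxld, hdm);

	/* cxl_rwsem.dpa has already been dropped by the time we poll */
	return cxld_await_commit(hdm, cxld->id);
}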
+6-7
drivers/cxl/core/mbox.c
···
 	 * translations. Take topology mutation locks and lookup
 	 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
 	 */
-	guard(rwsem_read)(&cxl_region_rwsem);
-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
+	guard(rwsem_read)(&cxl_rwsem.dpa);

 	dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
···
 	/* synchronize with cxl_mem_probe() and decoder write operations */
 	guard(device)(&cxlmd->dev);
 	endpoint = cxlmd->endpoint;
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	/*
 	 * Require an endpoint to be safe otherwise the driver can not
 	 * be sure that the device is unmapped.
···
 	int nr_records = 0;
 	int rc;

-	rc = mutex_lock_interruptible(&mds->poison.lock);
-	if (rc)
+	ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+	if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
 		return rc;

 	po = mds->poison.list_out;
···
 		}
 	} while (po->flags & CXL_POISON_FLAG_MORE);

-	mutex_unlock(&mds->poison.lock);
 	return rc;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
···
 		return rc;
 	}

-	mutex_init(&mds->poison.lock);
+	mutex_init(&mds->poison.mutex);
 	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
+18-32
drivers/cxl/core/memdev.c
···
 	if (!port || !is_cxl_endpoint(port))
 		return -EINVAL;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
 		return rc;

-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
+	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
 		return rc;
-	}

 	if (cxl_num_decoders_committed(port) == 0) {
 		/* No regions mapped to this memdev */
···
 		/* Regions mapped, collect poison by endpoint */
 		rc = cxl_get_poison_by_endpoint(port);
 	}
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);

 	return rc;
 }
···
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
 		return rc;

-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
+	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
 		return rc;
-	}

 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;

 	inject.address = cpu_to_le64(dpa);
 	mbox_cmd = (struct cxl_mbox_cmd) {
···
 	};
 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;

 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
···
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);

-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
···
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
 		return rc;

-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
+	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
 		return rc;
-	}

 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;

 	/*
 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
···

 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;

 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
···
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);

-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
+9-18
drivers/cxl/core/port.c
···
  * instantiated by the core.
  */

-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-DECLARE_RWSEM(cxl_region_rwsem);
-
 static DEFINE_IDA(cxl_port_ida);
 static DEFINE_XARRAY(cxl_root_buses);

 int cxl_num_decoders_committed(struct cxl_port *port)
 {
-	lockdep_assert_held(&cxl_region_rwsem);
+	lockdep_assert_held(&cxl_rwsem.region);

 	return port->commit_end + 1;
 }
···
 	ssize_t offset;
 	int rc;

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	rc = emit_target_list(cxlsd, buf);
 	if (rc < 0)
 		return rc;
···
 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
-	/* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+	/* without @cxl_rwsem.dpa, make sure @part is not reloaded */
 	int part = READ_ONCE(cxled->part);
 	const char *desc;
···
 {
 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.dpa);
 	return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
 }
 static DEVICE_ATTR_RO(dpa_resource);
···
 {
 	struct cxl_port *port = to_cxl_port(dev);

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
 }
···
 	if (xa_empty(&port->dports))
 		return -EINVAL;

-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.region);
 	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
 		struct cxl_dport *dport = find_dport(port, target_map[i]);
···

 static void cxld_unregister(void *dev)
 {
-	struct cxl_endpoint_decoder *cxled;
-
-	if (is_endpoint_decoder(dev)) {
-		cxled = to_cxl_endpoint_decoder(dev);
-		cxl_decoder_kill_region(cxled);
-	}
+	if (is_endpoint_decoder(dev))
+		cxl_decoder_detach(NULL, to_cxl_endpoint_decoder(dev), -1,
+				   DETACH_INVALIDATE);

 	device_unregister(dev);
 }
+249-228
drivers/cxl/core/region.c
···
 	struct cxl_region_params *p = &cxlr->params;
 	ssize_t rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
 		return rc;
 	if (cxlr->mode != CXL_PARTMODE_PMEM)
-		rc = sysfs_emit(buf, "\n");
-	else
-		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%pUb\n", &p->uuid);
 }

 static int is_dup(struct device *match, void *data)
···
 	if (!is_cxl_region(match))
 		return 0;

-	lockdep_assert_held(&cxl_region_rwsem);
+	lockdep_assert_held(&cxl_rwsem.region);
 	cxlr = to_cxl_region(match);
 	p = &cxlr->params;
···
 	if (uuid_is_null(&temp))
 		return -EINVAL;

-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &region_rwsem)))
 		return rc;

 	if (uuid_equal(&p->uuid, &temp))
-		goto out;
+		return len;

-	rc = -EBUSY;
 	if (p->state >= CXL_CONFIG_ACTIVE)
-		goto out;
+		return -EBUSY;

 	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
 	if (rc < 0)
-		goto out;
+		return rc;

 	uuid_copy(&p->uuid, &temp);
-out:
-	up_write(&cxl_region_rwsem);

-	if (rc)
-		return rc;
 	return len;
 }
 static DEVICE_ATTR_RW(uuid);
···
 	return rc;
 }

+static int queue_reset(struct cxl_region *cxlr)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	int rc;
+
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+		return rc;
+
+	/* Already in the requested state? */
+	if (p->state < CXL_CONFIG_COMMIT)
+		return 0;
+
+	p->state = CXL_CONFIG_RESET_PENDING;
+
+	return 0;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	int rc;
+
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+		return rc;
+
+	/* Already in the requested state? */
+	if (p->state >= CXL_CONFIG_COMMIT)
+		return 0;
+
+	/* Not ready to commit? */
+	if (p->state < CXL_CONFIG_ACTIVE)
+		return -ENXIO;
+
+	/*
+	 * Invalidate caches before region setup to drop any speculative
+	 * consumption of this address space
+	 */
+	rc = cxl_region_invalidate_memregion(cxlr);
+	if (rc)
+		return rc;
+
+	rc = cxl_region_decode_commit(cxlr);
+	if (rc)
+		return rc;
+
+	p->state = CXL_CONFIG_COMMIT;
+
+	return 0;
+}
+
 static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t len)
 {
···
 	if (rc)
 		return rc;

-	rc = down_write_killable(&cxl_region_rwsem);
+	if (commit) {
+		rc = __commit(cxlr);
+		if (rc)
+			return rc;
+		return len;
+	}
+
+	rc = queue_reset(cxlr);
 	if (rc)
 		return rc;
-
-	/* Already in the requested state? */
-	if (commit && p->state >= CXL_CONFIG_COMMIT)
-		goto out;
-	if (!commit && p->state < CXL_CONFIG_COMMIT)
-		goto out;
-
-	/* Not ready to commit? */
-	if (commit && p->state < CXL_CONFIG_ACTIVE) {
-		rc = -ENXIO;
-		goto out;
-	}

 	/*
-	 * Invalidate caches before region setup to drop any speculative
-	 * consumption of this address space
+	 * Unmap the region and depend the reset-pending state to ensure
+	 * it does not go active again until post reset
 	 */
-	rc = cxl_region_invalidate_memregion(cxlr);
-	if (rc)
-		goto out;
+	device_release_driver(&cxlr->dev);

-	if (commit) {
-		rc = cxl_region_decode_commit(cxlr);
-		if (rc == 0)
-			p->state = CXL_CONFIG_COMMIT;
-	} else {
-		p->state = CXL_CONFIG_RESET_PENDING;
-		up_write(&cxl_region_rwsem);
-		device_release_driver(&cxlr->dev);
-		down_write(&cxl_region_rwsem);
+	/*
+	 * With the reset pending take cxl_rwsem.region unconditionally
+	 * to ensure the reset gets handled before returning.
+	 */
+	guard(rwsem_write)(&cxl_rwsem.region);

-		/*
-		 * The lock was dropped, so need to revalidate that the reset is
-		 * still pending.
-		 */
-		if (p->state == CXL_CONFIG_RESET_PENDING) {
-			cxl_region_decode_reset(cxlr, p->interleave_ways);
-			p->state = CXL_CONFIG_ACTIVE;
-		}
+	/*
+	 * Revalidate that the reset is still pending in case another
+	 * thread already handled this reset.
+	 */
+	if (p->state == CXL_CONFIG_RESET_PENDING) {
+		cxl_region_decode_reset(cxlr, p->interleave_ways);
+		p->state = CXL_CONFIG_ACTIVE;
 	}

-out:
-	up_write(&cxl_region_rwsem);
-
-	if (rc)
-		return rc;
 	return len;
 }
···
 	struct cxl_region_params *p = &cxlr->params;
 	ssize_t rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
 }
 static DEVICE_ATTR_RW(commit);
···
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
+	int rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%d\n", p->interleave_ways);
 }

 static const struct attribute_group *get_cxl_region_target_group(void);
···
 		return -EINVAL;
 	}

-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
 		return rc;
-	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
-		rc = -EBUSY;
-		goto out;
-	}
+
+	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+		return -EBUSY;

 	save = p->interleave_ways;
 	p->interleave_ways = val;
 	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
-	if (rc)
+	if (rc) {
 		p->interleave_ways = save;
-out:
-	up_write(&cxl_region_rwsem);
-	if (rc)
 		return rc;
+	}
+
 	return len;
 }
 static DEVICE_ATTR_RW(interleave_ways);
···
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
+	int rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%d\n", p->interleave_granularity);
 }

 static ssize_t interleave_granularity_store(struct device *dev,
···
 	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
 		return -EINVAL;

-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
 		return rc;
-	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
-		rc = -EBUSY;
-		goto out;
-	}
+
+	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+		return -EBUSY;

 	p->interleave_granularity = val;
-out:
-	up_write(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+
 	return len;
 }
 static DEVICE_ATTR_RW(interleave_granularity);
···
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
 	u64 resource = -1ULL;
-	ssize_t rc;
+	int rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;
+
 	if (p->res)
 		resource = p->res->start;
-	rc = sysfs_emit(buf, "%#llx\n", resource);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%#llx\n", resource);
 }
 static DEVICE_ATTR_RO(resource);
···
 	struct resource *res;
 	u64 remainder = 0;

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

 	/* Nothing to do... */
 	if (p->res && resource_size(p->res) == size)
···
 	struct cxl_region_params *p = &cxlr->params;

 	if (device_is_registered(&cxlr->dev))
-		lockdep_assert_held_write(&cxl_region_rwsem);
+		lockdep_assert_held_write(&cxl_rwsem.region);
 	if (p->res) {
 		/*
 		 * Autodiscovered regions may not have been able to insert their
···
 {
 	struct cxl_region_params *p = &cxlr->params;

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

 	if (!p->res)
 		return 0;
···
 	if (rc)
 		return rc;

-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
 		return rc;

 	if (val)
 		rc = alloc_hpa(cxlr, val);
 	else
 		rc = free_hpa(cxlr);
-	up_write(&cxl_region_rwsem);

 	if (rc)
 		return rc;
···
 	u64 size = 0;
 	ssize_t rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;
 	if (p->res)
 		size = resource_size(p->res);
-	rc = sysfs_emit(buf, "%#llx\n", size);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%#llx\n", size);
 }
 static DEVICE_ATTR_RW(size);
···
 	struct cxl_endpoint_decoder *cxled;
 	int rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;

 	if (pos >= p->interleave_ways) {
 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
 			p->interleave_ways);
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}

 	cxled = p->targets[pos];
 	if (!cxled)
-		rc = sysfs_emit(buf, "\n");
-	else
-		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
 }

 static int check_commit_order(struct device *dev, void *data)
···
 	/*
 	 * This decoder is pinned registered as long as the endpoint decoder is
 	 * registered, and endpoint decoder unregistration holds the
-	 * cxl_region_rwsem over unregister events, so no need to hold on to
+	 * cxl_rwsem.region over unregister events, so no need to hold on to
 	 * this extra reference.
 	 */
 	put_device(dev);
···
 	unsigned long index;
 	int rc = -EBUSY;

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

 	cxl_rr = cxl_rr_load(port, cxlr);
 	if (cxl_rr) {
···
 	struct cxl_region_ref *cxl_rr;
 	struct cxl_ep *ep = NULL;

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

 	cxl_rr = cxl_rr_load(port, cxlr);
 	if (!cxl_rr)
···
 	return 0;
 }

-static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+__cxl_decoder_detach(struct cxl_region *cxlr,
+		     struct cxl_endpoint_decoder *cxled, int pos,
+		     enum cxl_detach_mode mode)
 {
-	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
-	struct cxl_region *cxlr = cxled->cxld.region;
 	struct cxl_region_params *p;
-	int rc = 0;

-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_rwsem.region);

-	if (!cxlr)
-		return 0;
+	if (!cxled) {
+		p = &cxlr->params;

-	p = &cxlr->params;
-	get_device(&cxlr->dev);
+		if (pos >= p->interleave_ways) {
+			dev_dbg(&cxlr->dev, "position %d out of range %d\n",
+				pos, p->interleave_ways);
+			return ERR_PTR(-ENXIO);
+		}
+
+		if (!p->targets[pos])
+			return NULL;
+		cxled = p->targets[pos];
+	} else {
+		cxlr = cxled->cxld.region;
+		if (!cxlr)
+			return NULL;
+		p = &cxlr->params;
+	}
+
+	if (mode == DETACH_INVALIDATE)
+		cxled->part = -1;

 	if (p->state > CXL_CONFIG_ACTIVE) {
 		cxl_region_decode_reset(cxlr, p->interleave_ways);
 		p->state = CXL_CONFIG_ACTIVE;
 	}

-	for (iter = ep_port; !is_cxl_root(iter);
+	for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
 	     iter = to_cxl_port(iter->dev.parent))
 		cxl_port_detach_region(iter, cxlr, cxled);
···
 		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
 			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
 			      cxled->pos);
-		goto out;
+		return NULL;
 	}

 	if (p->state == CXL_CONFIG_ACTIVE) {
···
 			.end = -1,
 		};

-	/* notify the region driver that one of its targets has departed */
-	up_write(&cxl_region_rwsem);
-	device_release_driver(&cxlr->dev);
-	down_write(&cxl_region_rwsem);
-out:
-	put_device(&cxlr->dev);
-	return rc;
+	get_device(&cxlr->dev);
+	return cxlr;
 }

-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+/*
+ * Cleanup a decoder's interest in a region. There are 2 cases to
+ * handle, removing an unknown @cxled from a known position in a region
+ * (detach_target()) or removing a known @cxled from an unknown @cxlr
+ * (cxld_unregister())
+ *
+ * When the detachment finds a region release the region driver.
+ */
+int cxl_decoder_detach(struct cxl_region *cxlr,
+		       struct cxl_endpoint_decoder *cxled, int pos,
+		       enum cxl_detach_mode mode)
 {
-	down_write(&cxl_region_rwsem);
-	cxled->part = -1;
-	cxl_region_detach(cxled);
-	up_write(&cxl_region_rwsem);
+	struct cxl_region *detach;
+
+	/* when the decoder is being destroyed lock unconditionally */
+	if (mode == DETACH_INVALIDATE) {
+		guard(rwsem_write)(&cxl_rwsem.region);
+		detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+	} else {
+		int rc;
+
+		ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+		if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+			return rc;
+		detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+	}
+
+	if (detach) {
+		device_release_driver(&detach->dev);
+		put_device(&detach->dev);
+	}
+	return 0;
+}
+
+static int __attach_target(struct cxl_region *cxlr,
+			   struct cxl_endpoint_decoder *cxled, int pos,
+			   unsigned int state)
+{
+	int rc;
+
+	if (state == TASK_INTERRUPTIBLE) {
+		ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+		if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+			return rc;
+		guard(rwsem_read)(&cxl_rwsem.dpa);
+		return cxl_region_attach(cxlr, cxled, pos);
+	}
+	guard(rwsem_write)(&cxl_rwsem.region);
+	guard(rwsem_read)(&cxl_rwsem.dpa);
+	return cxl_region_attach(cxlr, cxled, pos);
 }

 static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
 {
-	int rc = 0;
+	int rc = __attach_target(cxlr, cxled, pos, state);

-	if (state == TASK_INTERRUPTIBLE)
-		rc = down_write_killable(&cxl_region_rwsem);
-	else
-		down_write(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	if (rc == 0)
+		return 0;

-	down_read(&cxl_dpa_rwsem);
-	rc = cxl_region_attach(cxlr, cxled, pos);
-	up_read(&cxl_dpa_rwsem);
-	up_write(&cxl_region_rwsem);
-
-	if (rc)
-		dev_warn(cxled->cxld.dev.parent,
-			 "failed to attach %s to %s: %d\n",
-			 dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
-
+	dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n",
+		 dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
 	return rc;
 }

 static int detach_target(struct cxl_region *cxlr, int pos)
 {
-	struct cxl_region_params *p = &cxlr->params;
-	int rc;
-
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-
-	if (pos >= p->interleave_ways) {
-		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
-			p->interleave_ways);
-		rc = -ENXIO;
-		goto out;
-	}
-
-	if (!p->targets[pos]) {
-		rc = 0;
-		goto out;
-	}
-
-	rc = cxl_region_detach(p->targets[pos]);
-out:
-	up_write(&cxl_region_rwsem);
-	return rc;
+	return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY);
 }

 static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
···
 		return NOTIFY_DONE;

 	/*
-	 * No need to hold cxl_region_rwsem; region parameters are stable
+	 * No need to hold cxl_rwsem.region; region parameters are stable
 	 * within the cxl_region driver.
 	 */
 	region_nid = phys_to_target_node(cxlr->params.res->start);
···
 	int region_nid;

 	/*
-	 * No need to hold cxl_region_rwsem; region parameters are stable
+	 * No need to hold cxl_rwsem.region; region parameters are stable
 	 * within the cxl_region driver.
 	 */
 	region_nid = phys_to_target_node(cxlr->params.res->start);
···
 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
 	ssize_t rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
 		return rc;

 	if (cxld->region)
-		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
-	else
-		rc = sysfs_emit(buf, "\n");
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+	return sysfs_emit(buf, "\n");
 }
 DEVICE_ATTR_RO(region);
···
 	struct device *dev;
 	int i;

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	if (p->state != CXL_CONFIG_COMMIT)
 		return -ENXIO;
···
 	cxlr_pmem->hpa_range.start = p->res->start;
 	cxlr_pmem->hpa_range.end = p->res->end;

-	/* Snapshot the region configuration underneath the cxl_region_rwsem */
+	/* Snapshot the region configuration underneath the cxl_rwsem.region */
 	cxlr_pmem->nr_mappings = p->nr_targets;
 	for (i = 0; i < p->nr_targets; i++) {
 		struct cxl_endpoint_decoder *cxled = p->targets[i];
···
 	struct cxl_dax_region *cxlr_dax;
 	struct device *dev;

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	if (p->state != CXL_CONFIG_COMMIT)
 		return ERR_PTR(-ENXIO);
···
 	cxlr = to_cxl_region(dev);
 	p = &cxlr->params;

-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_rwsem.region);
 	if (p->res && p->res->start == r->start && p->res->end == r->end)
 		return 1;
···
 	struct resource *res;
 	int rc;

-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.region);
 	p = &cxlr->params;
 	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
 		dev_err(cxlmd->dev.parent,
···

 	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

-	down_read(&cxl_region_rwsem);
-	p = &cxlr->params;
-	attach = p->state == CXL_CONFIG_COMMIT;
-	up_read(&cxl_region_rwsem);
+	scoped_guard(rwsem_read, &cxl_rwsem.region) {
+		p = &cxlr->params;
+		attach = p->state == CXL_CONFIG_COMMIT;
+	}

 	if (attach) {
 		/*
···
 	if (!endpoint)
 		return ~0ULL;

-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write)(&cxl_rwsem.region);

 	xa_for_each(&endpoint->regions, index, iter) {
 		struct cxl_region_params *p = &iter->region->params;
···
 	unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
 }

-static int cxl_region_probe(struct device *dev)
+static int cxl_region_can_probe(struct cxl_region *cxlr)
 {
-	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
 	int rc;

-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc) {
+	ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) {
 		dev_dbg(&cxlr->dev, "probe interrupted\n");
 		return rc;
 	}

 	if (p->state < CXL_CONFIG_COMMIT) {
 		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}

 	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
 		dev_err(&cxlr->dev,
 			"failed to activate, re-commit region and retry\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
+
+	return 0;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+	struct cxl_region *cxlr = to_cxl_region(dev);
+	struct cxl_region_params *p = &cxlr->params;
+	int rc;
+
+	rc = cxl_region_can_probe(cxlr);
+	if (rc)
+		return rc;

 	/*
 	 * From this point on any path that changes the region's state away from
 	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
 	 */
-out:
-	up_read(&cxl_region_rwsem);
-
-	if (rc)
-		return rc;

 	cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
 	cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
+1-12
drivers/cxl/cxl.h
···
  * @nr_targets: number of targets
  * @cache_size: extended linear cache size if exists, otherwise zero.
  *
- * State transitions are protected by the cxl_region_rwsem
+ * State transitions are protected by cxl_rwsem.region
  */
 struct cxl_region_params {
 	enum cxl_config_state state;
···
 #endif

 u16 cxl_gpf_get_dvsec(struct device *dev);
-
-static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
-{
-	if (down_read_interruptible(rwsem))
-		return NULL;
-
-	return rwsem;
-}
-
-DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
-
 #endif /* __CXL_H__ */
+2-2
drivers/cxl/cxlmem.h
···
  * @max_errors: Maximum media error records held in device cache
  * @enabled_cmds: All poison commands enabled in the CEL
  * @list_out: The poison list payload returned by device
- * @lock: Protect reads of the poison list
+ * @mutex: Protect reads of the poison list
  *
  * Reads of the poison list are synchronized to ensure that a reader
  * does not get an incomplete list because their request overlapped
···
 	u32 max_errors;
 	DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
 	struct cxl_mbox_poison_out *list_out;
-	struct mutex lock; /* Protect reads of poison list */
+	struct mutex mutex; /* Protect reads of poison list */
 };

 /*
+81-14
include/linux/cleanup.h
···
 #define _LINUX_CLEANUP_H

 #include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/args.h>

 /**
  * DOC: scope-based cleanup helpers
···
  * Observe the lock is held for the remainder of the "if ()" block not
  * the remainder of "func()".
  *
- * Now, when a function uses both __free() and guard(), or multiple
- * instances of __free(), the LIFO order of variable definition order
- * matters. GCC documentation says:
+ * The ACQUIRE() macro can be used in all places that guard() can be
+ * used and additionally support conditional locks
+ *
+ *
+ *	DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
+ *	...
+ *	ACQUIRE(pci_dev_try, lock)(dev);
+ *	rc = ACQUIRE_ERR(pci_dev_try, &lock);
+ *	if (rc)
+ *		return rc;
+ *	// @lock is held
+ *
+ * Now, when a function uses both __free() and guard()/ACQUIRE(), or
+ * multiple instances of __free(), the LIFO order of variable definition
+ * order matters. GCC documentation says:
  *
  * "When multiple variables in the same scope have cleanup attributes,
  * at exit from the scope their associated cleanup functions are run in
···
  * acquire fails.
  *
  * Only for conditional locks.
+ *
+ * ACQUIRE(name, var):
+ *	a named instance of the (guard) class, suitable for conditional
+ *	locks when paired with ACQUIRE_ERR().
+ *
+ * ACQUIRE_ERR(name, &var):
+ *	a helper that is effectively a PTR_ERR() conversion of the guard
+ *	pointer. Returns 0 when the lock was acquired and a negative
+ *	error code otherwise.
  */

 #define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
 static __maybe_unused const bool class_##_name##_is_conditional = _is_cond

-#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
-	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
-	{ return (void *)(__force unsigned long)*(_exp); }
+#define __GUARD_IS_ERR(_ptr)						\
+	({								\
+		unsigned long _rc = (__force unsigned long)(_ptr);	\
+		unlikely((_rc - 1) >= -MAX_ERRNO - 1);			\
+	})
+
+#define __DEFINE_GUARD_LOCK_PTR(_name, _exp)				\
+	static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+	{								\
+		void *_ptr = (void *)(__force unsigned long)*(_exp);	\
+		if (IS_ERR(_ptr)) {					\
+			_ptr = NULL;					\
+		}							\
+		return _ptr;						\
+	}								\
+	static inline int class_##_name##_lock_err(class_##_name##_t *_T) \
+	{								\
+		long _rc = (__force unsigned long)*(_exp);		\
+		if (!_rc) {						\
+			_rc = -EBUSY;					\
+		}							\
+		if (!IS_ERR_VALUE(_rc)) {				\
+			_rc = 0;					\
+		}							\
+		return _rc;						\
+	}

 #define DEFINE_CLASS_IS_GUARD(_name) \
 	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
···
 	__DEFINE_GUARD_LOCK_PTR(_name, _T)

 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
-	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+	DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
 	DEFINE_CLASS_IS_GUARD(_name)

-#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
 	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
-		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+		     ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
 		     class_##_name##_t _T) \
 	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
-	{ return class_##_name##_lock_ptr(_T); }
+	{ return class_##_name##_lock_ptr(_T); } \
+	static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+	{ return class_##_name##_lock_err(_T); }
+
+/*
+ * Default binary condition; success on 'true'.
+ */
+#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
+	DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)

 #define guard(_name) \
 	CLASS(_name, __UNIQUE_ID(guard))

 #define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __guard_err(_name) class_##_name##_lock_err
 #define __is_cond_ptr(_name) class_##_name##_is_conditional
+
+#define ACQUIRE(_name, _var) CLASS(_name, _var)
+#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)

 /*
  * Helper macro for scoped_guard().
···
 \
 static inline void class_##_name##_destructor(class_##_name##_t *_T) \
 { \
-	if (_T->lock) { _unlock; } \
+	if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
 } \
 \
 __DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
···
 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
 __DEFINE_LOCK_GUARD_0(_name, _lock)

-#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
 	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
-			if (_T->lock && !(_condlock)) _T->lock = NULL; \
+			int _RET = (_lock); \
+			if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
 			_t; }), \
 		     typeof_member(class_##_name##_t, lock) l) \
 	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
-	{ return class_##_name##_lock_ptr(_T); }
+	{ return class_##_name##_lock_ptr(_T); } \
+	static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+	{ return class_##_name##_lock_err(_T); }

+#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
+	DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)

 #endif /* _LINUX_CLEANUP_H */
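As a usage illustration (not part of the patch), the sketch below pairs the extended four-argument DEFINE_GUARD_COND() form with ACQUIRE()/ACQUIRE_ERR(). The guard class mutex_intr_sketch, struct foo, and foo_read_value() are hypothetical; the series is expected to define a similar mutex_intr class elsewhere (it is used in the mbox.c hunk above).

/* Hypothetical conditional guard: success when mutex_lock_interruptible() returns 0 */
DEFINE_GUARD_COND(mutex, _intr_sketch, mutex_lock_interruptible(_T), _RET == 0);

struct foo {
	struct mutex lock;
	int value;
};

static int foo_read_value(struct foo *f)
{
	int ret;

	ACQUIRE(mutex_intr_sketch, lock)(&f->lock);
	if ((ret = ACQUIRE_ERR(mutex_intr_sketch, &lock)))
		return ret;	/* e.g. -EINTR, the lock was never taken */

	/* lock held; released automatically when @lock goes out of scope */
	return f->value;
}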