Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxl/hdm: Commit decoder state to hardware

After all the soft validation of the region has completed, convey the
region configuration to hardware while being careful to commit decoders
in specification mandated order. In addition to programming the endpoint
decoder base-address, interleave ways and granularity, the switch
decoder target lists are also established.

While the kernel cannot enforce spec-mandated reset order, it can
enforce spec-mandated commit order. For example, the kernel can't stop
someone from removing an endpoint device that is occupying decoderN in a
switch decoder where decoderN+1 is also committed. To reset decoderN,
decoderN+1 must be torn down first. That "tear down the world"
implementation is saved for a follow-on patch.

Callback operations are provided for the 'commit' and 'reset'
operations. While those callbacks may prove useful for CXL accelerators
(Type-2 devices with memory), the primary motivation is to enable a
simple way for cxl_test to intercept those operations.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784338418.1758207.14659830845389904356.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+486 -11
+16
Documentation/ABI/testing/sysfs-bus-cxl
··· 372 372 not an endpoint decoder. Once all positions have been 373 373 successfully written a final validation for decode conflicts is 374 374 performed before activating the region. 375 + 376 + 377 + What: /sys/bus/cxl/devices/regionZ/commit 378 + Date: May, 2022 379 + KernelVersion: v5.20 380 + Contact: linux-cxl@vger.kernel.org 381 + Description: 382 + (RW) Write a boolean 'true' string value to this attribute to 383 + trigger the region to transition from the software programmed 384 + state to the actively decoding in hardware state. The commit 385 + operation in addition to validating that the region is in proper 386 + configured state, validates that the decoders are being 387 + committed in spec mandated order (last committed decoder id + 388 + 1), and checks that the hardware accepts the commit request. 389 + Reading this value indicates whether the region is committed or 390 + not.
+227
drivers/cxl/core/hdm.c
··· 129 129 return ERR_PTR(-ENXIO); 130 130 } 131 131 132 + dev_set_drvdata(dev, cxlhdm); 133 + 132 134 return cxlhdm; 133 135 } 134 136 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL); ··· 468 466 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); 469 467 } 470 468 469 + static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl) 470 + { 471 + u16 eig; 472 + u8 eiw; 473 + 474 + /* 475 + * Input validation ensures these warns never fire, but otherwise 476 + * suppress unititalized variable usage warnings. 477 + */ 478 + if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw), 479 + "invalid interleave_ways: %d\n", cxld->interleave_ways)) 480 + return; 481 + if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig), 482 + "invalid interleave_granularity: %d\n", 483 + cxld->interleave_granularity)) 484 + return; 485 + 486 + u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK); 487 + u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK); 488 + *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT; 489 + } 490 + 491 + static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl) 492 + { 493 + u32p_replace_bits(ctrl, !!(cxld->target_type == 3), 494 + CXL_HDM_DECODER0_CTRL_TYPE); 495 + } 496 + 497 + static void cxld_set_hpa(struct cxl_decoder *cxld, u64 *base, u64 *size) 498 + { 499 + struct cxl_region *cxlr = cxld->region; 500 + struct cxl_region_params *p = &cxlr->params; 501 + 502 + cxld->hpa_range = (struct range) { 503 + .start = p->res->start, 504 + .end = p->res->end, 505 + }; 506 + 507 + *base = p->res->start; 508 + *size = resource_size(p->res); 509 + } 510 + 511 + static void cxld_clear_hpa(struct cxl_decoder *cxld) 512 + { 513 + cxld->hpa_range = (struct range) { 514 + .start = 0, 515 + .end = -1, 516 + }; 517 + } 518 + 519 + static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) 520 + { 521 + struct cxl_dport **t = &cxlsd->target[0]; 522 + int ways = cxlsd->cxld.interleave_ways; 523 + 524 + if 
(dev_WARN_ONCE(&cxlsd->cxld.dev, 525 + ways > 8 || ways > cxlsd->nr_targets, 526 + "ways: %d overflows targets: %d\n", ways, 527 + cxlsd->nr_targets)) 528 + return -ENXIO; 529 + 530 + *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); 531 + if (ways > 1) 532 + *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); 533 + if (ways > 2) 534 + *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id); 535 + if (ways > 3) 536 + *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id); 537 + if (ways > 4) 538 + *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id); 539 + if (ways > 5) 540 + *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id); 541 + if (ways > 6) 542 + *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); 543 + if (ways > 7) 544 + *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); 545 + 546 + return 0; 547 + } 548 + 549 + /* 550 + * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set 551 + * committed or error within 10ms, but just be generous with 20ms to account for 552 + * clock skew and other marginal behavior 553 + */ 554 + #define COMMIT_TIMEOUT_MS 20 555 + static int cxld_await_commit(void __iomem *hdm, int id) 556 + { 557 + u32 ctrl; 558 + int i; 559 + 560 + for (i = 0; i < COMMIT_TIMEOUT_MS; i++) { 561 + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 562 + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) { 563 + ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; 564 + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 565 + return -EIO; 566 + } 567 + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) 568 + return 0; 569 + fsleep(1000); 570 + } 571 + 572 + return -ETIMEDOUT; 573 + } 574 + 575 + static int cxl_decoder_commit(struct cxl_decoder *cxld) 576 + { 577 + struct cxl_port *port = to_cxl_port(cxld->dev.parent); 578 + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 579 + void __iomem *hdm = cxlhdm->regs.hdm_decoder; 580 + int id = cxld->id, rc; 581 + u64 base, size; 582 + u32 ctrl; 583 + 584 + if (cxld->flags & 
CXL_DECODER_F_ENABLE) 585 + return 0; 586 + 587 + if (port->commit_end + 1 != id) { 588 + dev_dbg(&port->dev, 589 + "%s: out of order commit, expected decoder%d.%d\n", 590 + dev_name(&cxld->dev), port->id, port->commit_end + 1); 591 + return -EBUSY; 592 + } 593 + 594 + down_read(&cxl_dpa_rwsem); 595 + /* common decoder settings */ 596 + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); 597 + cxld_set_interleave(cxld, &ctrl); 598 + cxld_set_type(cxld, &ctrl); 599 + cxld_set_hpa(cxld, &base, &size); 600 + 601 + writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); 602 + writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); 603 + writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); 604 + writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); 605 + 606 + if (is_switch_decoder(&cxld->dev)) { 607 + struct cxl_switch_decoder *cxlsd = 608 + to_cxl_switch_decoder(&cxld->dev); 609 + void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id); 610 + void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id); 611 + u64 targets; 612 + 613 + rc = cxlsd_set_targets(cxlsd, &targets); 614 + if (rc) { 615 + dev_dbg(&port->dev, "%s: target configuration error\n", 616 + dev_name(&cxld->dev)); 617 + goto err; 618 + } 619 + 620 + writel(upper_32_bits(targets), tl_hi); 621 + writel(lower_32_bits(targets), tl_lo); 622 + } else { 623 + struct cxl_endpoint_decoder *cxled = 624 + to_cxl_endpoint_decoder(&cxld->dev); 625 + void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id); 626 + void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id); 627 + 628 + writel(upper_32_bits(cxled->skip), sk_hi); 629 + writel(lower_32_bits(cxled->skip), sk_lo); 630 + } 631 + 632 + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 633 + up_read(&cxl_dpa_rwsem); 634 + 635 + port->commit_end++; 636 + rc = cxld_await_commit(hdm, cxld->id); 637 + err: 638 + if (rc) { 639 + dev_dbg(&port->dev, "%s: error %d committing 
decoder\n", 640 + dev_name(&cxld->dev), rc); 641 + cxld->reset(cxld); 642 + return rc; 643 + } 644 + cxld->flags |= CXL_DECODER_F_ENABLE; 645 + 646 + return 0; 647 + } 648 + 649 + static int cxl_decoder_reset(struct cxl_decoder *cxld) 650 + { 651 + struct cxl_port *port = to_cxl_port(cxld->dev.parent); 652 + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 653 + void __iomem *hdm = cxlhdm->regs.hdm_decoder; 654 + int id = cxld->id; 655 + u32 ctrl; 656 + 657 + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 658 + return 0; 659 + 660 + if (port->commit_end != id) { 661 + dev_dbg(&port->dev, 662 + "%s: out of order reset, expected decoder%d.%d\n", 663 + dev_name(&cxld->dev), port->id, port->commit_end); 664 + return -EBUSY; 665 + } 666 + 667 + down_read(&cxl_dpa_rwsem); 668 + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 669 + ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; 670 + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 671 + 672 + cxld_clear_hpa(cxld); 673 + writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); 674 + writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); 675 + writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); 676 + writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); 677 + up_read(&cxl_dpa_rwsem); 678 + 679 + port->commit_end--; 680 + cxld->flags &= ~CXL_DECODER_F_ENABLE; 681 + 682 + return 0; 683 + } 684 + 471 685 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, 472 686 int *target_map, void __iomem *hdm, int which, 473 687 u64 *dpa_base) ··· 706 488 base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); 707 489 size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which)); 708 490 committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED); 491 + cxld->commit = cxl_decoder_commit; 492 + cxld->reset = cxl_decoder_reset; 709 493 710 494 if (!committed) 711 495 size = 0; ··· 731 511 cxld->target_type = CXL_DECODER_EXPANDER; 732 512 else 733 513 cxld->target_type = 
CXL_DECODER_ACCELERATOR; 514 + if (cxld->id != port->commit_end + 1) { 515 + dev_warn(&port->dev, 516 + "decoder%d.%d: Committed out of order\n", 517 + port->id, cxld->id); 518 + return -ENXIO; 519 + } 520 + port->commit_end = cxld->id; 734 521 } else { 735 522 /* unless / until type-2 drivers arrive, assume type-3 */ 736 523 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
+1
drivers/cxl/core/port.c
··· 632 632 port->component_reg_phys = component_reg_phys; 633 633 ida_init(&port->decoder_ida); 634 634 port->hdm_end = -1; 635 + port->commit_end = -1; 635 636 xa_init(&port->dports); 636 637 xa_init(&port->endpoints); 637 638 xa_init(&port->regions);
+184 -10
drivers/cxl/core/region.c
··· 115 115 } 116 116 static DEVICE_ATTR_RW(uuid); 117 117 118 + static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port, 119 + struct cxl_region *cxlr) 120 + { 121 + return xa_load(&port->regions, (unsigned long)cxlr); 122 + } 123 + 124 + static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) 125 + { 126 + struct cxl_region_params *p = &cxlr->params; 127 + int i; 128 + 129 + for (i = count - 1; i >= 0; i--) { 130 + struct cxl_endpoint_decoder *cxled = p->targets[i]; 131 + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 132 + struct cxl_port *iter = cxled_to_port(cxled); 133 + struct cxl_ep *ep; 134 + int rc; 135 + 136 + while (!is_cxl_root(to_cxl_port(iter->dev.parent))) 137 + iter = to_cxl_port(iter->dev.parent); 138 + 139 + for (ep = cxl_ep_load(iter, cxlmd); iter; 140 + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { 141 + struct cxl_region_ref *cxl_rr; 142 + struct cxl_decoder *cxld; 143 + 144 + cxl_rr = cxl_rr_load(iter, cxlr); 145 + cxld = cxl_rr->decoder; 146 + rc = cxld->reset(cxld); 147 + if (rc) 148 + return rc; 149 + } 150 + 151 + rc = cxled->cxld.reset(&cxled->cxld); 152 + if (rc) 153 + return rc; 154 + } 155 + 156 + return 0; 157 + } 158 + 159 + static int cxl_region_decode_commit(struct cxl_region *cxlr) 160 + { 161 + struct cxl_region_params *p = &cxlr->params; 162 + int i, rc; 163 + 164 + for (i = 0; i < p->nr_targets; i++) { 165 + struct cxl_endpoint_decoder *cxled = p->targets[i]; 166 + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 167 + struct cxl_region_ref *cxl_rr; 168 + struct cxl_decoder *cxld; 169 + struct cxl_port *iter; 170 + struct cxl_ep *ep; 171 + 172 + /* commit bottom up */ 173 + for (iter = cxled_to_port(cxled); !is_cxl_root(iter); 174 + iter = to_cxl_port(iter->dev.parent)) { 175 + cxl_rr = cxl_rr_load(iter, cxlr); 176 + cxld = cxl_rr->decoder; 177 + rc = cxld->commit(cxld); 178 + if (rc) 179 + break; 180 + } 181 + 182 + /* success, all decoders up to the root are programmed */ 183 + if 
(is_cxl_root(iter)) 184 + continue; 185 + 186 + /* programming @iter failed, teardown */ 187 + for (ep = cxl_ep_load(iter, cxlmd); ep && iter; 188 + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { 189 + cxl_rr = cxl_rr_load(iter, cxlr); 190 + cxld = cxl_rr->decoder; 191 + cxld->reset(cxld); 192 + } 193 + 194 + cxled->cxld.reset(&cxled->cxld); 195 + if (i == 0) 196 + return rc; 197 + break; 198 + } 199 + 200 + if (i >= p->nr_targets) 201 + return 0; 202 + 203 + /* undo the targets that were successfully committed */ 204 + cxl_region_decode_reset(cxlr, i); 205 + return rc; 206 + } 207 + 208 + static ssize_t commit_store(struct device *dev, struct device_attribute *attr, 209 + const char *buf, size_t len) 210 + { 211 + struct cxl_region *cxlr = to_cxl_region(dev); 212 + struct cxl_region_params *p = &cxlr->params; 213 + bool commit; 214 + ssize_t rc; 215 + 216 + rc = kstrtobool(buf, &commit); 217 + if (rc) 218 + return rc; 219 + 220 + rc = down_write_killable(&cxl_region_rwsem); 221 + if (rc) 222 + return rc; 223 + 224 + /* Already in the requested state? */ 225 + if (commit && p->state >= CXL_CONFIG_COMMIT) 226 + goto out; 227 + if (!commit && p->state < CXL_CONFIG_COMMIT) 228 + goto out; 229 + 230 + /* Not ready to commit? */ 231 + if (commit && p->state < CXL_CONFIG_ACTIVE) { 232 + rc = -ENXIO; 233 + goto out; 234 + } 235 + 236 + if (commit) 237 + rc = cxl_region_decode_commit(cxlr); 238 + else { 239 + p->state = CXL_CONFIG_RESET_PENDING; 240 + up_write(&cxl_region_rwsem); 241 + device_release_driver(&cxlr->dev); 242 + down_write(&cxl_region_rwsem); 243 + 244 + /* 245 + * The lock was dropped, so need to revalidate that the reset is 246 + * still pending. 
247 + */ 248 + if (p->state == CXL_CONFIG_RESET_PENDING) 249 + rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 250 + } 251 + 252 + if (rc) 253 + goto out; 254 + 255 + if (commit) 256 + p->state = CXL_CONFIG_COMMIT; 257 + else if (p->state == CXL_CONFIG_RESET_PENDING) 258 + p->state = CXL_CONFIG_ACTIVE; 259 + 260 + out: 261 + up_write(&cxl_region_rwsem); 262 + 263 + if (rc) 264 + return rc; 265 + return len; 266 + } 267 + 268 + static ssize_t commit_show(struct device *dev, struct device_attribute *attr, 269 + char *buf) 270 + { 271 + struct cxl_region *cxlr = to_cxl_region(dev); 272 + struct cxl_region_params *p = &cxlr->params; 273 + ssize_t rc; 274 + 275 + rc = down_read_interruptible(&cxl_region_rwsem); 276 + if (rc) 277 + return rc; 278 + rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); 279 + up_read(&cxl_region_rwsem); 280 + 281 + return rc; 282 + } 283 + static DEVICE_ATTR_RW(commit); 284 + 118 285 static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a, 119 286 int n) 120 287 { ··· 566 399 567 400 static struct attribute *cxl_region_attrs[] = { 568 401 &dev_attr_uuid.attr, 402 + &dev_attr_commit.attr, 569 403 &dev_attr_interleave_ways.attr, 570 404 &dev_attr_interleave_granularity.attr, 571 405 &dev_attr_resource.attr, ··· 841 673 if (cxl_rr->nr_eps == 0) 842 674 free_region_ref(cxl_rr); 843 675 return rc; 844 - } 845 - 846 - static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port, 847 - struct cxl_region *cxlr) 848 - { 849 - return xa_load(&port->regions, (unsigned long)cxlr); 850 676 } 851 677 852 678 static void cxl_port_detach_region(struct cxl_port *port, ··· 1230 1068 return rc; 1231 1069 } 1232 1070 1233 - static void cxl_region_detach(struct cxl_endpoint_decoder *cxled) 1071 + static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) 1234 1072 { 1235 1073 struct cxl_port *iter, *ep_port = cxled_to_port(cxled); 1236 1074 struct cxl_region *cxlr = cxled->cxld.region; 1237 1075 struct 
cxl_region_params *p; 1076 + int rc = 0; 1238 1077 1239 1078 lockdep_assert_held_write(&cxl_region_rwsem); 1240 1079 1241 1080 if (!cxlr) 1242 - return; 1081 + return 0; 1243 1082 1244 1083 p = &cxlr->params; 1245 1084 get_device(&cxlr->dev); 1085 + 1086 + if (p->state > CXL_CONFIG_ACTIVE) { 1087 + /* 1088 + * TODO: tear down all impacted regions if a device is 1089 + * removed out of order 1090 + */ 1091 + rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 1092 + if (rc) 1093 + goto out; 1094 + p->state = CXL_CONFIG_ACTIVE; 1095 + } 1246 1096 1247 1097 for (iter = ep_port; !is_cxl_root(iter); 1248 1098 iter = to_cxl_port(iter->dev.parent)) ··· 1283 1109 down_write(&cxl_region_rwsem); 1284 1110 out: 1285 1111 put_device(&cxlr->dev); 1112 + return rc; 1286 1113 } 1287 1114 1288 1115 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) ··· 1341 1166 goto out; 1342 1167 } 1343 1168 1344 - cxl_region_detach(p->targets[pos]); 1345 - rc = 0; 1169 + rc = cxl_region_detach(p->targets[pos]); 1346 1170 out: 1347 1171 up_write(&cxl_region_rwsem); 1348 1172 return rc;
+12 -1
drivers/cxl/cxl.h
··· 54 54 #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8) 55 55 #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) 56 56 #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) 57 + #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) 57 58 #define CXL_HDM_DECODER0_CTRL_TYPE BIT(12) 58 59 #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) 59 60 #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) ··· 258 257 * @target_type: accelerator vs expander (type2 vs type3) selector 259 258 * @region: currently assigned region for this decoder 260 259 * @flags: memory type capabilities and locking 261 - */ 260 + * @commit: device/decoder-type specific callback to commit settings to hw 261 + * @reset: device/decoder-type specific callback to reset hw settings 262 + */ 262 263 struct cxl_decoder { 263 264 struct device dev; 264 265 int id; ··· 270 267 enum cxl_decoder_type target_type; 271 268 struct cxl_region *region; 272 269 unsigned long flags; 270 + int (*commit)(struct cxl_decoder *cxld); 271 + int (*reset)(struct cxl_decoder *cxld); 273 272 }; 274 273 275 274 /* ··· 344 339 * changes to interleave_ways or interleave_granularity 345 340 * @CXL_CONFIG_ACTIVE: All targets have been added the region is now 346 341 * active 342 + * @CXL_CONFIG_RESET_PENDING: see commit_store() 343 + * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware 347 344 */ 348 345 enum cxl_config_state { 349 346 CXL_CONFIG_IDLE, 350 347 CXL_CONFIG_INTERLEAVE_ACTIVE, 351 348 CXL_CONFIG_ACTIVE, 349 + CXL_CONFIG_RESET_PENDING, 350 + CXL_CONFIG_COMMIT, 352 351 }; 353 352 354 353 /** ··· 434 425 * @parent_dport: dport that points to this port in the parent 435 426 * @decoder_ida: allocator for decoder ids 436 427 * @hdm_end: track last allocated HDM decoder instance for allocation ordering 428 + * @commit_end: cursor to track highest committed decoder for commit ordering 437 429 * @component_reg_phys: component register capability base address (optional) 438 430 * @dead: last ep has been removed, force port 
re-creation 439 431 * @depth: How deep this port is relative to the root. depth 0 is the root. ··· 452 442 struct cxl_dport *parent_dport; 453 443 struct ida decoder_ida; 454 444 int hdm_end; 445 + int commit_end; 455 446 resource_size_t component_reg_phys; 456 447 bool dead; 457 448 unsigned int depth;
+46
tools/testing/cxl/test/cxl.c
··· 429 429 return 0; 430 430 } 431 431 432 + static int mock_decoder_commit(struct cxl_decoder *cxld) 433 + { 434 + struct cxl_port *port = to_cxl_port(cxld->dev.parent); 435 + int id = cxld->id; 436 + 437 + if (cxld->flags & CXL_DECODER_F_ENABLE) 438 + return 0; 439 + 440 + dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev)); 441 + if (port->commit_end + 1 != id) { 442 + dev_dbg(&port->dev, 443 + "%s: out of order commit, expected decoder%d.%d\n", 444 + dev_name(&cxld->dev), port->id, port->commit_end + 1); 445 + return -EBUSY; 446 + } 447 + 448 + port->commit_end++; 449 + cxld->flags |= CXL_DECODER_F_ENABLE; 450 + 451 + return 0; 452 + } 453 + 454 + static int mock_decoder_reset(struct cxl_decoder *cxld) 455 + { 456 + struct cxl_port *port = to_cxl_port(cxld->dev.parent); 457 + int id = cxld->id; 458 + 459 + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 460 + return 0; 461 + 462 + dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev)); 463 + if (port->commit_end != id) { 464 + dev_dbg(&port->dev, 465 + "%s: out of order reset, expected decoder%d.%d\n", 466 + dev_name(&cxld->dev), port->id, port->commit_end); 467 + return -EBUSY; 468 + } 469 + 470 + port->commit_end--; 471 + cxld->flags &= ~CXL_DECODER_F_ENABLE; 472 + 473 + return 0; 474 + } 475 + 432 476 static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) 433 477 { 434 478 struct cxl_port *port = cxlhdm->port; ··· 526 482 cxld->interleave_ways = min_not_zero(target_count, 1); 527 483 cxld->interleave_granularity = SZ_4K; 528 484 cxld->target_type = CXL_DECODER_EXPANDER; 485 + cxld->commit = mock_decoder_commit; 486 + cxld->reset = mock_decoder_reset; 529 487 530 488 if (target_count) { 531 489 rc = device_for_each_child(port->uport, &ctx,