Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-6.5/cxl-type-2' into for-6.5/cxl

Pick up the driver cleanups identified in preparation for CXL "type-2"
(accelerator) device support. The major change here from a conflict
generation perspective is the split of 'struct cxl_memdev_state' from
the core 'struct cxl_dev_state', since an accelerator may not care
about all the optional features that are standard on a CXL "type-3"
(host-only memory expander) device.

A silent conflict also occurs with the move of the endpoint port to be a
formal property of a 'struct cxl_memdev' rather than drvdata.

+594 -549
+1 -1
drivers/cxl/acpi.c
··· 258 258 259 259 cxld = &cxlrd->cxlsd.cxld; 260 260 cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); 261 - cxld->target_type = CXL_DECODER_EXPANDER; 261 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 262 262 cxld->hpa_range = (struct range) { 263 263 .start = res->start, 264 264 .end = res->end,
+31 -13
drivers/cxl/core/hdm.c
··· 570 570 571 571 static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl) 572 572 { 573 - u32p_replace_bits(ctrl, !!(cxld->target_type == 3), 574 - CXL_HDM_DECODER0_CTRL_TYPE); 573 + u32p_replace_bits(ctrl, 574 + !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM), 575 + CXL_HDM_DECODER0_CTRL_HOSTONLY); 575 576 } 576 577 577 578 static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) ··· 765 764 if (!len) 766 765 return -ENOENT; 767 766 768 - cxld->target_type = CXL_DECODER_EXPANDER; 767 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 769 768 cxld->commit = NULL; 770 769 cxld->reset = NULL; 771 770 cxld->hpa_range = info->dvsec_range[which]; ··· 794 793 int *target_map, void __iomem *hdm, int which, 795 794 u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) 796 795 { 796 + struct cxl_endpoint_decoder *cxled = NULL; 797 797 u64 size, base, skip, dpa_size, lo, hi; 798 - struct cxl_endpoint_decoder *cxled; 799 798 bool committed; 800 799 u32 remainder; 801 800 int i, rc; ··· 828 827 return -ENXIO; 829 828 } 830 829 830 + if (info) 831 + cxled = to_cxl_endpoint_decoder(&cxld->dev); 831 832 cxld->hpa_range = (struct range) { 832 833 .start = base, 833 834 .end = base + size - 1, ··· 840 837 cxld->flags |= CXL_DECODER_F_ENABLE; 841 838 if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK) 842 839 cxld->flags |= CXL_DECODER_F_LOCK; 843 - if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl)) 844 - cxld->target_type = CXL_DECODER_EXPANDER; 840 + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl)) 841 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 845 842 else 846 - cxld->target_type = CXL_DECODER_ACCELERATOR; 843 + cxld->target_type = CXL_DECODER_DEVMEM; 847 844 if (cxld->id != port->commit_end + 1) { 848 845 dev_warn(&port->dev, 849 846 "decoder%d.%d: Committed out of order\n", ··· 859 856 } 860 857 port->commit_end = cxld->id; 861 858 } else { 862 - /* unless / until type-2 drivers arrive, assume type-3 */ 863 - if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) { 
864 - ctrl |= CXL_HDM_DECODER0_CTRL_TYPE; 859 + if (cxled) { 860 + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 861 + struct cxl_dev_state *cxlds = cxlmd->cxlds; 862 + 863 + /* 864 + * Default by devtype until a device arrives that needs 865 + * more precision. 866 + */ 867 + if (cxlds->type == CXL_DEVTYPE_CLASSMEM) 868 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 869 + else 870 + cxld->target_type = CXL_DECODER_DEVMEM; 871 + } else { 872 + /* To be overridden by region type at commit time */ 873 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 874 + } 875 + 876 + if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) && 877 + cxld->target_type == CXL_DECODER_HOSTONLYMEM) { 878 + ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY; 865 879 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); 866 880 } 867 - cxld->target_type = CXL_DECODER_EXPANDER; 868 881 } 869 882 rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl), 870 883 &cxld->interleave_ways); ··· 899 880 port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, 900 881 cxld->interleave_ways, cxld->interleave_granularity); 901 882 902 - if (!info) { 883 + if (!cxled) { 903 884 lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which)); 904 885 hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which)); 905 886 target_list.value = (hi << 32) + lo; ··· 922 903 lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which)); 923 904 hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which)); 924 905 skip = (hi << 32) + lo; 925 - cxled = to_cxl_endpoint_decoder(&cxld->dev); 926 906 rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip); 927 907 if (rc) { 928 908 dev_err(&port->dev,
+147 -138
drivers/cxl/core/mbox.c
··· 182 182 183 183 /** 184 184 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command 185 - * @cxlds: The device data for the operation 185 + * @mds: The driver data for the operation 186 186 * @mbox_cmd: initialized command to execute 187 187 * 188 188 * Context: Any context. ··· 198 198 * error. While this distinction can be useful for commands from userspace, the 199 199 * kernel will only be able to use results when both are successful. 200 200 */ 201 - int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, 201 + int cxl_internal_send_cmd(struct cxl_memdev_state *mds, 202 202 struct cxl_mbox_cmd *mbox_cmd) 203 203 { 204 204 size_t out_size, min_out; 205 205 int rc; 206 206 207 - if (mbox_cmd->size_in > cxlds->payload_size || 208 - mbox_cmd->size_out > cxlds->payload_size) 207 + if (mbox_cmd->size_in > mds->payload_size || 208 + mbox_cmd->size_out > mds->payload_size) 209 209 return -E2BIG; 210 210 211 211 out_size = mbox_cmd->size_out; 212 212 min_out = mbox_cmd->min_out; 213 - rc = cxlds->mbox_send(cxlds, mbox_cmd); 213 + rc = mds->mbox_send(mds, mbox_cmd); 214 214 /* 215 215 * EIO is reserved for a payload size mismatch and mbox_send() 216 216 * may not return this error. 
··· 298 298 } 299 299 300 300 static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, 301 - struct cxl_dev_state *cxlds, u16 opcode, 301 + struct cxl_memdev_state *mds, u16 opcode, 302 302 size_t in_size, size_t out_size, u64 in_payload) 303 303 { 304 304 *mbox = (struct cxl_mbox_cmd) { ··· 313 313 return PTR_ERR(mbox->payload_in); 314 314 315 315 if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) { 316 - dev_dbg(cxlds->dev, "%s: input payload not allowed\n", 316 + dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n", 317 317 cxl_mem_opcode_to_name(opcode)); 318 318 kvfree(mbox->payload_in); 319 319 return -EBUSY; ··· 322 322 323 323 /* Prepare to handle a full payload for variable sized output */ 324 324 if (out_size == CXL_VARIABLE_PAYLOAD) 325 - mbox->size_out = cxlds->payload_size; 325 + mbox->size_out = mds->payload_size; 326 326 else 327 327 mbox->size_out = out_size; 328 328 ··· 344 344 345 345 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, 346 346 const struct cxl_send_command *send_cmd, 347 - struct cxl_dev_state *cxlds) 347 + struct cxl_memdev_state *mds) 348 348 { 349 349 if (send_cmd->raw.rsvd) 350 350 return -EINVAL; ··· 354 354 * gets passed along without further checking, so it must be 355 355 * validated here. 
356 356 */ 357 - if (send_cmd->out.size > cxlds->payload_size) 357 + if (send_cmd->out.size > mds->payload_size) 358 358 return -EINVAL; 359 359 360 360 if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) 361 361 return -EPERM; 362 362 363 - dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n"); 363 + dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n"); 364 364 365 365 *mem_cmd = (struct cxl_mem_command) { 366 366 .info = { ··· 376 376 377 377 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, 378 378 const struct cxl_send_command *send_cmd, 379 - struct cxl_dev_state *cxlds) 379 + struct cxl_memdev_state *mds) 380 380 { 381 381 struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id]; 382 382 const struct cxl_command_info *info = &c->info; ··· 391 391 return -EINVAL; 392 392 393 393 /* Check that the command is enabled for hardware */ 394 - if (!test_bit(info->id, cxlds->enabled_cmds)) 394 + if (!test_bit(info->id, mds->enabled_cmds)) 395 395 return -ENOTTY; 396 396 397 397 /* Check that the command is not claimed for exclusive kernel use */ 398 - if (test_bit(info->id, cxlds->exclusive_cmds)) 398 + if (test_bit(info->id, mds->exclusive_cmds)) 399 399 return -EBUSY; 400 400 401 401 /* Check the input buffer is the expected size */ ··· 424 424 /** 425 425 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. 426 426 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd. 427 - * @cxlds: The device data for the operation 427 + * @mds: The driver data for the operation 428 428 * @send_cmd: &struct cxl_send_command copied in from userspace. 429 429 * 430 430 * Return: ··· 439 439 * safe to send to the hardware. 
440 440 */ 441 441 static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, 442 - struct cxl_dev_state *cxlds, 442 + struct cxl_memdev_state *mds, 443 443 const struct cxl_send_command *send_cmd) 444 444 { 445 445 struct cxl_mem_command mem_cmd; ··· 453 453 * supports, but output can be arbitrarily large (simply write out as 454 454 * much data as the hardware provides). 455 455 */ 456 - if (send_cmd->in.size > cxlds->payload_size) 456 + if (send_cmd->in.size > mds->payload_size) 457 457 return -EINVAL; 458 458 459 459 /* Sanitize and construct a cxl_mem_command */ 460 460 if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) 461 - rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds); 461 + rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds); 462 462 else 463 - rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds); 463 + rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds); 464 464 465 465 if (rc) 466 466 return rc; 467 467 468 468 /* Sanitize and construct a cxl_mbox_cmd */ 469 - return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode, 469 + return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode, 470 470 mem_cmd.info.size_in, mem_cmd.info.size_out, 471 471 send_cmd->in.payload); 472 472 } ··· 474 474 int cxl_query_cmd(struct cxl_memdev *cxlmd, 475 475 struct cxl_mem_query_commands __user *q) 476 476 { 477 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 477 478 struct device *dev = &cxlmd->dev; 478 479 struct cxl_mem_command *cmd; 479 480 u32 n_commands; ··· 496 495 cxl_for_each_cmd(cmd) { 497 496 struct cxl_command_info info = cmd->info; 498 497 499 - if (test_bit(info.id, cxlmd->cxlds->enabled_cmds)) 498 + if (test_bit(info.id, mds->enabled_cmds)) 500 499 info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED; 501 - if (test_bit(info.id, cxlmd->cxlds->exclusive_cmds)) 500 + if (test_bit(info.id, mds->exclusive_cmds)) 502 501 info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE; 503 502 504 503 if (copy_to_user(&q->commands[j++], &info, sizeof(info))) ··· 513 512 514 513 /** 515 
514 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. 516 - * @cxlds: The device data for the operation 515 + * @mds: The driver data for the operation 517 516 * @mbox_cmd: The validated mailbox command. 518 517 * @out_payload: Pointer to userspace's output payload. 519 518 * @size_out: (Input) Max payload size to copy out. ··· 534 533 * 535 534 * See cxl_send_cmd(). 536 535 */ 537 - static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds, 536 + static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, 538 537 struct cxl_mbox_cmd *mbox_cmd, 539 538 u64 out_payload, s32 *size_out, 540 539 u32 *retval) 541 540 { 542 - struct device *dev = cxlds->dev; 541 + struct device *dev = mds->cxlds.dev; 543 542 int rc; 544 543 545 544 dev_dbg(dev, ··· 549 548 cxl_mem_opcode_to_name(mbox_cmd->opcode), 550 549 mbox_cmd->opcode, mbox_cmd->size_in); 551 550 552 - rc = cxlds->mbox_send(cxlds, mbox_cmd); 551 + rc = mds->mbox_send(mds, mbox_cmd); 553 552 if (rc) 554 553 goto out; 555 554 ··· 578 577 579 578 int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) 580 579 { 581 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 580 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 582 581 struct device *dev = &cxlmd->dev; 583 582 struct cxl_send_command send; 584 583 struct cxl_mbox_cmd mbox_cmd; ··· 589 588 if (copy_from_user(&send, s, sizeof(send))) 590 589 return -EFAULT; 591 590 592 - rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send); 591 + rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send); 593 592 if (rc) 594 593 return rc; 595 594 596 - rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload, 595 + rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload, 597 596 &send.out.size, &send.retval); 598 597 if (rc) 599 598 return rc; ··· 604 603 return 0; 605 604 } 606 605 607 - static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8 
*out) 606 + static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, 607 + u32 *size, u8 *out) 608 608 { 609 609 u32 remaining = *size; 610 610 u32 offset = 0; 611 611 612 612 while (remaining) { 613 - u32 xfer_size = min_t(u32, remaining, cxlds->payload_size); 613 + u32 xfer_size = min_t(u32, remaining, mds->payload_size); 614 614 struct cxl_mbox_cmd mbox_cmd; 615 615 struct cxl_mbox_get_log log; 616 616 int rc; ··· 630 628 .payload_out = out, 631 629 }; 632 630 633 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 631 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 634 632 635 633 /* 636 634 * The output payload length that indicates the number ··· 657 655 658 656 /** 659 657 * cxl_walk_cel() - Walk through the Command Effects Log. 660 - * @cxlds: The device data for the operation 658 + * @mds: The driver data for the operation 661 659 * @size: Length of the Command Effects Log. 662 660 * @cel: CEL 663 661 * 664 662 * Iterate over each entry in the CEL and determine if the driver supports the 665 663 * command. If so, the command is enabled for the device and can be used later. 
666 664 */ 667 - static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel) 665 + static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) 668 666 { 669 667 struct cxl_cel_entry *cel_entry; 670 668 const int cel_entries = size / sizeof(*cel_entry); 669 + struct device *dev = mds->cxlds.dev; 671 670 int i; 672 671 673 672 cel_entry = (struct cxl_cel_entry *) cel; ··· 678 675 struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 679 676 680 677 if (!cmd && !cxl_is_poison_command(opcode)) { 681 - dev_dbg(cxlds->dev, 678 + dev_dbg(dev, 682 679 "Opcode 0x%04x unsupported by driver\n", opcode); 683 680 continue; 684 681 } 685 682 686 683 if (cmd) 687 - set_bit(cmd->info.id, cxlds->enabled_cmds); 684 + set_bit(cmd->info.id, mds->enabled_cmds); 688 685 689 686 if (cxl_is_poison_command(opcode)) 690 - cxl_set_poison_cmd_enabled(&cxlds->poison, opcode); 687 + cxl_set_poison_cmd_enabled(&mds->poison, opcode); 691 688 692 - dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode); 689 + dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode); 693 690 } 694 691 } 695 692 696 - static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds) 693 + static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds) 697 694 { 698 695 struct cxl_mbox_get_supported_logs *ret; 699 696 struct cxl_mbox_cmd mbox_cmd; 700 697 int rc; 701 698 702 - ret = kvmalloc(cxlds->payload_size, GFP_KERNEL); 699 + ret = kvmalloc(mds->payload_size, GFP_KERNEL); 703 700 if (!ret) 704 701 return ERR_PTR(-ENOMEM); 705 702 706 703 mbox_cmd = (struct cxl_mbox_cmd) { 707 704 .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, 708 - .size_out = cxlds->payload_size, 705 + .size_out = mds->payload_size, 709 706 .payload_out = ret, 710 707 /* At least the record number field must be valid */ 711 708 .min_out = 2, 712 709 }; 713 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 710 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 714 711 if (rc < 0) { 715 712 
kvfree(ret); 716 713 return ERR_PTR(rc); ··· 733 730 734 731 /** 735 732 * cxl_enumerate_cmds() - Enumerate commands for a device. 736 - * @cxlds: The device data for the operation 733 + * @mds: The driver data for the operation 737 734 * 738 735 * Returns 0 if enumerate completed successfully. 739 736 * 740 737 * CXL devices have optional support for certain commands. This function will 741 738 * determine the set of supported commands for the hardware and update the 742 - * enabled_cmds bitmap in the @cxlds. 739 + * enabled_cmds bitmap in the @mds. 743 740 */ 744 - int cxl_enumerate_cmds(struct cxl_dev_state *cxlds) 741 + int cxl_enumerate_cmds(struct cxl_memdev_state *mds) 745 742 { 746 743 struct cxl_mbox_get_supported_logs *gsl; 747 - struct device *dev = cxlds->dev; 744 + struct device *dev = mds->cxlds.dev; 748 745 struct cxl_mem_command *cmd; 749 746 int i, rc; 750 747 751 - gsl = cxl_get_gsl(cxlds); 748 + gsl = cxl_get_gsl(mds); 752 749 if (IS_ERR(gsl)) 753 750 return PTR_ERR(gsl); 754 751 ··· 769 766 goto out; 770 767 } 771 768 772 - rc = cxl_xfer_log(cxlds, &uuid, &size, log); 769 + rc = cxl_xfer_log(mds, &uuid, &size, log); 773 770 if (rc) { 774 771 kvfree(log); 775 772 goto out; 776 773 } 777 774 778 - cxl_walk_cel(cxlds, size, log); 775 + cxl_walk_cel(mds, size, log); 779 776 kvfree(log); 780 777 781 778 /* In case CEL was bogus, enable some default commands. 
*/ 782 779 cxl_for_each_cmd(cmd) 783 780 if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) 784 - set_bit(cmd->info.id, cxlds->enabled_cmds); 781 + set_bit(cmd->info.id, mds->enabled_cmds); 785 782 786 783 /* Found the required CEL */ 787 784 rc = 0; ··· 842 839 } 843 840 } 844 841 845 - static int cxl_clear_event_record(struct cxl_dev_state *cxlds, 842 + static int cxl_clear_event_record(struct cxl_memdev_state *mds, 846 843 enum cxl_event_log_type log, 847 844 struct cxl_get_event_payload *get_pl) 848 845 { ··· 856 853 int i; 857 854 858 855 /* Payload size may limit the max handles */ 859 - if (pl_size > cxlds->payload_size) { 860 - max_handles = (cxlds->payload_size - sizeof(*payload)) / 861 - sizeof(__le16); 856 + if (pl_size > mds->payload_size) { 857 + max_handles = (mds->payload_size - sizeof(*payload)) / 858 + sizeof(__le16); 862 859 pl_size = struct_size(payload, handles, max_handles); 863 860 } 864 861 ··· 883 880 i = 0; 884 881 for (cnt = 0; cnt < total; cnt++) { 885 882 payload->handles[i++] = get_pl->records[cnt].hdr.handle; 886 - dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n", 887 - log, le16_to_cpu(payload->handles[i])); 883 + dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log, 884 + le16_to_cpu(payload->handles[i])); 888 885 889 886 if (i == max_handles) { 890 887 payload->nr_recs = i; 891 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 888 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 892 889 if (rc) 893 890 goto free_pl; 894 891 i = 0; ··· 899 896 if (i) { 900 897 payload->nr_recs = i; 901 898 mbox_cmd.size_in = struct_size(payload, handles, i); 902 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 899 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 903 900 if (rc) 904 901 goto free_pl; 905 902 } ··· 909 906 return rc; 910 907 } 911 908 912 - static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds, 909 + static void cxl_mem_get_records_log(struct cxl_memdev_state *mds, 913 910 enum cxl_event_log_type type) 914 911 { 912 + struct 
cxl_memdev *cxlmd = mds->cxlds.cxlmd; 913 + struct device *dev = mds->cxlds.dev; 915 914 struct cxl_get_event_payload *payload; 916 915 struct cxl_mbox_cmd mbox_cmd; 917 916 u8 log_type = type; 918 917 u16 nr_rec; 919 918 920 - mutex_lock(&cxlds->event.log_lock); 921 - payload = cxlds->event.buf; 919 + mutex_lock(&mds->event.log_lock); 920 + payload = mds->event.buf; 922 921 923 922 mbox_cmd = (struct cxl_mbox_cmd) { 924 923 .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, 925 924 .payload_in = &log_type, 926 925 .size_in = sizeof(log_type), 927 926 .payload_out = payload, 928 - .size_out = cxlds->payload_size, 927 + .size_out = mds->payload_size, 929 928 .min_out = struct_size(payload, records, 0), 930 929 }; 931 930 932 931 do { 933 932 int rc, i; 934 933 935 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 934 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 936 935 if (rc) { 937 - dev_err_ratelimited(cxlds->dev, 936 + dev_err_ratelimited(dev, 938 937 "Event log '%d': Failed to query event records : %d", 939 938 type, rc); 940 939 break; ··· 947 942 break; 948 943 949 944 for (i = 0; i < nr_rec; i++) 950 - cxl_event_trace_record(cxlds->cxlmd, type, 945 + cxl_event_trace_record(cxlmd, type, 951 946 &payload->records[i]); 952 947 953 948 if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW) 954 - trace_cxl_overflow(cxlds->cxlmd, type, payload); 949 + trace_cxl_overflow(cxlmd, type, payload); 955 950 956 - rc = cxl_clear_event_record(cxlds, type, payload); 951 + rc = cxl_clear_event_record(mds, type, payload); 957 952 if (rc) { 958 - dev_err_ratelimited(cxlds->dev, 953 + dev_err_ratelimited(dev, 959 954 "Event log '%d': Failed to clear events : %d", 960 955 type, rc); 961 956 break; 962 957 } 963 958 } while (nr_rec); 964 959 965 - mutex_unlock(&cxlds->event.log_lock); 960 + mutex_unlock(&mds->event.log_lock); 966 961 } 967 962 968 963 /** 969 964 * cxl_mem_get_event_records - Get Event Records from the device 970 - * @cxlds: The device data for the operation 965 + * @mds: The 
driver data for the operation 971 966 * @status: Event Status register value identifying which events are available. 972 967 * 973 968 * Retrieve all event records available on the device, report them as trace ··· 976 971 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records 977 972 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records 978 973 */ 979 - void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status) 974 + void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status) 980 975 { 981 - dev_dbg(cxlds->dev, "Reading event logs: %x\n", status); 976 + dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status); 982 977 983 978 if (status & CXLDEV_EVENT_STATUS_FATAL) 984 - cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL); 979 + cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL); 985 980 if (status & CXLDEV_EVENT_STATUS_FAIL) 986 - cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL); 981 + cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL); 987 982 if (status & CXLDEV_EVENT_STATUS_WARN) 988 - cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN); 983 + cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN); 989 984 if (status & CXLDEV_EVENT_STATUS_INFO) 990 - cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO); 985 + cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO); 991 986 } 992 987 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL); 993 988 994 989 /** 995 990 * cxl_mem_get_partition_info - Get partition info 996 - * @cxlds: The device data for the operation 991 + * @mds: The driver data for the operation 997 992 * 998 993 * Retrieve the current partition info for the device specified. The active 999 994 * values are the current capacity in bytes. 
If not 0, the 'next' values are ··· 1003 998 * 1004 999 * See CXL @8.2.9.5.2.1 Get Partition Info 1005 1000 */ 1006 - static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds) 1001 + static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) 1007 1002 { 1008 1003 struct cxl_mbox_get_partition_info pi; 1009 1004 struct cxl_mbox_cmd mbox_cmd; ··· 1014 1009 .size_out = sizeof(pi), 1015 1010 .payload_out = &pi, 1016 1011 }; 1017 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 1012 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1018 1013 if (rc) 1019 1014 return rc; 1020 1015 1021 - cxlds->active_volatile_bytes = 1016 + mds->active_volatile_bytes = 1022 1017 le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1023 - cxlds->active_persistent_bytes = 1018 + mds->active_persistent_bytes = 1024 1019 le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; 1025 - cxlds->next_volatile_bytes = 1020 + mds->next_volatile_bytes = 1026 1021 le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1027 - cxlds->next_persistent_bytes = 1022 + mds->next_persistent_bytes = 1028 1023 le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1029 1024 1030 1025 return 0; ··· 1032 1027 1033 1028 /** 1034 1029 * cxl_dev_state_identify() - Send the IDENTIFY command to the device. 1035 - * @cxlds: The device data for the operation 1030 + * @mds: The driver data for the operation 1036 1031 * 1037 1032 * Return: 0 if identify was executed successfully or media not ready. 1038 1033 * 1039 1034 * This will dispatch the identify command to the device and on success populate 1040 1035 * structures to be exported to sysfs. 
1041 1036 */ 1042 - int cxl_dev_state_identify(struct cxl_dev_state *cxlds) 1037 + int cxl_dev_state_identify(struct cxl_memdev_state *mds) 1043 1038 { 1044 1039 /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ 1045 1040 struct cxl_mbox_identify id; ··· 1047 1042 u32 val; 1048 1043 int rc; 1049 1044 1050 - if (!cxlds->media_ready) 1045 + if (!mds->cxlds.media_ready) 1051 1046 return 0; 1052 1047 1053 1048 mbox_cmd = (struct cxl_mbox_cmd) { ··· 1055 1050 .size_out = sizeof(id), 1056 1051 .payload_out = &id, 1057 1052 }; 1058 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 1053 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1059 1054 if (rc < 0) 1060 1055 return rc; 1061 1056 1062 - cxlds->total_bytes = 1057 + mds->total_bytes = 1063 1058 le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER; 1064 - cxlds->volatile_only_bytes = 1059 + mds->volatile_only_bytes = 1065 1060 le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER; 1066 - cxlds->persistent_only_bytes = 1061 + mds->persistent_only_bytes = 1067 1062 le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER; 1068 - cxlds->partition_align_bytes = 1063 + mds->partition_align_bytes = 1069 1064 le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER; 1070 1065 1071 - cxlds->lsa_size = le32_to_cpu(id.lsa_size); 1072 - memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision)); 1066 + mds->lsa_size = le32_to_cpu(id.lsa_size); 1067 + memcpy(mds->firmware_version, id.fw_revision, 1068 + sizeof(id.fw_revision)); 1073 1069 1074 - if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) { 1070 + if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) { 1075 1071 val = get_unaligned_le24(id.poison_list_max_mer); 1076 - cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX); 1072 + mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX); 1077 1073 } 1078 1074 1079 1075 return 0; ··· 1083 1077 1084 1078 /** 1085 1079 * cxl_mem_sanitize() - Send a 
sanitization command to the device. 1086 - * @cxlds: The device data for the operation 1080 + * @mds: The device data for the operation 1087 1081 * @cmd: The specific sanitization command opcode 1088 1082 * 1089 1083 * Return: 0 if the command was executed successfully, regardless of ··· 1094 1088 * 1095 1089 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase. 1096 1090 */ 1097 - int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd) 1091 + int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) 1098 1092 { 1099 1093 int rc; 1100 1094 u32 sec_out = 0; ··· 1107 1101 .size_out = sizeof(out), 1108 1102 }; 1109 1103 struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd }; 1104 + struct cxl_dev_state *cxlds = &mds->cxlds; 1110 1105 1111 1106 if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE) 1112 1107 return -EINVAL; 1113 1108 1114 - rc = cxl_internal_send_cmd(cxlds, &sec_cmd); 1109 + rc = cxl_internal_send_cmd(mds, &sec_cmd); 1115 1110 if (rc < 0) { 1116 1111 dev_err(cxlds->dev, "Failed to get security state : %d", rc); 1117 1112 return rc; ··· 1131 1124 sec_out & CXL_PMEM_SEC_STATE_LOCKED) 1132 1125 return -EINVAL; 1133 1126 1134 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 1127 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1135 1128 if (rc < 0) { 1136 1129 dev_err(cxlds->dev, "Failed to sanitize device : %d", rc); 1137 1130 return rc; ··· 1167 1160 return 0; 1168 1161 } 1169 1162 1170 - int cxl_mem_create_range_info(struct cxl_dev_state *cxlds) 1163 + int cxl_mem_create_range_info(struct cxl_memdev_state *mds) 1171 1164 { 1165 + struct cxl_dev_state *cxlds = &mds->cxlds; 1172 1166 struct device *dev = cxlds->dev; 1173 1167 int rc; 1174 1168 ··· 1181 1173 } 1182 1174 1183 1175 cxlds->dpa_res = 1184 - (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes); 1176 + (struct resource)DEFINE_RES_MEM(0, mds->total_bytes); 1185 1177 1186 - if (cxlds->partition_align_bytes == 0) { 1178 + if (mds->partition_align_bytes == 0) { 1187 1179 rc = 
add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, 1188 - cxlds->volatile_only_bytes, "ram"); 1180 + mds->volatile_only_bytes, "ram"); 1189 1181 if (rc) 1190 1182 return rc; 1191 1183 return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, 1192 - cxlds->volatile_only_bytes, 1193 - cxlds->persistent_only_bytes, "pmem"); 1184 + mds->volatile_only_bytes, 1185 + mds->persistent_only_bytes, "pmem"); 1194 1186 } 1195 1187 1196 - rc = cxl_mem_get_partition_info(cxlds); 1188 + rc = cxl_mem_get_partition_info(mds); 1197 1189 if (rc) { 1198 1190 dev_err(dev, "Failed to query partition information\n"); 1199 1191 return rc; 1200 1192 } 1201 1193 1202 1194 rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, 1203 - cxlds->active_volatile_bytes, "ram"); 1195 + mds->active_volatile_bytes, "ram"); 1204 1196 if (rc) 1205 1197 return rc; 1206 1198 return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, 1207 - cxlds->active_volatile_bytes, 1208 - cxlds->active_persistent_bytes, "pmem"); 1199 + mds->active_volatile_bytes, 1200 + mds->active_persistent_bytes, "pmem"); 1209 1201 } 1210 1202 EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL); 1211 1203 1212 - int cxl_set_timestamp(struct cxl_dev_state *cxlds) 1204 + int cxl_set_timestamp(struct cxl_memdev_state *mds) 1213 1205 { 1214 1206 struct cxl_mbox_cmd mbox_cmd; 1215 1207 struct cxl_mbox_set_timestamp_in pi; ··· 1222 1214 .payload_in = &pi, 1223 1215 }; 1224 1216 1225 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 1217 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1226 1218 /* 1227 1219 * Command is optional. Devices may have another way of providing 1228 1220 * a timestamp, or may return all 0s in timestamp fields. 
··· 1238 1230 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, 1239 1231 struct cxl_region *cxlr) 1240 1232 { 1241 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 1233 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 1242 1234 struct cxl_mbox_poison_out *po; 1243 1235 struct cxl_mbox_poison_in pi; 1244 1236 struct cxl_mbox_cmd mbox_cmd; 1245 1237 int nr_records = 0; 1246 1238 int rc; 1247 1239 1248 - rc = mutex_lock_interruptible(&cxlds->poison.lock); 1240 + rc = mutex_lock_interruptible(&mds->poison.lock); 1249 1241 if (rc) 1250 1242 return rc; 1251 1243 1252 - po = cxlds->poison.list_out; 1244 + po = mds->poison.list_out; 1253 1245 pi.offset = cpu_to_le64(offset); 1254 1246 pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); 1255 1247 ··· 1257 1249 .opcode = CXL_MBOX_OP_GET_POISON, 1258 1250 .size_in = sizeof(pi), 1259 1251 .payload_in = &pi, 1260 - .size_out = cxlds->payload_size, 1252 + .size_out = mds->payload_size, 1261 1253 .payload_out = po, 1262 1254 .min_out = struct_size(po, record, 0), 1263 1255 }; 1264 1256 1265 1257 do { 1266 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 1258 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1267 1259 if (rc) 1268 1260 break; 1269 1261 ··· 1274 1266 1275 1267 /* Protect against an uncleared _FLAG_MORE */ 1276 1268 nr_records = nr_records + le16_to_cpu(po->count); 1277 - if (nr_records >= cxlds->poison.max_errors) { 1269 + if (nr_records >= mds->poison.max_errors) { 1278 1270 dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n", 1279 1271 nr_records); 1280 1272 break; 1281 1273 } 1282 1274 } while (po->flags & CXL_POISON_FLAG_MORE); 1283 1275 1284 - mutex_unlock(&cxlds->poison.lock); 1276 + mutex_unlock(&mds->poison.lock); 1285 1277 return rc; 1286 1278 } 1287 1279 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL); ··· 1291 1283 kvfree(buf); 1292 1284 } 1293 1285 1294 - /* Get Poison List output buffer is protected by cxlds->poison.lock */ 1295 - static int cxl_poison_alloc_buf(struct 
cxl_dev_state *cxlds) 1286 + /* Get Poison List output buffer is protected by mds->poison.lock */ 1287 + static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds) 1296 1288 { 1297 - cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL); 1298 - if (!cxlds->poison.list_out) 1289 + mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL); 1290 + if (!mds->poison.list_out) 1299 1291 return -ENOMEM; 1300 1292 1301 - return devm_add_action_or_reset(cxlds->dev, free_poison_buf, 1302 - cxlds->poison.list_out); 1293 + return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf, 1294 + mds->poison.list_out); 1303 1295 } 1304 1296 1305 - int cxl_poison_state_init(struct cxl_dev_state *cxlds) 1297 + int cxl_poison_state_init(struct cxl_memdev_state *mds) 1306 1298 { 1307 1299 int rc; 1308 1300 1309 - if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) 1301 + if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) 1310 1302 return 0; 1311 1303 1312 - rc = cxl_poison_alloc_buf(cxlds); 1304 + rc = cxl_poison_alloc_buf(mds); 1313 1305 if (rc) { 1314 - clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds); 1306 + clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds); 1315 1307 return rc; 1316 1308 } 1317 1309 1318 - mutex_init(&cxlds->poison.lock); 1310 + mutex_init(&mds->poison.lock); 1319 1311 return 0; 1320 1312 } 1321 1313 EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); 1322 1314 1323 - struct cxl_dev_state *cxl_dev_state_create(struct device *dev) 1315 + struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) 1324 1316 { 1325 - struct cxl_dev_state *cxlds; 1317 + struct cxl_memdev_state *mds; 1326 1318 1327 - cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL); 1328 - if (!cxlds) { 1319 + mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL); 1320 + if (!mds) { 1329 1321 dev_err(dev, "No memory available\n"); 1330 1322 return ERR_PTR(-ENOMEM); 1331 1323 } 1332 1324 1333 - 
mutex_init(&cxlds->mbox_mutex); 1334 - mutex_init(&cxlds->event.log_lock); 1335 - cxlds->dev = dev; 1325 + mutex_init(&mds->mbox_mutex); 1326 + mutex_init(&mds->event.log_lock); 1327 + mds->cxlds.dev = dev; 1328 + mds->cxlds.type = CXL_DEVTYPE_CLASSMEM; 1336 1329 1337 - return cxlds; 1330 + return mds; 1338 1331 } 1339 - EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL); 1332 + EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL); 1340 1333 1341 1334 void __init cxl_mbox_init(void) 1342 1335 {
+96 -77
drivers/cxl/core/memdev.c
··· 41 41 { 42 42 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 43 43 struct cxl_dev_state *cxlds = cxlmd->cxlds; 44 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 44 45 45 - return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version); 46 + if (!mds) 47 + return sysfs_emit(buf, "\n"); 48 + return sysfs_emit(buf, "%.16s\n", mds->firmware_version); 46 49 } 47 50 static DEVICE_ATTR_RO(firmware_version); 48 51 ··· 54 51 { 55 52 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 56 53 struct cxl_dev_state *cxlds = cxlmd->cxlds; 54 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 57 55 58 - return sysfs_emit(buf, "%zu\n", cxlds->payload_size); 56 + if (!mds) 57 + return sysfs_emit(buf, "\n"); 58 + return sysfs_emit(buf, "%zu\n", mds->payload_size); 59 59 } 60 60 static DEVICE_ATTR_RO(payload_max); 61 61 ··· 67 61 { 68 62 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 69 63 struct cxl_dev_state *cxlds = cxlmd->cxlds; 64 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 70 65 71 - return sysfs_emit(buf, "%zu\n", cxlds->lsa_size); 66 + if (!mds) 67 + return sysfs_emit(buf, "\n"); 68 + return sysfs_emit(buf, "%zu\n", mds->lsa_size); 72 69 } 73 70 static DEVICE_ATTR_RO(label_storage_size); 74 71 ··· 124 115 { 125 116 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 126 117 struct cxl_dev_state *cxlds = cxlmd->cxlds; 127 - unsigned long state = cxlds->security.state; 118 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 128 119 u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); 129 120 u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg); 130 121 u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); 122 + unsigned long state = mds->security.state; 131 123 132 124 if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100) 133 125 return sysfs_emit(buf, "sanitize\n"); ··· 152 142 const char *buf, size_t len) 153 143 { 154 144 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 155 - struct cxl_dev_state *cxlds = 
cxlmd->cxlds; 156 - struct cxl_port *port = dev_get_drvdata(&cxlmd->dev); 157 - ssize_t rc; 145 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 146 + struct cxl_port *port = cxlmd->endpoint; 158 147 bool sanitize; 148 + ssize_t rc; 159 149 160 150 if (kstrtobool(buf, &sanitize) || !sanitize) 161 151 return -EINVAL; ··· 167 157 if (port->commit_end != -1) 168 158 return -EBUSY; 169 159 170 - rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE); 160 + rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE); 171 161 172 162 return rc ? rc : len; 173 163 } ··· 179 169 const char *buf, size_t len) 180 170 { 181 171 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 182 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 183 - struct cxl_port *port = dev_get_drvdata(&cxlmd->dev); 172 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 173 + struct cxl_port *port = cxlmd->endpoint; 184 174 ssize_t rc; 185 175 bool erase; 186 176 ··· 194 184 if (port->commit_end != -1) 195 185 return -EBUSY; 196 186 197 - rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SECURE_ERASE); 187 + rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE); 198 188 199 189 return rc ? 
rc : len; 200 190 } ··· 234 224 struct cxl_port *port; 235 225 int rc; 236 226 237 - port = dev_get_drvdata(&cxlmd->dev); 227 + port = cxlmd->endpoint; 238 228 if (!port || !is_cxl_endpoint(port)) 239 229 return -EINVAL; 240 230 ··· 292 282 ctx = (struct cxl_dpa_to_region_context) { 293 283 .dpa = dpa, 294 284 }; 295 - port = dev_get_drvdata(&cxlmd->dev); 285 + port = cxlmd->endpoint; 296 286 if (port && is_cxl_endpoint(port) && port->commit_end != -1) 297 287 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); 298 288 ··· 325 315 326 316 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) 327 317 { 328 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 318 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 329 319 struct cxl_mbox_inject_poison inject; 330 320 struct cxl_poison_record record; 331 321 struct cxl_mbox_cmd mbox_cmd; ··· 349 339 .size_in = sizeof(inject), 350 340 .payload_in = &inject, 351 341 }; 352 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 342 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 353 343 if (rc) 354 344 goto out; 355 345 356 346 cxlr = cxl_dpa_to_region(cxlmd, dpa); 357 347 if (cxlr) 358 - dev_warn_once(cxlds->dev, 348 + dev_warn_once(mds->cxlds.dev, 359 349 "poison inject dpa:%#llx region: %s\n", dpa, 360 350 dev_name(&cxlr->dev)); 361 351 ··· 373 363 374 364 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) 375 365 { 376 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 366 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 377 367 struct cxl_mbox_clear_poison clear; 378 368 struct cxl_poison_record record; 379 369 struct cxl_mbox_cmd mbox_cmd; ··· 406 396 .payload_in = &clear, 407 397 }; 408 398 409 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 399 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 410 400 if (rc) 411 401 goto out; 412 402 413 403 cxlr = cxl_dpa_to_region(cxlmd, dpa); 414 404 if (cxlr) 415 - dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n", 416 - dpa, 
dev_name(&cxlr->dev)); 405 + dev_warn_once(mds->cxlds.dev, 406 + "poison clear dpa:%#llx region: %s\n", dpa, 407 + dev_name(&cxlr->dev)); 417 408 418 409 record = (struct cxl_poison_record) { 419 410 .address = cpu_to_le64(dpa), ··· 505 494 506 495 /** 507 496 * set_exclusive_cxl_commands() - atomically disable user cxl commands 508 - * @cxlds: The device state to operate on 497 + * @mds: The device state to operate on 509 498 * @cmds: bitmap of commands to mark exclusive 510 499 * 511 500 * Grab the cxl_memdev_rwsem in write mode to flush in-flight 512 501 * invocations of the ioctl path and then disable future execution of 513 502 * commands with the command ids set in @cmds. 514 503 */ 515 - void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds) 504 + void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, 505 + unsigned long *cmds) 516 506 { 517 507 down_write(&cxl_memdev_rwsem); 518 - bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds, 508 + bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds, 519 509 CXL_MEM_COMMAND_ID_MAX); 520 510 up_write(&cxl_memdev_rwsem); 521 511 } ··· 524 512 525 513 /** 526 514 * clear_exclusive_cxl_commands() - atomically enable user cxl commands 527 - * @cxlds: The device state to modify 515 + * @mds: The device state to modify 528 516 * @cmds: bitmap of commands to mark available for userspace 529 517 */ 530 - void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds) 518 + void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, 519 + unsigned long *cmds) 531 520 { 532 521 down_write(&cxl_memdev_rwsem); 533 - bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds, 522 + bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds, 534 523 CXL_MEM_COMMAND_ID_MAX); 535 524 up_write(&cxl_memdev_rwsem); 536 525 } ··· 540 527 static void cxl_memdev_security_shutdown(struct device *dev) 541 528 { 542 529 struct cxl_memdev *cxlmd = 
to_cxl_memdev(dev); 543 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 530 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 544 531 545 - if (cxlds->security.poll) 546 - cancel_delayed_work_sync(&cxlds->security.poll_dwork); 532 + if (mds->security.poll) 533 + cancel_delayed_work_sync(&mds->security.poll_dwork); 547 534 } 548 535 549 536 static void cxl_memdev_shutdown(struct device *dev) ··· 631 618 unsigned long arg) 632 619 { 633 620 struct cxl_memdev *cxlmd = file->private_data; 621 + struct cxl_dev_state *cxlds; 634 622 int rc = -ENXIO; 635 623 636 624 down_read(&cxl_memdev_rwsem); 637 - if (cxlmd->cxlds) 625 + cxlds = cxlmd->cxlds; 626 + if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM) 638 627 rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); 639 628 up_read(&cxl_memdev_rwsem); 640 629 ··· 674 659 * 675 660 * See CXL-3.0 8.2.9.3.1 Get FW Info 676 661 */ 677 - static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds) 662 + static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) 678 663 { 679 664 struct cxl_mbox_get_fw_info info; 680 665 struct cxl_mbox_cmd mbox_cmd; ··· 686 671 .payload_out = &info, 687 672 }; 688 673 689 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 674 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 690 675 if (rc < 0) 691 676 return rc; 692 677 693 - cxlds->fw.num_slots = info.num_slots; 694 - cxlds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, 678 + mds->fw.num_slots = info.num_slots; 679 + mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, 695 680 info.slot_info); 696 681 697 682 return 0; ··· 699 684 700 685 /** 701 686 * cxl_mem_activate_fw - Activate Firmware 702 - * @cxlds: The device data for the operation 687 + * @mds: The device data for the operation 703 688 * @slot: slot number to activate 704 689 * 705 690 * Activate firmware in a given slot for the device specified. 
··· 708 693 * 709 694 * See CXL-3.0 8.2.9.3.3 Activate FW 710 695 */ 711 - static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot) 696 + static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) 712 697 { 713 698 struct cxl_mbox_activate_fw activate; 714 699 struct cxl_mbox_cmd mbox_cmd; 715 700 716 - if (slot == 0 || slot > cxlds->fw.num_slots) 701 + if (slot == 0 || slot > mds->fw.num_slots) 717 702 return -EINVAL; 718 703 719 704 mbox_cmd = (struct cxl_mbox_cmd) { ··· 726 711 activate.action = CXL_FW_ACTIVATE_OFFLINE; 727 712 activate.slot = slot; 728 713 729 - return cxl_internal_send_cmd(cxlds, &mbox_cmd); 714 + return cxl_internal_send_cmd(mds, &mbox_cmd); 730 715 } 731 716 732 717 /** 733 718 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer 734 - * @cxlds: The device data for the operation 719 + * @mds: The device data for the operation 735 720 * 736 721 * Abort an in-progress firmware transfer for the device specified. 737 722 * ··· 739 724 * 740 725 * See CXL-3.0 8.2.9.3.2 Transfer FW 741 726 */ 742 - static int cxl_mem_abort_fw_xfer(struct cxl_dev_state *cxlds) 727 + static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) 743 728 { 744 729 struct cxl_mbox_transfer_fw *transfer; 745 730 struct cxl_mbox_cmd mbox_cmd; ··· 760 745 761 746 transfer->action = CXL_FW_TRANSFER_ACTION_ABORT; 762 747 763 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 748 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 764 749 kfree(transfer); 765 750 return rc; 766 751 } 767 752 768 753 static void cxl_fw_cleanup(struct fw_upload *fwl) 769 754 { 770 - struct cxl_dev_state *cxlds = fwl->dd_handle; 755 + struct cxl_memdev_state *mds = fwl->dd_handle; 771 756 772 - cxlds->fw.next_slot = 0; 757 + mds->fw.next_slot = 0; 773 758 } 774 759 775 760 static int cxl_fw_do_cancel(struct fw_upload *fwl) 776 761 { 777 - struct cxl_dev_state *cxlds = fwl->dd_handle; 762 + struct cxl_memdev_state *mds = fwl->dd_handle; 763 + struct cxl_dev_state *cxlds = 
&mds->cxlds; 778 764 struct cxl_memdev *cxlmd = cxlds->cxlmd; 779 765 int rc; 780 766 781 - rc = cxl_mem_abort_fw_xfer(cxlds); 767 + rc = cxl_mem_abort_fw_xfer(mds); 782 768 if (rc < 0) 783 769 dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc); 784 770 ··· 789 773 static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, 790 774 u32 size) 791 775 { 792 - struct cxl_dev_state *cxlds = fwl->dd_handle; 776 + struct cxl_memdev_state *mds = fwl->dd_handle; 793 777 struct cxl_mbox_transfer_fw *transfer; 794 778 795 779 if (!size) 796 780 return FW_UPLOAD_ERR_INVALID_SIZE; 797 781 798 - cxlds->fw.oneshot = struct_size(transfer, data, size) < 799 - cxlds->payload_size; 782 + mds->fw.oneshot = struct_size(transfer, data, size) < 783 + mds->payload_size; 800 784 801 - if (cxl_mem_get_fw_info(cxlds)) 785 + if (cxl_mem_get_fw_info(mds)) 802 786 return FW_UPLOAD_ERR_HW_ERROR; 803 787 804 788 /* 805 789 * So far no state has been changed, hence no other cleanup is 806 790 * necessary. Simply return the cancelled status. 807 791 */ 808 - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) 792 + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 809 793 return FW_UPLOAD_ERR_CANCELED; 810 794 811 795 return FW_UPLOAD_ERR_NONE; ··· 814 798 static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, 815 799 u32 offset, u32 size, u32 *written) 816 800 { 817 - struct cxl_dev_state *cxlds = fwl->dd_handle; 801 + struct cxl_memdev_state *mds = fwl->dd_handle; 802 + struct cxl_dev_state *cxlds = &mds->cxlds; 818 803 struct cxl_memdev *cxlmd = cxlds->cxlmd; 819 804 struct cxl_mbox_transfer_fw *transfer; 820 805 struct cxl_mbox_cmd mbox_cmd; ··· 834 817 } 835 818 836 819 /* 837 - * Pick transfer size based on cxlds->payload_size 838 - * @size must bw 128-byte aligned, ->payload_size is a power of 2 839 - * starting at 256 bytes, and sizeof(*transfer) is 128. 840 - * These constraints imply that @cur_size will always be 128b aligned. 
820 + * Pick transfer size based on mds->payload_size @size must be 128-byte 821 + * aligned, ->payload_size is a power of 2 starting at 256 bytes, and 822 + * sizeof(*transfer) is 128. These constraints imply that @cur_size 823 + * will always be 128b aligned. 841 824 */ 842 - cur_size = min_t(size_t, size, cxlds->payload_size - sizeof(*transfer)); 825 + cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer)); 843 826 844 827 remaining = size - cur_size; 845 828 size_in = struct_size(transfer, data, cur_size); 846 829 847 - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) 830 + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 848 831 return cxl_fw_do_cancel(fwl); 849 832 850 833 /* ··· 852 835 * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1') 853 836 * Check for rollover using modulo, and 1-index it by adding 1 854 837 */ 855 - cxlds->fw.next_slot = (cxlds->fw.cur_slot % cxlds->fw.num_slots) + 1; 838 + mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1; 856 839 857 840 /* Do the transfer via mailbox cmd */ 858 841 transfer = kzalloc(size_in, GFP_KERNEL); ··· 861 844 862 845 transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT); 863 846 memcpy(transfer->data, data + offset, cur_size); 864 - if (cxlds->fw.oneshot) { 847 + if (mds->fw.oneshot) { 865 848 transfer->action = CXL_FW_TRANSFER_ACTION_FULL; 866 - transfer->slot = cxlds->fw.next_slot; 849 + transfer->slot = mds->fw.next_slot; 867 850 } else { 868 851 if (offset == 0) { 869 852 transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE; 870 853 } else if (remaining == 0) { 871 854 transfer->action = CXL_FW_TRANSFER_ACTION_END; 872 - transfer->slot = cxlds->fw.next_slot; 855 + transfer->slot = mds->fw.next_slot; 873 856 } else { 874 857 transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE; 875 858 } ··· 883 866 .poll_count = 30, 884 867 }; 885 868 886 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 869 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 887 870 
if (rc < 0) { 888 871 rc = FW_UPLOAD_ERR_RW_ERROR; 889 872 goto out_free; ··· 892 875 *written = cur_size; 893 876 894 877 /* Activate FW if oneshot or if the last slice was written */ 895 - if (cxlds->fw.oneshot || remaining == 0) { 878 + if (mds->fw.oneshot || remaining == 0) { 896 879 dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n", 897 - cxlds->fw.next_slot); 898 - rc = cxl_mem_activate_fw(cxlds, cxlds->fw.next_slot); 880 + mds->fw.next_slot); 881 + rc = cxl_mem_activate_fw(mds, mds->fw.next_slot); 899 882 if (rc < 0) { 900 883 dev_err(&cxlmd->dev, "Error activating firmware: %d\n", 901 884 rc); ··· 913 896 914 897 static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl) 915 898 { 916 - struct cxl_dev_state *cxlds = fwl->dd_handle; 899 + struct cxl_memdev_state *mds = fwl->dd_handle; 917 900 918 901 /* 919 902 * cxl_internal_send_cmd() handles background operations synchronously. ··· 921 904 * reported and handled during the ->write() call(s). 922 905 * Just check if a cancel request was received, and return success. 
923 906 */ 924 - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) 907 + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 925 908 return cxl_fw_do_cancel(fwl); 926 909 927 910 return FW_UPLOAD_ERR_NONE; ··· 929 912 930 913 static void cxl_fw_cancel(struct fw_upload *fwl) 931 914 { 932 - struct cxl_dev_state *cxlds = fwl->dd_handle; 915 + struct cxl_memdev_state *mds = fwl->dd_handle; 933 916 934 - set_bit(CXL_FW_CANCEL, cxlds->fw.state); 917 + set_bit(CXL_FW_CANCEL, mds->fw.state); 935 918 } 936 919 937 920 static const struct fw_upload_ops cxl_memdev_fw_ops = { ··· 947 930 firmware_upload_unregister(fwl); 948 931 } 949 932 950 - int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds) 933 + int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds) 951 934 { 935 + struct cxl_dev_state *cxlds = &mds->cxlds; 952 936 struct device *dev = &cxlds->cxlmd->dev; 953 937 struct fw_upload *fwl; 954 938 int rc; 955 939 956 - if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxlds->enabled_cmds)) 940 + if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds)) 957 941 return 0; 958 942 959 943 fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), 960 - &cxl_memdev_fw_ops, cxlds); 944 + &cxl_memdev_fw_ops, mds); 961 945 if (IS_ERR(fwl)) 962 946 return dev_err_probe(dev, PTR_ERR(fwl), 963 947 "Failed to register firmware loader\n"); ··· 985 967 986 968 static void put_sanitize(void *data) 987 969 { 988 - struct cxl_dev_state *cxlds = data; 970 + struct cxl_memdev_state *mds = data; 989 971 990 - sysfs_put(cxlds->security.sanitize_node); 972 + sysfs_put(mds->security.sanitize_node); 991 973 } 992 974 993 975 static int cxl_memdev_security_init(struct cxl_memdev *cxlmd) 994 976 { 995 977 struct cxl_dev_state *cxlds = cxlmd->cxlds; 978 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 996 979 struct device *dev = &cxlmd->dev; 997 980 struct kernfs_node *sec; 998 981 ··· 1002 983 dev_err(dev, "sysfs_get_dirent 'security' failed\n"); 1003 984 
return -ENODEV; 1004 985 } 1005 - cxlds->security.sanitize_node = sysfs_get_dirent(sec, "state"); 986 + mds->security.sanitize_node = sysfs_get_dirent(sec, "state"); 1006 987 sysfs_put(sec); 1007 - if (!cxlds->security.sanitize_node) { 988 + if (!mds->security.sanitize_node) { 1008 989 dev_err(dev, "sysfs_get_dirent 'state' failed\n"); 1009 990 return -ENODEV; 1010 991 } 1011 992 1012 - return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds); 993 + return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds); 1013 994 } 1014 995 1015 996 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+4 -23
drivers/cxl/core/pci.c
··· 308 308 hdm + CXL_HDM_DECODER_CTRL_OFFSET); 309 309 } 310 310 311 - int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm) 311 + static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) 312 312 { 313 - void __iomem *hdm; 313 + void __iomem *hdm = cxlhdm->regs.hdm_decoder; 314 314 u32 global_ctrl; 315 315 316 - /* 317 - * If the hdm capability was not mapped there is nothing to enable and 318 - * the caller is responsible for what happens next. For example, 319 - * emulate a passthrough decoder. 320 - */ 321 - if (IS_ERR(cxlhdm)) 322 - return 0; 323 - 324 - hdm = cxlhdm->regs.hdm_decoder; 325 316 global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); 326 - 327 - /* 328 - * If the HDM decoder capability was enabled on entry, skip 329 - * registering disable_hdm() since this decode capability may be 330 - * owned by platform firmware. 331 - */ 332 - if (global_ctrl & CXL_HDM_DECODER_ENABLE) 333 - return 0; 334 - 335 317 writel(global_ctrl | CXL_HDM_DECODER_ENABLE, 336 318 hdm + CXL_HDM_DECODER_CTRL_OFFSET); 337 319 338 - return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm); 320 + return devm_add_action_or_reset(host, disable_hdm, cxlhdm); 339 321 } 340 - EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL); 341 322 342 323 int cxl_dvsec_rr_decode(struct device *dev, int d, 343 324 struct cxl_endpoint_dvsec_info *info) ··· 492 511 if (info->mem_enabled) 493 512 return 0; 494 513 495 - rc = devm_cxl_enable_hdm(port, cxlhdm); 514 + rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); 496 515 if (rc) 497 516 return rc; 498 517
+1 -1
drivers/cxl/core/pmem.c
··· 64 64 65 65 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd) 66 66 { 67 - struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev)); 67 + struct cxl_port *port = find_cxl_root(cxlmd->endpoint); 68 68 struct device *dev; 69 69 70 70 if (!port)
+6 -5
drivers/cxl/core/port.c
··· 117 117 struct cxl_decoder *cxld = to_cxl_decoder(dev); 118 118 119 119 switch (cxld->target_type) { 120 - case CXL_DECODER_ACCELERATOR: 120 + case CXL_DECODER_DEVMEM: 121 121 return sysfs_emit(buf, "accelerator\n"); 122 - case CXL_DECODER_EXPANDER: 122 + case CXL_DECODER_HOSTONLYMEM: 123 123 return sysfs_emit(buf, "expander\n"); 124 124 } 125 125 return -ENXIO; ··· 1161 1161 static void delete_endpoint(void *data) 1162 1162 { 1163 1163 struct cxl_memdev *cxlmd = data; 1164 - struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev); 1164 + struct cxl_port *endpoint = cxlmd->endpoint; 1165 1165 struct cxl_port *parent_port; 1166 1166 struct device *parent; 1167 1167 ··· 1176 1176 devm_release_action(parent, cxl_unlink_uport, endpoint); 1177 1177 devm_release_action(parent, unregister_port, endpoint); 1178 1178 } 1179 + cxlmd->endpoint = NULL; 1179 1180 device_unlock(parent); 1180 1181 put_device(parent); 1181 1182 out: ··· 1188 1187 struct device *dev = &cxlmd->dev; 1189 1188 1190 1189 get_device(&endpoint->dev); 1191 - dev_set_drvdata(dev, endpoint); 1190 + cxlmd->endpoint = endpoint; 1192 1191 cxlmd->depth = endpoint->depth; 1193 1192 return devm_add_action_or_reset(dev, delete_endpoint, cxlmd); 1194 1193 } ··· 1551 1550 /* Pre initialize an "empty" decoder */ 1552 1551 cxld->interleave_ways = 1; 1553 1552 cxld->interleave_granularity = PAGE_SIZE; 1554 - cxld->target_type = CXL_DECODER_EXPANDER; 1553 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 1555 1554 cxld->hpa_range = (struct range) { 1556 1555 .start = 0, 1557 1556 .end = -1,
+13 -1
drivers/cxl/core/region.c
··· 809 809 return -EBUSY; 810 810 } 811 811 812 + /* 813 + * Endpoints should already match the region type, but backstop that 814 + * assumption with an assertion. Switch-decoders change mapping-type 815 + * based on what is mapped when they are assigned to a region. 816 + */ 817 + dev_WARN_ONCE(&cxlr->dev, 818 + port == cxled_to_port(cxled) && 819 + cxld->target_type != cxlr->type, 820 + "%s:%s mismatch decoder type %d -> %d\n", 821 + dev_name(&cxled_to_memdev(cxled)->dev), 822 + dev_name(&cxld->dev), cxld->target_type, cxlr->type); 823 + cxld->target_type = cxlr->type; 812 824 cxl_rr->decoder = cxld; 813 825 return 0; 814 826 } ··· 2115 2103 return ERR_PTR(-EBUSY); 2116 2104 } 2117 2105 2118 - return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_EXPANDER); 2106 + return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); 2119 2107 } 2120 2108 2121 2109 static ssize_t create_pmem_region_store(struct device *dev,
+4 -4
drivers/cxl/core/regs.c
··· 200 200 } 201 201 202 202 int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, 203 - struct cxl_register_map *map, unsigned long map_mask) 203 + const struct cxl_register_map *map, unsigned long map_mask) 204 204 { 205 205 struct mapinfo { 206 - struct cxl_reg_map *rmap; 206 + const struct cxl_reg_map *rmap; 207 207 void __iomem **addr; 208 208 } mapinfo[] = { 209 209 { &map->component_map.hdm_decoder, &regs->hdm_decoder }, ··· 233 233 234 234 int cxl_map_device_regs(struct device *dev, 235 235 struct cxl_device_regs *regs, 236 - struct cxl_register_map *map) 236 + const struct cxl_register_map *map) 237 237 { 238 238 resource_size_t phys_addr = map->resource; 239 239 struct mapinfo { 240 - struct cxl_reg_map *rmap; 240 + const struct cxl_reg_map *rmap; 241 241 void __iomem **addr; 242 242 } mapinfo[] = { 243 243 { &map->device_map.status, &regs->status, },
+5 -6
drivers/cxl/cxl.h
··· 56 56 #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) 57 57 #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) 58 58 #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) 59 - #define CXL_HDM_DECODER0_CTRL_TYPE BIT(12) 59 + #define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12) 60 60 #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) 61 61 #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) 62 62 #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) ··· 262 262 void cxl_probe_device_regs(struct device *dev, void __iomem *base, 263 263 struct cxl_device_reg_map *map); 264 264 int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, 265 - struct cxl_register_map *map, 265 + const struct cxl_register_map *map, 266 266 unsigned long map_mask); 267 267 int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, 268 - struct cxl_register_map *map); 268 + const struct cxl_register_map *map); 269 269 270 270 enum cxl_regloc_type; 271 271 int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, ··· 298 298 #define CXL_DECODER_F_MASK GENMASK(5, 0) 299 299 300 300 enum cxl_decoder_type { 301 - CXL_DECODER_ACCELERATOR = 2, 302 - CXL_DECODER_EXPANDER = 3, 301 + CXL_DECODER_DEVMEM = 2, 302 + CXL_DECODER_HOSTONLYMEM = 3, 303 303 }; 304 304 305 305 /* ··· 718 718 struct cxl_hdm; 719 719 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, 720 720 struct cxl_endpoint_dvsec_info *info); 721 - int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm); 722 721 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 723 722 struct cxl_endpoint_dvsec_info *info); 724 723 int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+77 -41
drivers/cxl/cxlmem.h
··· 39 39 * @detach_work: active memdev lost a port in its ancestry 40 40 * @cxl_nvb: coordinate removal of @cxl_nvd if present 41 41 * @cxl_nvd: optional bridge to an nvdimm if the device supports pmem 42 + * @endpoint: connection to the CXL port topology for this memory device 42 43 * @id: id number of this memdev instance. 43 44 * @depth: endpoint port depth 44 45 */ ··· 50 49 struct work_struct detach_work; 51 50 struct cxl_nvdimm_bridge *cxl_nvb; 52 51 struct cxl_nvdimm *cxl_nvd; 52 + struct cxl_port *endpoint; 53 53 int id; 54 54 int depth; 55 55 }; ··· 85 83 } 86 84 87 85 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds); 88 - int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds); 86 + struct cxl_memdev_state; 87 + int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds); 89 88 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, 90 89 resource_size_t base, resource_size_t len, 91 90 resource_size_t skipped); ··· 205 202 */ 206 203 #define CXL_CAPACITY_MULTIPLIER SZ_256M 207 204 208 - /** 205 + /* 209 206 * Event Interrupt Policy 210 207 * 211 208 * CXL rev 3.0 section 8.2.9.2.4; Table 8-52 ··· 225 222 /** 226 223 * struct cxl_event_state - Event log driver state 227 224 * 228 - * @event_buf: Buffer to receive event data 229 - * @event_log_lock: Serialize event_buf and log use 225 + * @buf: Buffer to receive event data 226 + * @log_lock: Serialize event_buf and log use 230 227 */ 231 228 struct cxl_event_state { 232 229 struct cxl_get_event_payload *buf; ··· 359 356 struct kernfs_node *sanitize_node; 360 357 }; 361 358 359 + /* 360 + * enum cxl_devtype - delineate type-2 from a generic type-3 device 361 + * @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or 362 + * HDM-DB, no requirement that this device implements a 363 + * mailbox, or other memory-device-standard manageability 364 + * flows. 
365 + * @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with 366 + * HDM-H and class-mandatory memory device registers 367 + */ 368 + enum cxl_devtype { 369 + CXL_DEVTYPE_DEVMEM, 370 + CXL_DEVTYPE_CLASSMEM, 371 + }; 372 + 362 373 /** 363 374 * struct cxl_dev_state - The driver device state 364 375 * ··· 386 369 * @cxl_dvsec: Offset to the PCIe device DVSEC 387 370 * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH) 388 371 * @media_ready: Indicate whether the device media is usable 372 + * @dpa_res: Overall DPA resource tree for the device 373 + * @pmem_res: Active Persistent memory capacity configuration 374 + * @ram_res: Active Volatile memory capacity configuration 375 + * @component_reg_phys: register base of component registers 376 + * @serial: PCIe Device Serial Number 377 + * @type: Generic Memory Class device or Vendor Specific Memory device 378 + */ 379 + struct cxl_dev_state { 380 + struct device *dev; 381 + struct cxl_memdev *cxlmd; 382 + struct cxl_regs regs; 383 + int cxl_dvsec; 384 + bool rcd; 385 + bool media_ready; 386 + struct resource dpa_res; 387 + struct resource pmem_res; 388 + struct resource ram_res; 389 + resource_size_t component_reg_phys; 390 + u64 serial; 391 + enum cxl_devtype type; 392 + }; 393 + 394 + /** 395 + * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data 396 + * 397 + * CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines 398 + * common memory device functionality like the presence of a mailbox and 399 + * the functionality related to that like Identify Memory Device and Get 400 + * Partition Info 401 + * @cxlds: Core driver state common across Type-2 and Type-3 devices 389 402 * @payload_size: Size of space for payload 390 403 * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) 391 404 * @lsa_size: Size of Label Storage Area ··· 424 377 * @firmware_version: Firmware version for the memory device. 
425 378 * @enabled_cmds: Hardware commands found enabled in CEL. 426 379 * @exclusive_cmds: Commands that are kernel-internal only 427 - * @dpa_res: Overall DPA resource tree for the device 428 - * @pmem_res: Active Persistent memory capacity configuration 429 - * @ram_res: Active Volatile memory capacity configuration 430 380 * @total_bytes: sum of all possible capacities 431 381 * @volatile_only_bytes: hard volatile capacity 432 382 * @persistent_only_bytes: hard persistent capacity ··· 432 388 * @active_persistent_bytes: sum of hard + soft persistent 433 389 * @next_volatile_bytes: volatile capacity change pending device reset 434 390 * @next_persistent_bytes: persistent capacity change pending device reset 435 - * @component_reg_phys: register base of component registers 436 - * @info: Cached DVSEC information about the device. 437 - * @serial: PCIe Device Serial Number 438 391 * @event: event log driver state 439 392 * @poison: poison driver state info 440 393 * @fw: firmware upload / activation state 441 394 * @mbox_send: @dev specific transport for transmitting mailbox commands 442 395 * 443 - * See section 8.2.9.5.2 Capacity Configuration and Label Storage for 396 + * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for 444 397 * details on capacity parameters. 
445 398 */ 446 - struct cxl_dev_state { 447 - struct device *dev; 448 - struct cxl_memdev *cxlmd; 449 - 450 - struct cxl_regs regs; 451 - int cxl_dvsec; 452 - 453 - bool rcd; 454 - bool media_ready; 399 + struct cxl_memdev_state { 400 + struct cxl_dev_state cxlds; 455 401 size_t payload_size; 456 402 size_t lsa_size; 457 403 struct mutex mbox_mutex; /* Protects device mailbox and firmware */ 458 404 char firmware_version[0x10]; 459 405 DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX); 460 406 DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); 461 - 462 - struct resource dpa_res; 463 - struct resource pmem_res; 464 - struct resource ram_res; 465 407 u64 total_bytes; 466 408 u64 volatile_only_bytes; 467 409 u64 persistent_only_bytes; 468 410 u64 partition_align_bytes; 469 - 470 411 u64 active_volatile_bytes; 471 412 u64 active_persistent_bytes; 472 413 u64 next_volatile_bytes; 473 414 u64 next_persistent_bytes; 474 - 475 - resource_size_t component_reg_phys; 476 - u64 serial; 477 - 478 415 struct cxl_event_state event; 479 416 struct cxl_poison_state poison; 480 417 struct cxl_security_state security; 481 418 struct cxl_fw_state fw; 482 419 483 420 struct rcuwait mbox_wait; 484 - int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); 421 + int (*mbox_send)(struct cxl_memdev_state *mds, 422 + struct cxl_mbox_cmd *cmd); 485 423 }; 424 + 425 + static inline struct cxl_memdev_state * 426 + to_cxl_memdev_state(struct cxl_dev_state *cxlds) 427 + { 428 + if (cxlds->type != CXL_DEVTYPE_CLASSMEM) 429 + return NULL; 430 + return container_of(cxlds, struct cxl_memdev_state, cxlds); 431 + } 486 432 487 433 enum cxl_opcode { 488 434 CXL_MBOX_OP_INVALID = 0x0000, ··· 835 801 CXL_PMEM_SEC_PASS_USER, 836 802 }; 837 803 838 - int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, 804 + int cxl_internal_send_cmd(struct cxl_memdev_state *mds, 839 805 struct cxl_mbox_cmd *cmd); 840 - int cxl_dev_state_identify(struct cxl_dev_state *cxlds); 806 + int 
cxl_dev_state_identify(struct cxl_memdev_state *mds); 841 807 int cxl_await_media_ready(struct cxl_dev_state *cxlds); 842 - int cxl_enumerate_cmds(struct cxl_dev_state *cxlds); 843 - int cxl_mem_create_range_info(struct cxl_dev_state *cxlds); 844 - struct cxl_dev_state *cxl_dev_state_create(struct device *dev); 845 - void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds); 846 - void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds); 847 - void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status); 848 - int cxl_set_timestamp(struct cxl_dev_state *cxlds); 849 - int cxl_poison_state_init(struct cxl_dev_state *cxlds); 808 + int cxl_enumerate_cmds(struct cxl_memdev_state *mds); 809 + int cxl_mem_create_range_info(struct cxl_memdev_state *mds); 810 + struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev); 811 + void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, 812 + unsigned long *cmds); 813 + void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, 814 + unsigned long *cmds); 815 + void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status); 816 + int cxl_set_timestamp(struct cxl_memdev_state *mds); 817 + int cxl_poison_state_init(struct cxl_memdev_state *mds); 850 818 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, 851 819 struct cxl_region *cxlr); 852 820 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd); ··· 867 831 } 868 832 #endif 869 833 870 - int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd); 834 + int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd); 871 835 872 836 struct cxl_hdm { 873 837 struct cxl_component_regs regs;
+7 -3
drivers/cxl/mem.c
··· 117 117 static int cxl_mem_probe(struct device *dev) 118 118 { 119 119 struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 120 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 120 121 struct cxl_dev_state *cxlds = cxlmd->cxlds; 121 122 struct device *endpoint_parent; 122 123 struct cxl_port *parent_port; ··· 142 141 dentry = cxl_debugfs_create_dir(dev_name(dev)); 143 142 debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show); 144 143 145 - if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds)) 144 + if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds)) 146 145 debugfs_create_file("inject_poison", 0200, dentry, cxlmd, 147 146 &cxl_poison_inject_fops); 148 - if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds)) 147 + if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds)) 149 148 debugfs_create_file("clear_poison", 0200, dentry, cxlmd, 150 149 &cxl_poison_clear_fops); 151 150 ··· 228 227 { 229 228 if (a == &dev_attr_trigger_poison_list.attr) { 230 229 struct device *dev = kobj_to_dev(kobj); 230 + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 231 + struct cxl_memdev_state *mds = 232 + to_cxl_memdev_state(cxlmd->cxlds); 231 233 232 234 if (!test_bit(CXL_POISON_ENABLED_LIST, 233 - to_cxl_memdev(dev)->cxlds->poison.enabled_cmds)) 235 + mds->poison.enabled_cmds)) 234 236 return 0; 235 237 } 236 238 return a->mode;
+87 -93
drivers/cxl/pci.c
··· 119 119 u16 opcode; 120 120 struct cxl_dev_id *dev_id = id; 121 121 struct cxl_dev_state *cxlds = dev_id->cxlds; 122 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 122 123 123 124 if (!cxl_mbox_background_complete(cxlds)) 124 125 return IRQ_NONE; ··· 127 126 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); 128 127 opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); 129 128 if (opcode == CXL_MBOX_OP_SANITIZE) { 130 - if (cxlds->security.sanitize_node) 131 - sysfs_notify_dirent(cxlds->security.sanitize_node); 129 + if (mds->security.sanitize_node) 130 + sysfs_notify_dirent(mds->security.sanitize_node); 132 131 133 132 dev_dbg(cxlds->dev, "Sanitization operation ended\n"); 134 133 } else { 135 134 /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */ 136 - rcuwait_wake_up(&cxlds->mbox_wait); 135 + rcuwait_wake_up(&mds->mbox_wait); 137 136 } 138 137 139 138 return IRQ_HANDLED; ··· 144 143 */ 145 144 static void cxl_mbox_sanitize_work(struct work_struct *work) 146 145 { 147 - struct cxl_dev_state *cxlds; 146 + struct cxl_memdev_state *mds = 147 + container_of(work, typeof(*mds), security.poll_dwork.work); 148 + struct cxl_dev_state *cxlds = &mds->cxlds; 148 149 149 - cxlds = container_of(work, 150 - struct cxl_dev_state, security.poll_dwork.work); 151 - 152 - mutex_lock(&cxlds->mbox_mutex); 150 + mutex_lock(&mds->mbox_mutex); 153 151 if (cxl_mbox_background_complete(cxlds)) { 154 - cxlds->security.poll_tmo_secs = 0; 152 + mds->security.poll_tmo_secs = 0; 155 153 put_device(cxlds->dev); 156 154 157 - if (cxlds->security.sanitize_node) 158 - sysfs_notify_dirent(cxlds->security.sanitize_node); 155 + if (mds->security.sanitize_node) 156 + sysfs_notify_dirent(mds->security.sanitize_node); 159 157 160 158 dev_dbg(cxlds->dev, "Sanitization operation ended\n"); 161 159 } else { 162 - int timeout = cxlds->security.poll_tmo_secs + 10; 160 + int timeout = mds->security.poll_tmo_secs + 10; 163 161 164 - 
cxlds->security.poll_tmo_secs = min(15 * 60, timeout); 165 - queue_delayed_work(system_wq, &cxlds->security.poll_dwork, 162 + mds->security.poll_tmo_secs = min(15 * 60, timeout); 163 + queue_delayed_work(system_wq, &mds->security.poll_dwork, 166 164 timeout * HZ); 167 165 } 168 - mutex_unlock(&cxlds->mbox_mutex); 166 + mutex_unlock(&mds->mbox_mutex); 169 167 } 170 168 171 169 /** 172 170 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command 173 - * @cxlds: The device state to communicate with. 171 + * @mds: The memory device driver data 174 172 * @mbox_cmd: Command to send to the memory device. 175 173 * 176 174 * Context: Any context. Expects mbox_mutex to be held. ··· 189 189 * not need to coordinate with each other. The driver only uses the primary 190 190 * mailbox. 191 191 */ 192 - static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds, 192 + static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, 193 193 struct cxl_mbox_cmd *mbox_cmd) 194 194 { 195 + struct cxl_dev_state *cxlds = &mds->cxlds; 195 196 void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; 196 197 struct device *dev = cxlds->dev; 197 198 u64 cmd_reg, status_reg; 198 199 size_t out_len; 199 200 int rc; 200 201 201 - lockdep_assert_held(&cxlds->mbox_mutex); 202 + lockdep_assert_held(&mds->mbox_mutex); 202 203 203 204 /* 204 205 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. ··· 233 232 * not be in sync. Ensure no new command comes in until so. Keep the 234 233 * hardware semantics and only allow device health status. 235 234 */ 236 - if (cxlds->security.poll_tmo_secs > 0) { 235 + if (mds->security.poll_tmo_secs > 0) { 237 236 if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO) 238 237 return -EBUSY; 239 238 } ··· 294 293 * and allow userspace to poll(2) for completion. 
295 294 */ 296 295 if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) { 297 - if (cxlds->security.poll_tmo_secs != -1) { 296 + if (mds->security.poll_tmo_secs != -1) { 298 297 /* hold the device throughout */ 299 298 get_device(cxlds->dev); 300 299 301 300 /* give first timeout a second */ 302 301 timeout = 1; 303 - cxlds->security.poll_tmo_secs = timeout; 302 + mds->security.poll_tmo_secs = timeout; 304 303 queue_delayed_work(system_wq, 305 - &cxlds->security.poll_dwork, 304 + &mds->security.poll_dwork, 306 305 timeout * HZ); 307 306 } 308 307 ··· 315 314 316 315 timeout = mbox_cmd->poll_interval_ms; 317 316 for (i = 0; i < mbox_cmd->poll_count; i++) { 318 - if (rcuwait_wait_event_timeout(&cxlds->mbox_wait, 317 + if (rcuwait_wait_event_timeout(&mds->mbox_wait, 319 318 cxl_mbox_background_complete(cxlds), 320 319 TASK_UNINTERRUPTIBLE, 321 320 msecs_to_jiffies(timeout)) > 0) ··· 358 357 * have requested less data than the hardware supplied even 359 358 * within spec. 360 359 */ 361 - size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len); 360 + size_t n; 362 361 362 + n = min3(mbox_cmd->size_out, mds->payload_size, out_len); 363 363 memcpy_fromio(mbox_cmd->payload_out, payload, n); 364 364 mbox_cmd->size_out = n; 365 365 } else { ··· 370 368 return 0; 371 369 } 372 370 373 - static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 371 + static int cxl_pci_mbox_send(struct cxl_memdev_state *mds, 372 + struct cxl_mbox_cmd *cmd) 374 373 { 375 374 int rc; 376 375 377 - mutex_lock_io(&cxlds->mbox_mutex); 378 - rc = __cxl_pci_mbox_send_cmd(cxlds, cmd); 379 - mutex_unlock(&cxlds->mbox_mutex); 376 + mutex_lock_io(&mds->mbox_mutex); 377 + rc = __cxl_pci_mbox_send_cmd(mds, cmd); 378 + mutex_unlock(&mds->mbox_mutex); 380 379 381 380 return rc; 382 381 } 383 382 384 - static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds) 383 + static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds) 385 384 { 385 + struct cxl_dev_state *cxlds = 
&mds->cxlds; 386 386 const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); 387 + struct device *dev = cxlds->dev; 387 388 unsigned long timeout; 388 389 u64 md_status; 389 390 ··· 400 395 } while (!time_after(jiffies, timeout)); 401 396 402 397 if (!(md_status & CXLMDEV_MBOX_IF_READY)) { 403 - cxl_err(cxlds->dev, md_status, 404 - "timeout awaiting mailbox ready"); 398 + cxl_err(dev, md_status, "timeout awaiting mailbox ready"); 405 399 return -ETIMEDOUT; 406 400 } 407 401 ··· 411 407 * source for future doorbell busy events. 412 408 */ 413 409 if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) { 414 - cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle"); 410 + cxl_err(dev, md_status, "timeout awaiting mailbox idle"); 415 411 return -ETIMEDOUT; 416 412 } 417 413 418 - cxlds->mbox_send = cxl_pci_mbox_send; 419 - cxlds->payload_size = 414 + mds->mbox_send = cxl_pci_mbox_send; 415 + mds->payload_size = 420 416 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); 421 417 422 418 /* ··· 426 422 * there's no point in going forward. If the size is too large, there's 427 423 * no harm is soft limiting it. 
428 424 */ 429 - cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M); 430 - if (cxlds->payload_size < 256) { 431 - dev_err(cxlds->dev, "Mailbox is too small (%zub)", 432 - cxlds->payload_size); 425 + mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M); 426 + if (mds->payload_size < 256) { 427 + dev_err(dev, "Mailbox is too small (%zub)", 428 + mds->payload_size); 433 429 return -ENXIO; 434 430 } 435 431 436 - dev_dbg(cxlds->dev, "Mailbox payload sized %zu", 437 - cxlds->payload_size); 432 + dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size); 438 433 439 - rcuwait_init(&cxlds->mbox_wait); 434 + rcuwait_init(&mds->mbox_wait); 440 435 441 436 if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) { 442 437 u32 ctrl; ··· 459 456 } 460 457 461 458 mbox_poll: 462 - cxlds->security.poll = true; 463 - INIT_DELAYED_WORK(&cxlds->security.poll_dwork, cxl_mbox_sanitize_work); 459 + mds->security.poll = true; 460 + INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work); 464 461 465 462 dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported"); 466 463 return 0; ··· 557 554 return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END; 558 555 } 559 556 560 - /* 561 - * CXL v3.0 6.2.3 Table 6-4 562 - * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits 563 - * mode, otherwise it's 68B flits mode. 
564 - */ 565 - static bool cxl_pci_flit_256(struct pci_dev *pdev) 566 - { 567 - u16 lnksta2; 568 - 569 - pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2); 570 - return lnksta2 & PCI_EXP_LNKSTA2_FLIT; 571 - } 572 - 573 557 static int cxl_pci_ras_unmask(struct pci_dev *pdev) 574 558 { 575 559 struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); ··· 583 593 addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET; 584 594 orig_val = readl(addr); 585 595 586 - mask = CXL_RAS_UNCORRECTABLE_MASK_MASK; 587 - if (!cxl_pci_flit_256(pdev)) 588 - mask &= ~CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK; 596 + mask = CXL_RAS_UNCORRECTABLE_MASK_MASK | 597 + CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK; 589 598 val = orig_val & ~mask; 590 599 writel(val, addr); 591 600 dev_dbg(&pdev->dev, ··· 611 622 612 623 /* 613 624 * There is a single buffer for reading event logs from the mailbox. All logs 614 - * share this buffer protected by the cxlds->event_log_lock. 625 + * share this buffer protected by the mds->event_log_lock. 
615 626 */ 616 - static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds) 627 + static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds) 617 628 { 618 629 struct cxl_get_event_payload *buf; 619 630 620 - buf = kvmalloc(cxlds->payload_size, GFP_KERNEL); 631 + buf = kvmalloc(mds->payload_size, GFP_KERNEL); 621 632 if (!buf) 622 633 return -ENOMEM; 623 - cxlds->event.buf = buf; 634 + mds->event.buf = buf; 624 635 625 - return devm_add_action_or_reset(cxlds->dev, free_event_buf, buf); 636 + return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf); 626 637 } 627 638 628 639 static int cxl_alloc_irq_vectors(struct pci_dev *pdev) ··· 651 662 { 652 663 struct cxl_dev_id *dev_id = id; 653 664 struct cxl_dev_state *cxlds = dev_id->cxlds; 665 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 654 666 u32 status; 655 667 656 668 do { ··· 664 674 status &= CXLDEV_EVENT_STATUS_ALL; 665 675 if (!status) 666 676 break; 667 - cxl_mem_get_event_records(cxlds, status); 677 + cxl_mem_get_event_records(mds, status); 668 678 cond_resched(); 669 679 } while (status); 670 680 ··· 687 697 return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread); 688 698 } 689 699 690 - static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds, 700 + static int cxl_event_get_int_policy(struct cxl_memdev_state *mds, 691 701 struct cxl_event_interrupt_policy *policy) 692 702 { 693 703 struct cxl_mbox_cmd mbox_cmd = { ··· 697 707 }; 698 708 int rc; 699 709 700 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 710 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 701 711 if (rc < 0) 702 - dev_err(cxlds->dev, "Failed to get event interrupt policy : %d", 703 - rc); 712 + dev_err(mds->cxlds.dev, 713 + "Failed to get event interrupt policy : %d", rc); 704 714 705 715 return rc; 706 716 } 707 717 708 - static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds, 718 + static int cxl_event_config_msgnums(struct cxl_memdev_state *mds, 709 719 struct cxl_event_interrupt_policy 
*policy) 710 720 { 711 721 struct cxl_mbox_cmd mbox_cmd; ··· 724 734 .size_in = sizeof(*policy), 725 735 }; 726 736 727 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 737 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 728 738 if (rc < 0) { 729 - dev_err(cxlds->dev, "Failed to set event interrupt policy : %d", 739 + dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d", 730 740 rc); 731 741 return rc; 732 742 } 733 743 734 744 /* Retrieve final interrupt settings */ 735 - return cxl_event_get_int_policy(cxlds, policy); 745 + return cxl_event_get_int_policy(mds, policy); 736 746 } 737 747 738 - static int cxl_event_irqsetup(struct cxl_dev_state *cxlds) 748 + static int cxl_event_irqsetup(struct cxl_memdev_state *mds) 739 749 { 750 + struct cxl_dev_state *cxlds = &mds->cxlds; 740 751 struct cxl_event_interrupt_policy policy; 741 752 int rc; 742 753 743 - rc = cxl_event_config_msgnums(cxlds, &policy); 754 + rc = cxl_event_config_msgnums(mds, &policy); 744 755 if (rc) 745 756 return rc; 746 757 ··· 780 789 } 781 790 782 791 static int cxl_event_config(struct pci_host_bridge *host_bridge, 783 - struct cxl_dev_state *cxlds) 792 + struct cxl_memdev_state *mds) 784 793 { 785 794 struct cxl_event_interrupt_policy policy; 786 795 int rc; ··· 792 801 if (!host_bridge->native_cxl_error) 793 802 return 0; 794 803 795 - rc = cxl_mem_alloc_event_buf(cxlds); 804 + rc = cxl_mem_alloc_event_buf(mds); 796 805 if (rc) 797 806 return rc; 798 807 799 - rc = cxl_event_get_int_policy(cxlds, &policy); 808 + rc = cxl_event_get_int_policy(mds, &policy); 800 809 if (rc) 801 810 return rc; 802 811 ··· 804 813 cxl_event_int_is_fw(policy.warn_settings) || 805 814 cxl_event_int_is_fw(policy.failure_settings) || 806 815 cxl_event_int_is_fw(policy.fatal_settings)) { 807 - dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n"); 816 + dev_err(mds->cxlds.dev, 817 + "FW still in control of Event Logs despite _OSC settings\n"); 808 818 return -EBUSY; 809 819 } 810 
820 811 - rc = cxl_event_irqsetup(cxlds); 821 + rc = cxl_event_irqsetup(mds); 812 822 if (rc) 813 823 return rc; 814 824 815 - cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL); 825 + cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL); 816 826 817 827 return 0; 818 828 } ··· 821 829 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 822 830 { 823 831 struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); 832 + struct cxl_memdev_state *mds; 833 + struct cxl_dev_state *cxlds; 824 834 struct cxl_register_map map; 825 835 struct cxl_memdev *cxlmd; 826 - struct cxl_dev_state *cxlds; 827 836 int rc; 828 837 829 838 /* ··· 839 846 return rc; 840 847 pci_set_master(pdev); 841 848 842 - cxlds = cxl_dev_state_create(&pdev->dev); 843 - if (IS_ERR(cxlds)) 844 - return PTR_ERR(cxlds); 849 + mds = cxl_memdev_state_create(&pdev->dev); 850 + if (IS_ERR(mds)) 851 + return PTR_ERR(mds); 852 + cxlds = &mds->cxlds; 845 853 pci_set_drvdata(pdev, cxlds); 846 854 847 855 cxlds->rcd = is_cxl_restricted(pdev); ··· 887 893 if (rc) 888 894 return rc; 889 895 890 - rc = cxl_pci_setup_mailbox(cxlds); 896 + rc = cxl_pci_setup_mailbox(mds); 891 897 if (rc) 892 898 return rc; 893 899 894 - rc = cxl_enumerate_cmds(cxlds); 900 + rc = cxl_enumerate_cmds(mds); 895 901 if (rc) 896 902 return rc; 897 903 898 - rc = cxl_set_timestamp(cxlds); 904 + rc = cxl_set_timestamp(mds); 899 905 if (rc) 900 906 return rc; 901 907 902 - rc = cxl_poison_state_init(cxlds); 908 + rc = cxl_poison_state_init(mds); 903 909 if (rc) 904 910 return rc; 905 911 906 - rc = cxl_dev_state_identify(cxlds); 912 + rc = cxl_dev_state_identify(mds); 907 913 if (rc) 908 914 return rc; 909 915 910 - rc = cxl_mem_create_range_info(cxlds); 916 + rc = cxl_mem_create_range_info(mds); 911 917 if (rc) 912 918 return rc; 913 919 ··· 915 921 if (IS_ERR(cxlmd)) 916 922 return PTR_ERR(cxlmd); 917 923 918 - rc = cxl_memdev_setup_fw_upload(cxlds); 924 + rc = cxl_memdev_setup_fw_upload(mds); 
919 925 if (rc) 920 926 return rc; 921 927 922 - rc = cxl_event_config(host_bridge, cxlds); 928 + rc = cxl_event_config(host_bridge, mds); 923 929 if (rc) 924 930 return rc; 925 931
+18 -17
drivers/cxl/pmem.c
··· 15 15 16 16 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); 17 17 18 - static void clear_exclusive(void *cxlds) 18 + static void clear_exclusive(void *mds) 19 19 { 20 - clear_exclusive_cxl_commands(cxlds, exclusive_cmds); 20 + clear_exclusive_cxl_commands(mds, exclusive_cmds); 21 21 } 22 22 23 23 static void unregister_nvdimm(void *nvdimm) ··· 65 65 struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); 66 66 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 67 67 struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb; 68 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 68 69 unsigned long flags = 0, cmd_mask = 0; 69 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 70 70 struct nvdimm *nvdimm; 71 71 int rc; 72 72 73 - set_exclusive_cxl_commands(cxlds, exclusive_cmds); 74 - rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds); 73 + set_exclusive_cxl_commands(mds, exclusive_cmds); 74 + rc = devm_add_action_or_reset(dev, clear_exclusive, mds); 75 75 if (rc) 76 76 return rc; 77 77 ··· 100 100 }, 101 101 }; 102 102 103 - static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds, 103 + static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds, 104 104 struct nd_cmd_get_config_size *cmd, 105 105 unsigned int buf_len) 106 106 { 107 107 if (sizeof(*cmd) > buf_len) 108 108 return -EINVAL; 109 109 110 - *cmd = (struct nd_cmd_get_config_size) { 111 - .config_size = cxlds->lsa_size, 112 - .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa), 110 + *cmd = (struct nd_cmd_get_config_size){ 111 + .config_size = mds->lsa_size, 112 + .max_xfer = 113 + mds->payload_size - sizeof(struct cxl_mbox_set_lsa), 113 114 }; 114 115 115 116 return 0; 116 117 } 117 118 118 - static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds, 119 + static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds, 119 120 struct nd_cmd_get_config_data_hdr *cmd, 120 121 unsigned int buf_len) 121 122 { ··· 141 140 .payload_out = 
cmd->out_buf, 142 141 }; 143 142 144 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 143 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 145 144 cmd->status = 0; 146 145 147 146 return rc; 148 147 } 149 148 150 - static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, 149 + static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds, 151 150 struct nd_cmd_set_config_hdr *cmd, 152 151 unsigned int buf_len) 153 152 { ··· 177 176 .size_in = struct_size(set_lsa, data, cmd->in_length), 178 177 }; 179 178 180 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 179 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 181 180 182 181 /* 183 182 * Set "firmware" status (4-packed bytes at the end of the input ··· 195 194 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 196 195 unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm); 197 196 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 198 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 197 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 199 198 200 199 if (!test_bit(cmd, &cmd_mask)) 201 200 return -ENOTTY; 202 201 203 202 switch (cmd) { 204 203 case ND_CMD_GET_CONFIG_SIZE: 205 - return cxl_pmem_get_config_size(cxlds, buf, buf_len); 204 + return cxl_pmem_get_config_size(mds, buf, buf_len); 206 205 case ND_CMD_GET_CONFIG_DATA: 207 - return cxl_pmem_get_config_data(cxlds, buf, buf_len); 206 + return cxl_pmem_get_config_data(mds, buf, buf_len); 208 207 case ND_CMD_SET_CONFIG_DATA: 209 - return cxl_pmem_set_config_data(cxlds, buf, buf_len); 208 + return cxl_pmem_set_config_data(mds, buf, buf_len); 210 209 default: 211 210 return -ENOTTY; 212 211 }
+5 -9
drivers/cxl/port.c
··· 60 60 static int cxl_switch_port_probe(struct cxl_port *port) 61 61 { 62 62 struct cxl_hdm *cxlhdm; 63 - int rc, nr_dports; 63 + int rc; 64 64 65 - nr_dports = devm_cxl_port_enumerate_dports(port); 66 - if (nr_dports < 0) 67 - return nr_dports; 68 - 69 - cxlhdm = devm_cxl_setup_hdm(port, NULL); 70 - rc = devm_cxl_enable_hdm(port, cxlhdm); 71 - if (rc) 65 + rc = devm_cxl_port_enumerate_dports(port); 66 + if (rc < 0) 72 67 return rc; 73 68 69 + cxlhdm = devm_cxl_setup_hdm(port, NULL); 74 70 if (!IS_ERR(cxlhdm)) 75 71 return devm_cxl_enumerate_decoders(cxlhdm, NULL); 76 72 ··· 75 79 return PTR_ERR(cxlhdm); 76 80 } 77 81 78 - if (nr_dports == 1) { 82 + if (rc == 1) { 79 83 dev_dbg(&port->dev, "Fallback to passthrough decoder\n"); 80 84 return devm_cxl_add_passthrough_decoder(port); 81 85 }
+13 -13
drivers/cxl/security.c
··· 14 14 { 15 15 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 16 16 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 17 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 17 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 18 18 unsigned long security_flags = 0; 19 19 struct cxl_get_security_output { 20 20 __le32 flags; ··· 29 29 .payload_out = &out, 30 30 }; 31 31 32 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 32 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 33 33 if (rc < 0) 34 34 return 0; 35 35 36 36 sec_out = le32_to_cpu(out.flags); 37 37 /* cache security state */ 38 - cxlds->security.state = sec_out; 38 + mds->security.state = sec_out; 39 39 40 40 if (ptype == NVDIMM_MASTER) { 41 41 if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) ··· 70 70 { 71 71 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 72 72 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 73 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 73 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 74 74 struct cxl_mbox_cmd mbox_cmd; 75 75 struct cxl_set_pass set_pass; 76 76 ··· 87 87 .payload_in = &set_pass, 88 88 }; 89 89 90 - return cxl_internal_send_cmd(cxlds, &mbox_cmd); 90 + return cxl_internal_send_cmd(mds, &mbox_cmd); 91 91 } 92 92 93 93 static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, ··· 96 96 { 97 97 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 98 98 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 99 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 99 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 100 100 struct cxl_disable_pass dis_pass; 101 101 struct cxl_mbox_cmd mbox_cmd; 102 102 ··· 112 112 .payload_in = &dis_pass, 113 113 }; 114 114 115 - return cxl_internal_send_cmd(cxlds, &mbox_cmd); 115 + return cxl_internal_send_cmd(mds, &mbox_cmd); 116 116 } 117 117 118 118 static int cxl_pmem_security_disable(struct nvdimm *nvdimm, ··· 131 131 { 132 132 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 
133 133 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 134 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 134 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 135 135 struct cxl_mbox_cmd mbox_cmd = { 136 136 .opcode = CXL_MBOX_OP_FREEZE_SECURITY, 137 137 }; 138 138 139 - return cxl_internal_send_cmd(cxlds, &mbox_cmd); 139 + return cxl_internal_send_cmd(mds, &mbox_cmd); 140 140 } 141 141 142 142 static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, ··· 144 144 { 145 145 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 146 146 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 147 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 147 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 148 148 u8 pass[NVDIMM_PASSPHRASE_LEN]; 149 149 struct cxl_mbox_cmd mbox_cmd; 150 150 int rc; ··· 156 156 .payload_in = pass, 157 157 }; 158 158 159 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 159 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 160 160 if (rc < 0) 161 161 return rc; 162 162 ··· 169 169 { 170 170 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 171 171 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; 172 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 172 + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 173 173 struct cxl_mbox_cmd mbox_cmd; 174 174 struct cxl_pass_erase erase; 175 175 int rc; ··· 185 185 .payload_in = &erase, 186 186 }; 187 187 188 - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); 188 + rc = cxl_internal_send_cmd(mds, &mbox_cmd); 189 189 if (rc < 0) 190 190 return rc; 191 191
-1
tools/testing/cxl/Kbuild
··· 6 6 ldflags-y += --wrap=nvdimm_bus_register 7 7 ldflags-y += --wrap=devm_cxl_port_enumerate_dports 8 8 ldflags-y += --wrap=devm_cxl_setup_hdm 9 - ldflags-y += --wrap=devm_cxl_enable_hdm 10 9 ldflags-y += --wrap=devm_cxl_add_passthrough_decoder 11 10 ldflags-y += --wrap=devm_cxl_enumerate_decoders 12 11 ldflags-y += --wrap=cxl_await_media_ready
+3 -3
tools/testing/cxl/test/cxl.c
··· 713 713 714 714 cxld->interleave_ways = 1; 715 715 cxld->interleave_granularity = 256; 716 - cxld->target_type = CXL_DECODER_EXPANDER; 716 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 717 717 cxld->commit = mock_decoder_commit; 718 718 cxld->reset = mock_decoder_reset; 719 719 } ··· 787 787 788 788 cxld->interleave_ways = 2; 789 789 eig_to_granularity(window->granularity, &cxld->interleave_granularity); 790 - cxld->target_type = CXL_DECODER_EXPANDER; 790 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 791 791 cxld->flags = CXL_DECODER_F_ENABLE; 792 792 cxled->state = CXL_DECODER_STATE_AUTO; 793 793 port->commit_end = cxld->id; ··· 820 820 } else 821 821 cxlsd->target[0] = dport; 822 822 cxld = &cxlsd->cxld; 823 - cxld->target_type = CXL_DECODER_EXPANDER; 823 + cxld->target_type = CXL_DECODER_HOSTONLYMEM; 824 824 cxld->flags = CXL_DECODER_F_ENABLE; 825 825 iter->commit_end = 0; 826 826 /*
+76 -85
tools/testing/cxl/test/mem.c
··· 133 133 }; 134 134 135 135 struct mock_event_store { 136 - struct cxl_dev_state *cxlds; 136 + struct cxl_memdev_state *mds; 137 137 struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX]; 138 138 u32 ev_status; 139 139 }; ··· 215 215 log->nr_events++; 216 216 } 217 217 218 - static int mock_get_event(struct cxl_dev_state *cxlds, 219 - struct cxl_mbox_cmd *cmd) 218 + static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd) 220 219 { 221 220 struct cxl_get_event_payload *pl; 222 221 struct mock_event_log *log; ··· 235 236 236 237 memset(cmd->payload_out, 0, cmd->size_out); 237 238 238 - log = event_find_log(cxlds->dev, log_type); 239 + log = event_find_log(dev, log_type); 239 240 if (!log || event_log_empty(log)) 240 241 return 0; 241 242 ··· 268 269 return 0; 269 270 } 270 271 271 - static int mock_clear_event(struct cxl_dev_state *cxlds, 272 - struct cxl_mbox_cmd *cmd) 272 + static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd) 273 273 { 274 274 struct cxl_mbox_clear_event_payload *pl = cmd->payload_in; 275 275 struct mock_event_log *log; ··· 279 281 if (log_type >= CXL_EVENT_TYPE_MAX) 280 282 return -EINVAL; 281 283 282 - log = event_find_log(cxlds->dev, log_type); 284 + log = event_find_log(dev, log_type); 283 285 if (!log) 284 286 return 0; /* No mock data in this log */ 285 287 ··· 289 291 * However, this is not good behavior for the host so test it. 
290 292 */ 291 293 if (log->clear_idx + pl->nr_recs > log->cur_idx) { 292 - dev_err(cxlds->dev, 294 + dev_err(dev, 293 295 "Attempting to clear more events than returned!\n"); 294 296 return -EINVAL; 295 297 } ··· 299 301 nr < pl->nr_recs; 300 302 nr++, handle++) { 301 303 if (handle != le16_to_cpu(pl->handles[nr])) { 302 - dev_err(cxlds->dev, "Clearing events out of order\n"); 304 + dev_err(dev, "Clearing events out of order\n"); 303 305 return -EINVAL; 304 306 } 305 307 } ··· 326 328 event_reset_log(log); 327 329 } 328 330 329 - cxl_mem_get_event_records(mes->cxlds, mes->ev_status); 331 + cxl_mem_get_event_records(mes->mds, mes->ev_status); 330 332 } 331 333 332 334 struct cxl_event_record_raw maint_needed = { ··· 486 488 return 0; 487 489 } 488 490 489 - static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 491 + static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd) 490 492 { 491 493 struct cxl_mbox_get_log *gl = cmd->payload_in; 492 494 u32 offset = le32_to_cpu(gl->offset); ··· 496 498 497 499 if (cmd->size_in < sizeof(*gl)) 498 500 return -EINVAL; 499 - if (length > cxlds->payload_size) 501 + if (length > mds->payload_size) 500 502 return -EINVAL; 501 503 if (offset + length > sizeof(mock_cel)) 502 504 return -EINVAL; ··· 510 512 return 0; 511 513 } 512 514 513 - static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 515 + static int mock_rcd_id(struct cxl_mbox_cmd *cmd) 514 516 { 515 517 struct cxl_mbox_identify id = { 516 518 .fw_revision = { "mock fw v1 " }, ··· 528 530 return 0; 529 531 } 530 532 531 - static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 533 + static int mock_id(struct cxl_mbox_cmd *cmd) 532 534 { 533 535 struct cxl_mbox_identify id = { 534 536 .fw_revision = { "mock fw v1 " }, ··· 550 552 return 0; 551 553 } 552 554 553 - static int mock_partition_info(struct cxl_dev_state *cxlds, 554 - struct cxl_mbox_cmd *cmd) 555 + static int 
mock_partition_info(struct cxl_mbox_cmd *cmd) 555 556 { 556 557 struct cxl_mbox_get_partition_info pi = { 557 558 .active_volatile_cap = ··· 567 570 return 0; 568 571 } 569 572 570 - static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 573 + static int mock_sanitize(struct cxl_mockmem_data *mdata, 574 + struct cxl_mbox_cmd *cmd) 571 575 { 572 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 573 - 574 576 if (cmd->size_in != 0) 575 577 return -EINVAL; 576 578 ··· 588 592 return 0; /* assume less than 2 secs, no bg */ 589 593 } 590 594 591 - static int mock_secure_erase(struct cxl_dev_state *cxlds, 595 + static int mock_secure_erase(struct cxl_mockmem_data *mdata, 592 596 struct cxl_mbox_cmd *cmd) 593 597 { 594 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 595 - 596 598 if (cmd->size_in != 0) 597 599 return -EINVAL; 598 600 ··· 610 616 return 0; 611 617 } 612 618 613 - static int mock_get_security_state(struct cxl_dev_state *cxlds, 619 + static int mock_get_security_state(struct cxl_mockmem_data *mdata, 614 620 struct cxl_mbox_cmd *cmd) 615 621 { 616 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 617 - 618 622 if (cmd->size_in) 619 623 return -EINVAL; 620 624 ··· 642 650 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; 643 651 } 644 652 645 - static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 653 + static int mock_set_passphrase(struct cxl_mockmem_data *mdata, 654 + struct cxl_mbox_cmd *cmd) 646 655 { 647 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 648 656 struct cxl_set_pass *set_pass; 649 657 650 658 if (cmd->size_in != sizeof(*set_pass)) ··· 702 710 return -EINVAL; 703 711 } 704 712 705 - static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 713 + static int mock_disable_passphrase(struct cxl_mockmem_data *mdata, 714 + struct cxl_mbox_cmd *cmd) 706 715 { 707 - struct cxl_mockmem_data *mdata 
= dev_get_drvdata(cxlds->dev); 708 716 struct cxl_disable_pass *dis_pass; 709 717 710 718 if (cmd->size_in != sizeof(*dis_pass)) ··· 773 781 return 0; 774 782 } 775 783 776 - static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 784 + static int mock_freeze_security(struct cxl_mockmem_data *mdata, 785 + struct cxl_mbox_cmd *cmd) 777 786 { 778 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 779 - 780 787 if (cmd->size_in != 0) 781 788 return -EINVAL; 782 789 ··· 789 798 return 0; 790 799 } 791 800 792 - static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 801 + static int mock_unlock_security(struct cxl_mockmem_data *mdata, 802 + struct cxl_mbox_cmd *cmd) 793 803 { 794 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 795 - 796 804 if (cmd->size_in != NVDIMM_PASSPHRASE_LEN) 797 805 return -EINVAL; 798 806 ··· 830 840 return 0; 831 841 } 832 842 833 - static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds, 843 + static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata, 834 844 struct cxl_mbox_cmd *cmd) 835 845 { 836 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 837 846 struct cxl_pass_erase *erase; 838 847 839 848 if (cmd->size_in != sizeof(*erase)) ··· 928 939 return 0; 929 940 } 930 941 931 - static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 942 + static int mock_get_lsa(struct cxl_mockmem_data *mdata, 943 + struct cxl_mbox_cmd *cmd) 932 944 { 933 945 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; 934 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 935 946 void *lsa = mdata->lsa; 936 947 u32 offset, length; 937 948 ··· 948 959 return 0; 949 960 } 950 961 951 - static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 962 + static int mock_set_lsa(struct cxl_mockmem_data *mdata, 963 + struct cxl_mbox_cmd *cmd) 952 964 { 953 965 struct cxl_mbox_set_lsa 
*set_lsa = cmd->payload_in; 954 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 955 966 void *lsa = mdata->lsa; 956 967 u32 offset, length; 957 968 ··· 966 977 return 0; 967 978 } 968 979 969 - static int mock_health_info(struct cxl_dev_state *cxlds, 970 - struct cxl_mbox_cmd *cmd) 980 + static int mock_health_info(struct cxl_mbox_cmd *cmd) 971 981 { 972 982 struct cxl_mbox_health_info health_info = { 973 983 /* set flags for maint needed, perf degraded, hw replacement */ ··· 1183 1195 }; 1184 1196 ATTRIBUTE_GROUPS(cxl_mock_mem_core); 1185 1197 1186 - static int mock_fw_info(struct cxl_dev_state *cxlds, 1187 - struct cxl_mbox_cmd *cmd) 1198 + static int mock_fw_info(struct cxl_mockmem_data *mdata, 1199 + struct cxl_mbox_cmd *cmd) 1188 1200 { 1189 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 1190 1201 struct cxl_mbox_get_fw_info fw_info = { 1191 1202 .num_slots = FW_SLOTS, 1192 1203 .slot_info = (mdata->fw_slot & 0x7) | ··· 1205 1218 return 0; 1206 1219 } 1207 1220 1208 - static int mock_transfer_fw(struct cxl_dev_state *cxlds, 1221 + static int mock_transfer_fw(struct cxl_mockmem_data *mdata, 1209 1222 struct cxl_mbox_cmd *cmd) 1210 1223 { 1211 1224 struct cxl_mbox_transfer_fw *transfer = cmd->payload_in; 1212 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 1213 1225 void *fw = mdata->fw; 1214 1226 size_t offset, length; 1215 1227 ··· 1240 1254 return 0; 1241 1255 } 1242 1256 1243 - static int mock_activate_fw(struct cxl_dev_state *cxlds, 1257 + static int mock_activate_fw(struct cxl_mockmem_data *mdata, 1244 1258 struct cxl_mbox_cmd *cmd) 1245 1259 { 1246 1260 struct cxl_mbox_activate_fw *activate = cmd->payload_in; 1247 - struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 1248 1261 1249 1262 if (activate->slot == 0 || activate->slot > FW_SLOTS) 1250 1263 return -EINVAL; ··· 1261 1276 return -EINVAL; 1262 1277 } 1263 1278 1264 - static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct 
cxl_mbox_cmd *cmd) 1279 + static int cxl_mock_mbox_send(struct cxl_memdev_state *mds, 1280 + struct cxl_mbox_cmd *cmd) 1265 1281 { 1282 + struct cxl_dev_state *cxlds = &mds->cxlds; 1266 1283 struct device *dev = cxlds->dev; 1284 + struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 1267 1285 int rc = -EIO; 1268 1286 1269 1287 switch (cmd->opcode) { ··· 1277 1289 rc = mock_gsl(cmd); 1278 1290 break; 1279 1291 case CXL_MBOX_OP_GET_LOG: 1280 - rc = mock_get_log(cxlds, cmd); 1292 + rc = mock_get_log(mds, cmd); 1281 1293 break; 1282 1294 case CXL_MBOX_OP_IDENTIFY: 1283 1295 if (cxlds->rcd) 1284 - rc = mock_rcd_id(cxlds, cmd); 1296 + rc = mock_rcd_id(cmd); 1285 1297 else 1286 - rc = mock_id(cxlds, cmd); 1298 + rc = mock_id(cmd); 1287 1299 break; 1288 1300 case CXL_MBOX_OP_GET_LSA: 1289 - rc = mock_get_lsa(cxlds, cmd); 1301 + rc = mock_get_lsa(mdata, cmd); 1290 1302 break; 1291 1303 case CXL_MBOX_OP_GET_PARTITION_INFO: 1292 - rc = mock_partition_info(cxlds, cmd); 1304 + rc = mock_partition_info(cmd); 1293 1305 break; 1294 1306 case CXL_MBOX_OP_GET_EVENT_RECORD: 1295 - rc = mock_get_event(cxlds, cmd); 1307 + rc = mock_get_event(dev, cmd); 1296 1308 break; 1297 1309 case CXL_MBOX_OP_CLEAR_EVENT_RECORD: 1298 - rc = mock_clear_event(cxlds, cmd); 1310 + rc = mock_clear_event(dev, cmd); 1299 1311 break; 1300 1312 case CXL_MBOX_OP_SET_LSA: 1301 - rc = mock_set_lsa(cxlds, cmd); 1313 + rc = mock_set_lsa(mdata, cmd); 1302 1314 break; 1303 1315 case CXL_MBOX_OP_GET_HEALTH_INFO: 1304 - rc = mock_health_info(cxlds, cmd); 1316 + rc = mock_health_info(cmd); 1305 1317 break; 1306 1318 case CXL_MBOX_OP_SANITIZE: 1307 - rc = mock_sanitize(cxlds, cmd); 1319 + rc = mock_sanitize(mdata, cmd); 1308 1320 break; 1309 1321 case CXL_MBOX_OP_SECURE_ERASE: 1310 - rc = mock_secure_erase(cxlds, cmd); 1322 + rc = mock_secure_erase(mdata, cmd); 1311 1323 break; 1312 1324 case CXL_MBOX_OP_GET_SECURITY_STATE: 1313 - rc = mock_get_security_state(cxlds, cmd); 1325 + rc = mock_get_security_state(mdata, 
cmd); 1314 1326 break; 1315 1327 case CXL_MBOX_OP_SET_PASSPHRASE: 1316 - rc = mock_set_passphrase(cxlds, cmd); 1328 + rc = mock_set_passphrase(mdata, cmd); 1317 1329 break; 1318 1330 case CXL_MBOX_OP_DISABLE_PASSPHRASE: 1319 - rc = mock_disable_passphrase(cxlds, cmd); 1331 + rc = mock_disable_passphrase(mdata, cmd); 1320 1332 break; 1321 1333 case CXL_MBOX_OP_FREEZE_SECURITY: 1322 - rc = mock_freeze_security(cxlds, cmd); 1334 + rc = mock_freeze_security(mdata, cmd); 1323 1335 break; 1324 1336 case CXL_MBOX_OP_UNLOCK: 1325 - rc = mock_unlock_security(cxlds, cmd); 1337 + rc = mock_unlock_security(mdata, cmd); 1326 1338 break; 1327 1339 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: 1328 - rc = mock_passphrase_secure_erase(cxlds, cmd); 1340 + rc = mock_passphrase_secure_erase(mdata, cmd); 1329 1341 break; 1330 1342 case CXL_MBOX_OP_GET_POISON: 1331 1343 rc = mock_get_poison(cxlds, cmd); ··· 1337 1349 rc = mock_clear_poison(cxlds, cmd); 1338 1350 break; 1339 1351 case CXL_MBOX_OP_GET_FW_INFO: 1340 - rc = mock_fw_info(cxlds, cmd); 1352 + rc = mock_fw_info(mdata, cmd); 1341 1353 break; 1342 1354 case CXL_MBOX_OP_TRANSFER_FW: 1343 - rc = mock_transfer_fw(cxlds, cmd); 1355 + rc = mock_transfer_fw(mdata, cmd); 1344 1356 break; 1345 1357 case CXL_MBOX_OP_ACTIVATE_FW: 1346 - rc = mock_activate_fw(cxlds, cmd); 1358 + rc = mock_activate_fw(mdata, cmd); 1347 1359 break; 1348 1360 default: 1349 1361 break; ··· 1385 1397 { 1386 1398 struct device *dev = &pdev->dev; 1387 1399 struct cxl_memdev *cxlmd; 1400 + struct cxl_memdev_state *mds; 1388 1401 struct cxl_dev_state *cxlds; 1389 1402 struct cxl_mockmem_data *mdata; 1390 1403 int rc; ··· 1411 1422 if (rc) 1412 1423 return rc; 1413 1424 1414 - cxlds = cxl_dev_state_create(dev); 1415 - if (IS_ERR(cxlds)) 1416 - return PTR_ERR(cxlds); 1425 + mds = cxl_memdev_state_create(dev); 1426 + if (IS_ERR(mds)) 1427 + return PTR_ERR(mds); 1417 1428 1429 + mds->mbox_send = cxl_mock_mbox_send; 1430 + mds->payload_size = SZ_4K; 1431 + mds->event.buf = 
(struct cxl_get_event_payload *) mdata->event_buf; 1432 + 1433 + cxlds = &mds->cxlds; 1418 1434 cxlds->serial = pdev->id; 1419 - cxlds->mbox_send = cxl_mock_mbox_send; 1420 - cxlds->payload_size = SZ_4K; 1421 - cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf; 1422 1435 if (is_rcd(pdev)) { 1423 1436 cxlds->rcd = true; 1424 1437 cxlds->component_reg_phys = CXL_RESOURCE_NONE; 1425 1438 } 1426 1439 1427 - rc = cxl_enumerate_cmds(cxlds); 1440 + rc = cxl_enumerate_cmds(mds); 1428 1441 if (rc) 1429 1442 return rc; 1430 1443 1431 - rc = cxl_poison_state_init(cxlds); 1444 + rc = cxl_poison_state_init(mds); 1432 1445 if (rc) 1433 1446 return rc; 1434 1447 1435 - rc = cxl_set_timestamp(cxlds); 1448 + rc = cxl_set_timestamp(mds); 1436 1449 if (rc) 1437 1450 return rc; 1438 1451 1439 1452 cxlds->media_ready = true; 1440 - rc = cxl_dev_state_identify(cxlds); 1453 + rc = cxl_dev_state_identify(mds); 1441 1454 if (rc) 1442 1455 return rc; 1443 1456 1444 - rc = cxl_mem_create_range_info(cxlds); 1457 + rc = cxl_mem_create_range_info(mds); 1445 1458 if (rc) 1446 1459 return rc; 1447 1460 1448 - mdata->mes.cxlds = cxlds; 1461 + mdata->mes.mds = mds; 1449 1462 cxl_mock_add_event_logs(&mdata->mes); 1450 1463 1451 1464 cxlmd = devm_cxl_add_memdev(cxlds); 1452 1465 if (IS_ERR(cxlmd)) 1453 1466 return PTR_ERR(cxlmd); 1454 1467 1455 - rc = cxl_memdev_setup_fw_upload(cxlds); 1468 + rc = cxl_memdev_setup_fw_upload(mds); 1456 1469 if (rc) 1457 1470 return rc; 1458 1471 1459 - cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL); 1472 + cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL); 1460 1473 1461 1474 return 0; 1462 1475 }
-15
tools/testing/cxl/test/mock.c
··· 149 149 } 150 150 EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL); 151 151 152 - int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm) 153 - { 154 - int index, rc; 155 - struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 156 - 157 - if (ops && ops->is_mock_port(port->uport)) 158 - rc = 0; 159 - else 160 - rc = devm_cxl_enable_hdm(port, cxlhdm); 161 - put_cxl_mock_ops(index); 162 - 163 - return rc; 164 - } 165 - EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL); 166 - 167 152 int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port) 168 153 { 169 154 int rc, index;