Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'cxl-for-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull CXL updates from Dave Jiang:

- Three CXL mailbox passthrough commands are added to support the
populating and clearing of vendor debug logs:
- Get Log Capabilities
- Get Supported Log Sub-List Commands
- Clear Log

- Add support for Device Physical Address (DPA) to Host Physical Address
(HPA) translation for CXL events of cxl_dram and cxl_general media.

This allows user space to figure out, via trace events, in which CXL
region the event occurred.

- Connect CXL to CPER reporting.

If a device is configured for firmware first, CXL event records are
not sent directly to the host. Those records are reported through EFI
Common Platform Error Records (CPER). Add support to route the CPER
records through the CXL sub-system in order to provide DPA to HPA
translation and also event decoding and tracing. This is useful for
users to determine which system issues may correspond to specific
hardware events.

- A number of misc cleanups and fixes:
- Fix for compile warning of cxl_security_ops
- Add debug message for invalid interleave granularity
- Enhancement to cxl-test event testing
- Add dev_warn() on unsupported mixed mode decoder
- Fix use of phys_to_target_node() for x86
- Use helper function for decoder enum instead of open coding
- Include missing headers for cxl-event
- Fix MAINTAINERS file entry
- Fix cxlr_pmem memory leak
- Cleanup __cxl_parse_cfmws via scope-based resource management
- Convert cxl_pmem_region_alloc() to scope-based resource management

* tag 'cxl-for-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (21 commits)
cxl/cper: Remove duplicated GUID defines
cxl/cper: Fix non-ACPI-APEI-GHES build
cxl/pci: Process CPER events
acpi/ghes: Process CXL Component Events
cxl/region: Convert cxl_pmem_region_alloc to scope-based resource management
cxl/acpi: Cleanup __cxl_parse_cfmws()
cxl/region: Fix cxlr_pmem leaks
cxl/core: Add region info to cxl_general_media and cxl_dram events
cxl/region: Move cxl_trace_hpa() work to the region driver
cxl/region: Move cxl_dpa_to_region() work to the region driver
cxl/trace: Correct DPA field masks for general_media & dram events
MAINTAINERS: repair file entry in COMPUTE EXPRESS LINK
cxl/cxl-event: include missing <linux/types.h> and <linux/uuid.h>
cxl/hdm: Debug, use decoder name function
cxl: Fix use of phys_to_target_node() for x86
cxl/hdm: dev_warn() on unsupported mixed mode decoder
cxl/test: Enhance event testing
cxl/hdm: Add debug message for invalid interleave granularity
cxl: Fix compile warning for cxl_security_ops extern
cxl/mbox: Add Clear Log mailbox command
...

+527 -234
+1 -1
MAINTAINERS
··· 5407 5407 L: linux-cxl@vger.kernel.org 5408 5408 S: Maintained 5409 5409 F: drivers/cxl/ 5410 - F: include/linux/cxl-einj.h 5410 + F: include/linux/einj-cxl.h 5411 5411 F: include/linux/cxl-event.h 5412 5412 F: include/uapi/linux/cxl_mem.h 5413 5413 F: tools/testing/cxl/
+84
drivers/acpi/apei/ghes.c
··· 26 26 #include <linux/interrupt.h> 27 27 #include <linux/timer.h> 28 28 #include <linux/cper.h> 29 + #include <linux/cleanup.h> 30 + #include <linux/cxl-event.h> 29 31 #include <linux/platform_device.h> 30 32 #include <linux/mutex.h> 31 33 #include <linux/ratelimit.h> ··· 35 33 #include <linux/irq_work.h> 36 34 #include <linux/llist.h> 37 35 #include <linux/genalloc.h> 36 + #include <linux/kfifo.h> 38 37 #include <linux/pci.h> 39 38 #include <linux/pfn.h> 40 39 #include <linux/aer.h> ··· 676 673 schedule_work(&entry->work); 677 674 } 678 675 676 + /* Room for 8 entries for each of the 4 event log queues */ 677 + #define CXL_CPER_FIFO_DEPTH 32 678 + DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH); 679 + 680 + /* Synchronize schedule_work() with cxl_cper_work changes */ 681 + static DEFINE_SPINLOCK(cxl_cper_work_lock); 682 + struct work_struct *cxl_cper_work; 683 + 684 + static void cxl_cper_post_event(enum cxl_event_type event_type, 685 + struct cxl_cper_event_rec *rec) 686 + { 687 + struct cxl_cper_work_data wd; 688 + 689 + if (rec->hdr.length <= sizeof(rec->hdr) || 690 + rec->hdr.length > sizeof(*rec)) { 691 + pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n", 692 + rec->hdr.length); 693 + return; 694 + } 695 + 696 + if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) { 697 + pr_err(FW_WARN "CXL CPER invalid event\n"); 698 + return; 699 + } 700 + 701 + guard(spinlock_irqsave)(&cxl_cper_work_lock); 702 + 703 + if (!cxl_cper_work) 704 + return; 705 + 706 + wd.event_type = event_type; 707 + memcpy(&wd.rec, rec, sizeof(wd.rec)); 708 + 709 + if (!kfifo_put(&cxl_cper_fifo, wd)) { 710 + pr_err_ratelimited("CXL CPER kfifo overflow\n"); 711 + return; 712 + } 713 + 714 + schedule_work(cxl_cper_work); 715 + } 716 + 717 + int cxl_cper_register_work(struct work_struct *work) 718 + { 719 + if (cxl_cper_work) 720 + return -EINVAL; 721 + 722 + guard(spinlock)(&cxl_cper_work_lock); 723 + cxl_cper_work = work; 724 + return 0; 725 + 
} 726 + EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL); 727 + 728 + int cxl_cper_unregister_work(struct work_struct *work) 729 + { 730 + if (cxl_cper_work != work) 731 + return -EINVAL; 732 + 733 + guard(spinlock)(&cxl_cper_work_lock); 734 + cxl_cper_work = NULL; 735 + return 0; 736 + } 737 + EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL); 738 + 739 + int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) 740 + { 741 + return kfifo_get(&cxl_cper_fifo, wd); 742 + } 743 + EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL); 744 + 679 745 static bool ghes_do_proc(struct ghes *ghes, 680 746 const struct acpi_hest_generic_status *estatus) 681 747 { ··· 779 707 } 780 708 else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { 781 709 queued = ghes_handle_arm_hw_error(gdata, sev, sync); 710 + } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { 711 + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); 712 + 713 + cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); 714 + } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { 715 + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); 716 + 717 + cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); 718 + } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { 719 + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); 720 + 721 + cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); 782 722 } else { 783 723 void *err = acpi_hest_get_payload(gdata); 784 724
+1
drivers/cxl/Kconfig
··· 6 6 select FW_UPLOAD 7 7 select PCI_DOE 8 8 select FIRMWARE_TABLE 9 + select NUMA_KEEP_MEMINFO if (NUMA && X86) 9 10 help 10 11 CXL is a bus that is electrically compatible with PCI Express, but 11 12 layers three protocols on that signalling (CXL.io, CXL.cache, and
+51 -42
drivers/cxl/acpi.c
··· 316 316 .qos_class = cxl_acpi_qos_class, 317 317 }; 318 318 319 + static void del_cxl_resource(struct resource *res) 320 + { 321 + if (!res) 322 + return; 323 + kfree(res->name); 324 + kfree(res); 325 + } 326 + 327 + static struct resource *alloc_cxl_resource(resource_size_t base, 328 + resource_size_t n, int id) 329 + { 330 + struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL); 331 + 332 + if (!res) 333 + return NULL; 334 + 335 + res->start = base; 336 + res->end = base + n - 1; 337 + res->flags = IORESOURCE_MEM; 338 + res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id); 339 + if (!res->name) 340 + return NULL; 341 + 342 + return no_free_ptr(res); 343 + } 344 + 345 + static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res) 346 + { 347 + int rc = insert_resource(parent, res); 348 + 349 + if (rc) 350 + del_cxl_resource(res); 351 + return rc; 352 + } 353 + 354 + DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *, 355 + if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev)) 356 + DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T)) 319 357 static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, 320 358 struct cxl_cfmws_context *ctx) 321 359 { 322 360 int target_map[CXL_DECODER_MAX_INTERLEAVE]; 323 361 struct cxl_port *root_port = ctx->root_port; 324 - struct resource *cxl_res = ctx->cxl_res; 325 362 struct cxl_cxims_context cxims_ctx; 326 - struct cxl_root_decoder *cxlrd; 327 363 struct device *dev = ctx->dev; 328 364 cxl_calc_hb_fn cxl_calc_hb; 329 365 struct cxl_decoder *cxld; 330 366 unsigned int ways, i, ig; 331 - struct resource *res; 332 367 int rc; 333 368 334 369 rc = cxl_acpi_cfmws_verify(dev, cfmws); 335 - if (rc) { 336 - dev_err(dev, "CFMWS range %#llx-%#llx not registered\n", 337 - cfmws->base_hpa, 338 - cfmws->base_hpa + cfmws->window_size - 1); 370 + if (rc) 339 371 return rc; 340 - } 341 372 342 373 rc = eiw_to_ways(cfmws->interleave_ways, &ways); 343 374 if (rc) ··· 379 
348 for (i = 0; i < ways; i++) 380 349 target_map[i] = cfmws->interleave_targets[i]; 381 350 382 - res = kzalloc(sizeof(*res), GFP_KERNEL); 351 + struct resource *res __free(del_cxl_resource) = alloc_cxl_resource( 352 + cfmws->base_hpa, cfmws->window_size, ctx->id++); 383 353 if (!res) 384 354 return -ENOMEM; 385 355 386 - res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++); 387 - if (!res->name) 388 - goto err_name; 389 - 390 - res->start = cfmws->base_hpa; 391 - res->end = cfmws->base_hpa + cfmws->window_size - 1; 392 - res->flags = IORESOURCE_MEM; 393 - 394 356 /* add to the local resource tracking to establish a sort order */ 395 - rc = insert_resource(cxl_res, res); 357 + rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res)); 396 358 if (rc) 397 - goto err_insert; 359 + return rc; 398 360 399 361 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) 400 362 cxl_calc_hb = cxl_hb_modulo; 401 363 else 402 364 cxl_calc_hb = cxl_hb_xor; 403 365 404 - cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); 366 + struct cxl_root_decoder *cxlrd __free(put_cxlrd) = 367 + cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); 405 368 if (IS_ERR(cxlrd)) 406 369 return PTR_ERR(cxlrd); 407 370 ··· 403 378 cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); 404 379 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 405 380 cxld->hpa_range = (struct range) { 406 - .start = res->start, 407 - .end = res->end, 381 + .start = cfmws->base_hpa, 382 + .end = cfmws->base_hpa + cfmws->window_size - 1, 408 383 }; 409 384 cxld->interleave_ways = ways; 410 385 /* ··· 424 399 rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS, 425 400 cxl_parse_cxims, &cxims_ctx); 426 401 if (rc < 0) 427 - goto err_xormap; 402 + return rc; 428 403 if (!cxlrd->platform_data) { 429 404 dev_err(dev, "No CXIMS for HBIG %u\n", ig); 430 - rc = -EINVAL; 431 - goto err_xormap; 405 + return -EINVAL; 432 406 } 433 407 } 434 408 } ··· 435 411 cxlrd->qos_class = cfmws->qtg_id; 
436 412 437 413 rc = cxl_decoder_add(cxld, target_map); 438 - err_xormap: 439 414 if (rc) 440 - put_device(&cxld->dev); 441 - else 442 - rc = cxl_decoder_autoremove(dev, cxld); 443 - return rc; 444 - 445 - err_insert: 446 - kfree(res->name); 447 - err_name: 448 - kfree(res); 449 - return -ENOMEM; 415 + return rc; 416 + return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd)); 450 417 } 451 418 452 419 static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, ··· 696 681 static void cxl_acpi_lock_reset_class(void *dev) 697 682 { 698 683 device_lock_reset_class(dev); 699 - } 700 - 701 - static void del_cxl_resource(struct resource *res) 702 - { 703 - kfree(res->name); 704 - kfree(res); 705 684 } 706 685 707 686 static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
+14
drivers/cxl/core/core.h
··· 27 27 int cxl_region_init(void); 28 28 void cxl_region_exit(void); 29 29 int cxl_get_poison_by_endpoint(struct cxl_port *port); 30 + struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); 31 + u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 32 + u64 dpa); 33 + 30 34 #else 35 + static inline u64 36 + cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa) 37 + { 38 + return ULLONG_MAX; 39 + } 40 + static inline 41 + struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) 42 + { 43 + return NULL; 44 + } 31 45 static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) 32 46 { 33 47 return 0;
+8 -5
drivers/cxl/core/hdm.c
··· 319 319 else if (resource_contains(&cxlds->ram_res, res)) 320 320 cxled->mode = CXL_DECODER_RAM; 321 321 else { 322 - dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id, 323 - cxled->cxld.id, cxled->dpa_res); 322 + dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n", 323 + port->id, cxled->cxld.id, cxled->dpa_res); 324 324 cxled->mode = CXL_DECODER_MIXED; 325 325 } 326 326 ··· 519 519 520 520 if (size > avail) { 521 521 dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, 522 - cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", 523 - &avail); 522 + cxl_decoder_mode_name(cxled->mode), &avail); 524 523 rc = -ENOSPC; 525 524 goto out; 526 525 } ··· 887 888 } 888 889 rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), 889 890 &cxld->interleave_granularity); 890 - if (rc) 891 + if (rc) { 892 + dev_warn(&port->dev, 893 + "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", 894 + port->id, cxld->id, ctrl); 891 895 return rc; 896 + } 892 897 893 898 dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", 894 899 port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
+42 -6
drivers/cxl/core/mbox.c
··· 56 56 CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), 57 57 CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), 58 58 CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), 59 + CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), 60 + CXL_CMD(CLEAR_LOG, 0x10, 0, 0), 61 + CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), 59 62 CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), 60 63 CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), 61 64 CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), ··· 333 330 if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG) 334 331 return false; 335 332 break; 333 + } 334 + case CXL_MBOX_OP_CLEAR_LOG: { 335 + const uuid_t *uuid = (uuid_t *)payload_in; 336 + 337 + /* 338 + * Restrict the ‘Clear log’ action to only apply to 339 + * Vendor debug logs. 340 + */ 341 + return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); 336 342 } 337 343 default: 338 344 break; ··· 854 842 enum cxl_event_type event_type, 855 843 const uuid_t *uuid, union cxl_event *evt) 856 844 { 857 - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) 858 - trace_cxl_general_media(cxlmd, type, &evt->gen_media); 859 - else if (event_type == CXL_CPER_EVENT_DRAM) 860 - trace_cxl_dram(cxlmd, type, &evt->dram); 861 - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) 845 + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { 862 846 trace_cxl_memory_module(cxlmd, type, &evt->mem_module); 863 - else 847 + return; 848 + } 849 + if (event_type == CXL_CPER_EVENT_GENERIC) { 864 850 trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); 851 + return; 852 + } 853 + 854 + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { 855 + u64 dpa, hpa = ULLONG_MAX; 856 + struct cxl_region *cxlr; 857 + 858 + /* 859 + * These trace points are annotated with HPA and region 860 + * translations. Take topology mutation locks and lookup 861 + * { HPA, REGION } from { DPA, MEMDEV } in the event record. 
862 + */ 863 + guard(rwsem_read)(&cxl_region_rwsem); 864 + guard(rwsem_read)(&cxl_dpa_rwsem); 865 + 866 + dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK; 867 + cxlr = cxl_dpa_to_region(cxlmd, dpa); 868 + if (cxlr) 869 + hpa = cxl_trace_hpa(cxlr, cxlmd, dpa); 870 + 871 + if (event_type == CXL_CPER_EVENT_GEN_MEDIA) 872 + trace_cxl_general_media(cxlmd, type, cxlr, hpa, 873 + &evt->gen_media); 874 + else if (event_type == CXL_CPER_EVENT_DRAM) 875 + trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram); 876 + } 865 877 } 866 878 EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); 867 879
-44
drivers/cxl/core/memdev.c
··· 251 251 } 252 252 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); 253 253 254 - struct cxl_dpa_to_region_context { 255 - struct cxl_region *cxlr; 256 - u64 dpa; 257 - }; 258 - 259 - static int __cxl_dpa_to_region(struct device *dev, void *arg) 260 - { 261 - struct cxl_dpa_to_region_context *ctx = arg; 262 - struct cxl_endpoint_decoder *cxled; 263 - u64 dpa = ctx->dpa; 264 - 265 - if (!is_endpoint_decoder(dev)) 266 - return 0; 267 - 268 - cxled = to_cxl_endpoint_decoder(dev); 269 - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) 270 - return 0; 271 - 272 - if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) 273 - return 0; 274 - 275 - dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, 276 - dev_name(&cxled->cxld.region->dev)); 277 - 278 - ctx->cxlr = cxled->cxld.region; 279 - 280 - return 1; 281 - } 282 - 283 - static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) 284 - { 285 - struct cxl_dpa_to_region_context ctx; 286 - struct cxl_port *port; 287 - 288 - ctx = (struct cxl_dpa_to_region_context) { 289 - .dpa = dpa, 290 - }; 291 - port = cxlmd->endpoint; 292 - if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) 293 - device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); 294 - 295 - return ctx.cxlr; 296 - } 297 - 298 254 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) 299 255 { 300 256 struct cxl_dev_state *cxlds = cxlmd->cxlds;
+152 -25
drivers/cxl/core/region.c
··· 2679 2679 return rc; 2680 2680 } 2681 2681 2682 + struct cxl_dpa_to_region_context { 2683 + struct cxl_region *cxlr; 2684 + u64 dpa; 2685 + }; 2686 + 2687 + static int __cxl_dpa_to_region(struct device *dev, void *arg) 2688 + { 2689 + struct cxl_dpa_to_region_context *ctx = arg; 2690 + struct cxl_endpoint_decoder *cxled; 2691 + u64 dpa = ctx->dpa; 2692 + 2693 + if (!is_endpoint_decoder(dev)) 2694 + return 0; 2695 + 2696 + cxled = to_cxl_endpoint_decoder(dev); 2697 + if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) 2698 + return 0; 2699 + 2700 + if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) 2701 + return 0; 2702 + 2703 + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, 2704 + dev_name(&cxled->cxld.region->dev)); 2705 + 2706 + ctx->cxlr = cxled->cxld.region; 2707 + 2708 + return 1; 2709 + } 2710 + 2711 + struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) 2712 + { 2713 + struct cxl_dpa_to_region_context ctx; 2714 + struct cxl_port *port; 2715 + 2716 + ctx = (struct cxl_dpa_to_region_context) { 2717 + .dpa = dpa, 2718 + }; 2719 + port = cxlmd->endpoint; 2720 + if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) 2721 + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); 2722 + 2723 + return ctx.cxlr; 2724 + } 2725 + 2726 + static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) 2727 + { 2728 + struct cxl_region_params *p = &cxlr->params; 2729 + int gran = p->interleave_granularity; 2730 + int ways = p->interleave_ways; 2731 + u64 offset; 2732 + 2733 + /* Is the hpa within this region at all */ 2734 + if (hpa < p->res->start || hpa > p->res->end) { 2735 + dev_dbg(&cxlr->dev, 2736 + "Addr trans fail: hpa 0x%llx not in region\n", hpa); 2737 + return false; 2738 + } 2739 + 2740 + /* Is the hpa in an expected chunk for its pos(-ition) */ 2741 + offset = hpa - p->res->start; 2742 + offset = do_div(offset, gran * ways); 2743 + if ((offset >= pos * gran) && (offset < (pos + 
1) * gran)) 2744 + return true; 2745 + 2746 + dev_dbg(&cxlr->dev, 2747 + "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); 2748 + 2749 + return false; 2750 + } 2751 + 2752 + static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, 2753 + struct cxl_endpoint_decoder *cxled) 2754 + { 2755 + u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; 2756 + struct cxl_region_params *p = &cxlr->params; 2757 + int pos = cxled->pos; 2758 + u16 eig = 0; 2759 + u8 eiw = 0; 2760 + 2761 + ways_to_eiw(p->interleave_ways, &eiw); 2762 + granularity_to_eig(p->interleave_granularity, &eig); 2763 + 2764 + /* 2765 + * The device position in the region interleave set was removed 2766 + * from the offset at HPA->DPA translation. To reconstruct the 2767 + * HPA, place the 'pos' in the offset. 2768 + * 2769 + * The placement of 'pos' in the HPA is determined by interleave 2770 + * ways and granularity and is defined in the CXL Spec 3.0 Section 2771 + * 8.2.4.19.13 Implementation Note: Device Decode Logic 2772 + */ 2773 + 2774 + /* Remove the dpa base */ 2775 + dpa_offset = dpa - cxl_dpa_resource_start(cxled); 2776 + 2777 + mask_upper = GENMASK_ULL(51, eig + 8); 2778 + 2779 + if (eiw < 8) { 2780 + hpa_offset = (dpa_offset & mask_upper) << eiw; 2781 + hpa_offset |= pos << (eig + 8); 2782 + } else { 2783 + bits_upper = (dpa_offset & mask_upper) >> (eig + 8); 2784 + bits_upper = bits_upper * 3; 2785 + hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); 2786 + } 2787 + 2788 + /* The lower bits remain unchanged */ 2789 + hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); 2790 + 2791 + /* Apply the hpa_offset to the region base address */ 2792 + hpa = hpa_offset + p->res->start; 2793 + 2794 + if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) 2795 + return ULLONG_MAX; 2796 + 2797 + return hpa; 2798 + } 2799 + 2800 + u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 2801 + u64 dpa) 2802 + { 2803 + struct cxl_region_params *p = &cxlr->params; 2804 + 
struct cxl_endpoint_decoder *cxled = NULL; 2805 + 2806 + for (int i = 0; i < p->nr_targets; i++) { 2807 + cxled = p->targets[i]; 2808 + if (cxlmd == cxled_to_memdev(cxled)) 2809 + break; 2810 + } 2811 + if (!cxled || cxlmd != cxled_to_memdev(cxled)) 2812 + return ULLONG_MAX; 2813 + 2814 + return cxl_dpa_to_hpa(dpa, cxlr, cxled); 2815 + } 2816 + 2682 2817 static struct lock_class_key cxl_pmem_region_key; 2683 2818 2684 - static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) 2819 + static int cxl_pmem_region_alloc(struct cxl_region *cxlr) 2685 2820 { 2686 2821 struct cxl_region_params *p = &cxlr->params; 2687 2822 struct cxl_nvdimm_bridge *cxl_nvb; 2688 - struct cxl_pmem_region *cxlr_pmem; 2689 2823 struct device *dev; 2690 2824 int i; 2691 2825 2692 - down_read(&cxl_region_rwsem); 2693 - if (p->state != CXL_CONFIG_COMMIT) { 2694 - cxlr_pmem = ERR_PTR(-ENXIO); 2695 - goto out; 2696 - } 2826 + guard(rwsem_read)(&cxl_region_rwsem); 2827 + if (p->state != CXL_CONFIG_COMMIT) 2828 + return -ENXIO; 2697 2829 2698 - cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), 2699 - GFP_KERNEL); 2700 - if (!cxlr_pmem) { 2701 - cxlr_pmem = ERR_PTR(-ENOMEM); 2702 - goto out; 2703 - } 2830 + struct cxl_pmem_region *cxlr_pmem __free(kfree) = 2831 + kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); 2832 + if (!cxlr_pmem) 2833 + return -ENOMEM; 2704 2834 2705 2835 cxlr_pmem->hpa_range.start = p->res->start; 2706 2836 cxlr_pmem->hpa_range.end = p->res->end; ··· 2848 2718 */ 2849 2719 if (i == 0) { 2850 2720 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); 2851 - if (!cxl_nvb) { 2852 - cxlr_pmem = ERR_PTR(-ENODEV); 2853 - goto out; 2854 - } 2721 + if (!cxl_nvb) 2722 + return -ENODEV; 2855 2723 cxlr->cxl_nvb = cxl_nvb; 2856 2724 } 2857 2725 m->cxlmd = cxlmd; ··· 2860 2732 } 2861 2733 2862 2734 dev = &cxlr_pmem->dev; 2863 - cxlr_pmem->cxlr = cxlr; 2864 - cxlr->cxlr_pmem = cxlr_pmem; 2865 2735 device_initialize(dev); 2866 2736 
lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); 2867 2737 device_set_pm_not_required(dev); 2868 2738 dev->parent = &cxlr->dev; 2869 2739 dev->bus = &cxl_bus_type; 2870 2740 dev->type = &cxl_pmem_region_type; 2871 - out: 2872 - up_read(&cxl_region_rwsem); 2741 + cxlr_pmem->cxlr = cxlr; 2742 + cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); 2873 2743 2874 - return cxlr_pmem; 2744 + return 0; 2875 2745 } 2876 2746 2877 2747 static void cxl_dax_region_release(struct device *dev) ··· 2986 2860 struct device *dev; 2987 2861 int rc; 2988 2862 2989 - cxlr_pmem = cxl_pmem_region_alloc(cxlr); 2990 - if (IS_ERR(cxlr_pmem)) 2991 - return PTR_ERR(cxlr_pmem); 2863 + rc = cxl_pmem_region_alloc(cxlr); 2864 + if (rc) 2865 + return rc; 2866 + cxlr_pmem = cxlr->cxlr_pmem; 2992 2867 cxl_nvb = cxlr->cxl_nvb; 2993 2868 2994 2869 dev = &cxlr_pmem->dev;
-91
drivers/cxl/core/trace.c
··· 6 6 7 7 #define CREATE_TRACE_POINTS 8 8 #include "trace.h" 9 - 10 - static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) 11 - { 12 - struct cxl_region_params *p = &cxlr->params; 13 - int gran = p->interleave_granularity; 14 - int ways = p->interleave_ways; 15 - u64 offset; 16 - 17 - /* Is the hpa within this region at all */ 18 - if (hpa < p->res->start || hpa > p->res->end) { 19 - dev_dbg(&cxlr->dev, 20 - "Addr trans fail: hpa 0x%llx not in region\n", hpa); 21 - return false; 22 - } 23 - 24 - /* Is the hpa in an expected chunk for its pos(-ition) */ 25 - offset = hpa - p->res->start; 26 - offset = do_div(offset, gran * ways); 27 - if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) 28 - return true; 29 - 30 - dev_dbg(&cxlr->dev, 31 - "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); 32 - 33 - return false; 34 - } 35 - 36 - static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, 37 - struct cxl_endpoint_decoder *cxled) 38 - { 39 - u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; 40 - struct cxl_region_params *p = &cxlr->params; 41 - int pos = cxled->pos; 42 - u16 eig = 0; 43 - u8 eiw = 0; 44 - 45 - ways_to_eiw(p->interleave_ways, &eiw); 46 - granularity_to_eig(p->interleave_granularity, &eig); 47 - 48 - /* 49 - * The device position in the region interleave set was removed 50 - * from the offset at HPA->DPA translation. To reconstruct the 51 - * HPA, place the 'pos' in the offset. 
52 - * 53 - * The placement of 'pos' in the HPA is determined by interleave 54 - * ways and granularity and is defined in the CXL Spec 3.0 Section 55 - * 8.2.4.19.13 Implementation Note: Device Decode Logic 56 - */ 57 - 58 - /* Remove the dpa base */ 59 - dpa_offset = dpa - cxl_dpa_resource_start(cxled); 60 - 61 - mask_upper = GENMASK_ULL(51, eig + 8); 62 - 63 - if (eiw < 8) { 64 - hpa_offset = (dpa_offset & mask_upper) << eiw; 65 - hpa_offset |= pos << (eig + 8); 66 - } else { 67 - bits_upper = (dpa_offset & mask_upper) >> (eig + 8); 68 - bits_upper = bits_upper * 3; 69 - hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); 70 - } 71 - 72 - /* The lower bits remain unchanged */ 73 - hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); 74 - 75 - /* Apply the hpa_offset to the region base address */ 76 - hpa = hpa_offset + p->res->start; 77 - 78 - if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) 79 - return ULLONG_MAX; 80 - 81 - return hpa; 82 - } 83 - 84 - u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd, 85 - u64 dpa) 86 - { 87 - struct cxl_region_params *p = &cxlr->params; 88 - struct cxl_endpoint_decoder *cxled = NULL; 89 - 90 - for (int i = 0; i < p->nr_targets; i++) { 91 - cxled = p->targets[i]; 92 - if (cxlmd == cxled_to_memdev(cxled)) 93 - break; 94 - } 95 - if (!cxled || cxlmd != cxled_to_memdev(cxled)) 96 - return ULLONG_MAX; 97 - 98 - return cxl_dpa_to_hpa(dpa, cxlr, cxled); 99 - }
+37 -13
drivers/cxl/core/trace.h
··· 253 253 * DRAM Event Record 254 254 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 255 255 */ 256 - #define CXL_DPA_FLAGS_MASK 0x3F 257 - #define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK) 256 + #define CXL_DPA_FLAGS_MASK GENMASK(1, 0) 257 + #define CXL_DPA_MASK GENMASK_ULL(63, 6) 258 258 259 259 #define CXL_DPA_VOLATILE BIT(0) 260 260 #define CXL_DPA_NOT_REPAIRABLE BIT(1) ··· 316 316 TRACE_EVENT(cxl_general_media, 317 317 318 318 TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, 319 - struct cxl_event_gen_media *rec), 319 + struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec), 320 320 321 - TP_ARGS(cxlmd, log, rec), 321 + TP_ARGS(cxlmd, log, cxlr, hpa, rec), 322 322 323 323 TP_STRUCT__entry( 324 324 CXL_EVT_TP_entry ··· 330 330 __field(u8, channel) 331 331 __field(u32, device) 332 332 __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) 333 - __field(u16, validity_flags) 334 333 /* Following are out of order to pack trace record */ 334 + __field(u64, hpa) 335 + __field_struct(uuid_t, region_uuid) 336 + __field(u16, validity_flags) 335 337 __field(u8, rank) 336 338 __field(u8, dpa_flags) 339 + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") 337 340 ), 338 341 339 342 TP_fast_assign( ··· 357 354 memcpy(__entry->comp_id, &rec->component_id, 358 355 CXL_EVENT_GEN_MED_COMP_ID_SIZE); 359 356 __entry->validity_flags = get_unaligned_le16(&rec->validity_flags); 357 + __entry->hpa = hpa; 358 + if (cxlr) { 359 + __assign_str(region_name, dev_name(&cxlr->dev)); 360 + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); 361 + } else { 362 + __assign_str(region_name, ""); 363 + uuid_copy(&__entry->region_uuid, &uuid_null); 364 + } 360 365 ), 361 366 362 367 CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ 363 368 "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \ 364 - "device=%x comp_id=%s validity_flags='%s'", 369 + "device=%x comp_id=%s validity_flags='%s' " \ 370 + "hpa=%llx region=%s region_uuid=%pUb", 365 371 __entry->dpa, show_dpa_flags(__entry->dpa_flags), 366 372 show_event_desc_flags(__entry->descriptor), 367 373 show_mem_event_type(__entry->type), 368 374 show_trans_type(__entry->transaction_type), 369 375 __entry->channel, __entry->rank, __entry->device, 370 376 __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), 371 - show_valid_flags(__entry->validity_flags) 377 + show_valid_flags(__entry->validity_flags), 378 + __entry->hpa, __get_str(region_name), &__entry->region_uuid 372 379 ) 373 380 ); 374 381 ··· 413 400 TRACE_EVENT(cxl_dram, 414 401 415 402 TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, 416 - struct cxl_event_dram *rec), 403 + struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec), 417 404 418 - TP_ARGS(cxlmd, log, rec), 405 + TP_ARGS(cxlmd, log, cxlr, hpa, rec), 419 406 420 407 TP_STRUCT__entry( 421 408 CXL_EVT_TP_entry ··· 430 417 __field(u32, nibble_mask) 431 418 __field(u32, row) 432 419 __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE) 420 + __field(u64, hpa) 421 + __field_struct(uuid_t, region_uuid) 433 422 __field(u8, rank) /* Out of order to pack trace record */ 434 423 __field(u8, 
bank_group) /* Out of order to pack trace record */ 435 424 __field(u8, bank) /* Out of order to pack trace record */ 436 425 __field(u8, dpa_flags) /* Out of order to pack trace record */ 426 + __string(region_name, cxlr ? dev_name(&cxlr->dev) : "") 437 427 ), 438 428 439 429 TP_fast_assign( ··· 460 444 __entry->column = get_unaligned_le16(rec->column); 461 445 memcpy(__entry->cor_mask, &rec->correction_mask, 462 446 CXL_EVENT_DER_CORRECTION_MASK_SIZE); 447 + __entry->hpa = hpa; 448 + if (cxlr) { 449 + __assign_str(region_name, dev_name(&cxlr->dev)); 450 + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); 451 + } else { 452 + __assign_str(region_name, ""); 453 + uuid_copy(&__entry->region_uuid, &uuid_null); 454 + } 463 455 ), 464 456 465 457 CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \ 466 458 "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \ 467 459 "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \ 468 - "validity_flags='%s'", 460 + "validity_flags='%s' " \ 461 + "hpa=%llx region=%s region_uuid=%pUb", 469 462 __entry->dpa, show_dpa_flags(__entry->dpa_flags), 470 463 show_event_desc_flags(__entry->descriptor), 471 464 show_mem_event_type(__entry->type), ··· 483 458 __entry->bank_group, __entry->bank, 484 459 __entry->row, __entry->column, 485 460 __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE), 486 - show_dram_valid_flags(__entry->validity_flags) 461 + show_dram_valid_flags(__entry->validity_flags), 462 + __entry->hpa, __get_str(region_name), &__entry->region_uuid 487 463 ) 488 464 ); 489 465 ··· 667 641 (le32_to_cpu(record->length) * CXL_POISON_LEN_MULT) 668 642 #define cxl_poison_overflow(flags, time) \ 669 643 (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0) 670 - 671 - u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); 672 644 673 645 TRACE_EVENT(cxl_poison, 674 646
+7
drivers/cxl/cxl.h
··· 12 12 #include <linux/node.h> 13 13 #include <linux/io.h> 14 14 15 + extern const struct nvdimm_security_ops *cxl_security_ops; 16 + 15 17 /** 16 18 * DOC: cxl objects 17 19 * ··· 781 779 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); 782 780 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); 783 781 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); 782 + static inline int cxl_root_decoder_autoremove(struct device *host, 783 + struct cxl_root_decoder *cxlrd) 784 + { 785 + return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld); 786 + } 784 787 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); 785 788 786 789 /**
+3
drivers/cxl/cxlmem.h
··· 527 527 CXL_MBOX_OP_SET_TIMESTAMP = 0x0301, 528 528 CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, 529 529 CXL_MBOX_OP_GET_LOG = 0x0401, 530 + CXL_MBOX_OP_GET_LOG_CAPS = 0x0402, 531 + CXL_MBOX_OP_CLEAR_LOG = 0x0403, 532 + CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405, 530 533 CXL_MBOX_OP_IDENTIFY = 0x4000, 531 534 CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, 532 535 CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
+70 -1
drivers/cxl/pci.c
··· 974 974 }, 975 975 }; 976 976 977 - module_pci_driver(cxl_pci_driver); 977 + #define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0) 978 + static void cxl_handle_cper_event(enum cxl_event_type ev_type, 979 + struct cxl_cper_event_rec *rec) 980 + { 981 + struct cper_cxl_event_devid *device_id = &rec->hdr.device_id; 982 + struct pci_dev *pdev __free(pci_dev_put) = NULL; 983 + enum cxl_event_log_type log_type; 984 + struct cxl_dev_state *cxlds; 985 + unsigned int devfn; 986 + u32 hdr_flags; 987 + 988 + pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type, 989 + device_id->segment_num, device_id->bus_num, 990 + device_id->device_num, device_id->func_num); 991 + 992 + devfn = PCI_DEVFN(device_id->device_num, device_id->func_num); 993 + pdev = pci_get_domain_bus_and_slot(device_id->segment_num, 994 + device_id->bus_num, devfn); 995 + if (!pdev) 996 + return; 997 + 998 + guard(device)(&pdev->dev); 999 + if (pdev->driver != &cxl_pci_driver) 1000 + return; 1001 + 1002 + cxlds = pci_get_drvdata(pdev); 1003 + if (!cxlds) 1004 + return; 1005 + 1006 + /* Fabricate a log type */ 1007 + hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags); 1008 + log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags); 1009 + 1010 + cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type, 1011 + &uuid_null, &rec->event); 1012 + } 1013 + 1014 + static void cxl_cper_work_fn(struct work_struct *work) 1015 + { 1016 + struct cxl_cper_work_data wd; 1017 + 1018 + while (cxl_cper_kfifo_get(&wd)) 1019 + cxl_handle_cper_event(wd.event_type, &wd.rec); 1020 + } 1021 + static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn); 1022 + 1023 + static int __init cxl_pci_driver_init(void) 1024 + { 1025 + int rc; 1026 + 1027 + rc = pci_register_driver(&cxl_pci_driver); 1028 + if (rc) 1029 + return rc; 1030 + 1031 + rc = cxl_cper_register_work(&cxl_cper_work); 1032 + if (rc) 1033 + pci_unregister_driver(&cxl_pci_driver); 1034 + 1035 + return rc; 1036 + } 1037 + 1038 + static void __exit 
cxl_pci_driver_exit(void) 1039 + { 1040 + cxl_cper_unregister_work(&cxl_cper_work); 1041 + cancel_work_sync(&cxl_cper_work); 1042 + pci_unregister_driver(&cxl_pci_driver); 1043 + } 1044 + 1045 + module_init(cxl_pci_driver_init); 1046 + module_exit(cxl_pci_driver_exit); 978 1047 MODULE_LICENSE("GPL v2"); 979 1048 MODULE_IMPORT_NS(CXL);
-2
drivers/cxl/pmem.c
··· 11 11 #include "cxlmem.h" 12 12 #include "cxl.h" 13 13 14 - extern const struct nvdimm_security_ops *cxl_security_ops; 15 - 16 14 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); 17 15 18 16 static void clear_exclusive(void *mds)
+39
include/linux/cxl-event.h
··· 3 3 #ifndef _LINUX_CXL_EVENT_H 4 4 #define _LINUX_CXL_EVENT_H 5 5 6 + #include <linux/types.h> 7 + #include <linux/uuid.h> 8 + #include <linux/workqueue_types.h> 9 + 6 10 /* 7 11 * Common Event Record Format 8 12 * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 ··· 95 91 u8 reserved[0x3d]; 96 92 } __packed; 97 93 94 + /* 95 + * General Media or DRAM Event Common Fields 96 + * - provides common access to phys_addr 97 + */ 98 + struct cxl_event_common { 99 + struct cxl_event_record_hdr hdr; 100 + __le64 phys_addr; 101 + } __packed; 102 + 98 103 union cxl_event { 99 104 struct cxl_event_generic generic; 100 105 struct cxl_event_gen_media gen_media; 101 106 struct cxl_event_dram dram; 102 107 struct cxl_event_mem_module mem_module; 108 + struct cxl_event_common common; 103 109 } __packed; 104 110 105 111 /* ··· 153 139 154 140 union cxl_event event; 155 141 } __packed; 142 + 143 + struct cxl_cper_work_data { 144 + enum cxl_event_type event_type; 145 + struct cxl_cper_event_rec rec; 146 + }; 147 + 148 + #ifdef CONFIG_ACPI_APEI_GHES 149 + int cxl_cper_register_work(struct work_struct *work); 150 + int cxl_cper_unregister_work(struct work_struct *work); 151 + int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd); 152 + #else 153 + static inline int cxl_cper_register_work(struct work_struct *work) 154 + { 155 + return 0; 156 + } 157 + 158 + static inline int cxl_cper_unregister_work(struct work_struct *work) 159 + { 160 + return 0; 161 + } 162 + static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) 163 + { 164 + return 0; 165 + } 166 + #endif 156 167 157 168 #endif /* _LINUX_CXL_EVENT_H */
+3
include/uapi/linux/cxl_mem.h
··· 47 47 ___DEPRECATED(SCAN_MEDIA, "Scan Media"), \ 48 48 ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \ 49 49 ___C(GET_TIMESTAMP, "Get Timestamp"), \ 50 + ___C(GET_LOG_CAPS, "Get Log Capabilities"), \ 51 + ___C(CLEAR_LOG, "Clear Log"), \ 52 + ___C(GET_SUP_LOG_SUBLIST, "Get Supported Logs Sub-List"), \ 50 53 ___C(MAX, "invalid / last command") 51 54 52 55 #define ___C(a, b) CXL_MEM_COMMAND_ID_##a
+15 -4
tools/testing/cxl/test/mem.c
··· 127 127 #define CXL_TEST_EVENT_CNT_MAX 15 128 128 129 129 /* Set a number of events to return at a time for simulation. */ 130 - #define CXL_TEST_EVENT_CNT 3 130 + #define CXL_TEST_EVENT_RET_MAX 4 131 131 132 132 struct mock_event_log { 133 133 u16 clear_idx; ··· 222 222 log->nr_events++; 223 223 } 224 224 225 + /* 226 + * Vary the number of events returned to simulate events occuring while the 227 + * logs are being read. 228 + */ 229 + static int ret_limit = 0; 230 + 225 231 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd) 226 232 { 227 233 struct cxl_get_event_payload *pl; ··· 239 233 if (cmd->size_in != sizeof(log_type)) 240 234 return -EINVAL; 241 235 242 - if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT)) 236 + ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX; 237 + if (!ret_limit) 238 + ret_limit = 1; 239 + 240 + if (cmd->size_out < struct_size(pl, records, ret_limit)) 243 241 return -EINVAL; 244 242 245 243 log_type = *((u8 *)cmd->payload_in); 246 244 if (log_type >= CXL_EVENT_TYPE_MAX) 247 245 return -EINVAL; 248 246 249 - memset(cmd->payload_out, 0, cmd->size_out); 247 + memset(cmd->payload_out, 0, struct_size(pl, records, 0)); 250 248 251 249 log = event_find_log(dev, log_type); 252 250 if (!log || event_log_empty(log)) ··· 258 248 259 249 pl = cmd->payload_out; 260 250 261 - for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) { 251 + for (i = 0; i < ret_limit && !event_log_empty(log); i++) { 262 252 memcpy(&pl->records[i], event_get_current(log), 263 253 sizeof(pl->records[i])); 264 254 pl->records[i].event.generic.hdr.handle = ··· 266 256 log->cur_idx++; 267 257 } 268 258 259 + cmd->size_out = struct_size(pl, records, i); 269 260 pl->record_count = cpu_to_le16(i); 270 261 if (!event_log_empty(log)) 271 262 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;