Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'cxl-for-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dave Jiang:

- Constify range_contains() input parameters to prevent changes

- Add support for displaying RCD capabilities in sysfs to support lspci
for CXL devices

- Downgrade warning message to debug in cxl_probe_component_regs()

- Add a printf specifier '%pra' to emit 'struct
range' content:
- Add sanity tests for 'struct resource'
- Add documentation for special case
- Add %pra for 'struct range'
- Add %pra usage in CXL code

- Add preparation code for DCD support:
- Add range_overlaps()
- Add CDAT DSMAS table shared and read only flag in ACPICA
- Add documentation to 'struct dev_dax_range'
- Delay event buffer allocation in CXL PCI code until needed
- Use guard() in cxl_dpa_set_mode()
- Refactor create region code to consolidate common code

* tag 'cxl-for-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
cxl/region: Refactor common create region code
cxl/hdm: Use guard() in cxl_dpa_set_mode()
cxl/pci: Delay event buffer allocation
dax: Document struct dev_dax_range
ACPI/CDAT: Add CDAT/DSMAS shared and read only flag values
range: Add range_overlaps()
cxl/cdat: Use %pra for dpa range outputs
printf: Add print format (%pra) for struct range
Documentation/printf: struct resource add start == end special case
test printf: Add very basic struct resource tests
cxl: downgrade a warning message to debug level in cxl_probe_component_regs()
cxl/pci: Add sysfs attribute for CXL 1.1 device link status
cxl/core/regs: Add rcd_pcie_cap initialization
kernel/range: Const-ify range_contains parameters

+366 -65
+19 -1
Documentation/core-api/printk-formats.rst
··· 209 209 :: 210 210 211 211 %pr [mem 0x60000000-0x6fffffff flags 0x2200] or 212 + [mem 0x60000000 flags 0x2200] or 212 213 [mem 0x0000000060000000-0x000000006fffffff flags 0x2200] 214 + [mem 0x0000000060000000 flags 0x2200] 213 215 %pR [mem 0x60000000-0x6fffffff pref] or 216 + [mem 0x60000000 pref] or 214 217 [mem 0x0000000060000000-0x000000006fffffff pref] 218 + [mem 0x0000000060000000 pref] 215 219 216 220 For printing struct resources. The ``R`` and ``r`` specifiers result in a 217 - printed resource with (R) or without (r) a decoded flags member. 221 + printed resource with (R) or without (r) a decoded flags member. If start is 222 + equal to end only print the start value. 218 223 219 224 Passed by reference. 220 225 ··· 233 228 For printing a phys_addr_t type (and its derivatives, such as 234 229 resource_size_t) which can vary based on build options, regardless of the 235 230 width of the CPU data path. 231 + 232 + Passed by reference. 233 + 234 + Struct Range 235 + ------------ 236 + 237 + :: 238 + 239 + %pra [range 0x0000000060000000-0x000000006fffffff] or 240 + [range 0x0000000060000000] 241 + 242 + For printing struct range. struct range holds an arbitrary range of u64 243 + values. If start is equal to end only print the start value. 236 244 237 245 Passed by reference. 238 246
+4 -4
drivers/cxl/core/cdat.c
··· 247 247 dpa_perf->dpa_range = dent->dpa_range; 248 248 dpa_perf->qos_class = dent->qos_class; 249 249 dev_dbg(dev, 250 - "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", 251 - dent->dpa_range.start, dpa_perf->qos_class, 250 + "DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", 251 + &dent->dpa_range, dpa_perf->qos_class, 252 252 dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth, 253 253 dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth, 254 254 dent->coord[ACCESS_COORDINATE_CPU].read_latency, ··· 279 279 range_contains(&pmem_range, &dent->dpa_range)) 280 280 update_perf_entry(dev, dent, &mds->pmem_perf); 281 281 else 282 - dev_dbg(dev, "no partition for dsmas dpa: %#llx\n", 283 - dent->dpa_range.start); 282 + dev_dbg(dev, "no partition for dsmas dpa: %pra\n", 283 + &dent->dpa_range); 284 284 } 285 285 } 286 286
+5
drivers/cxl/core/core.h
··· 89 89 enum cxl_rcrb which); 90 90 u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb); 91 91 92 + #define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0) 93 + #define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0) 94 + #define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8) 95 + #define PCI_CAP_EXP_SIZEOF 0x3c 96 + 92 97 extern struct rw_semaphore cxl_dpa_rwsem; 93 98 extern struct rw_semaphore cxl_region_rwsem; 94 99
+6 -15
drivers/cxl/core/hdm.c
··· 424 424 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 425 425 struct cxl_dev_state *cxlds = cxlmd->cxlds; 426 426 struct device *dev = &cxled->cxld.dev; 427 - int rc; 428 427 429 428 switch (mode) { 430 429 case CXL_DECODER_RAM: ··· 434 435 return -EINVAL; 435 436 } 436 437 437 - down_write(&cxl_dpa_rwsem); 438 - if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { 439 - rc = -EBUSY; 440 - goto out; 441 - } 438 + guard(rwsem_write)(&cxl_dpa_rwsem); 439 + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) 440 + return -EBUSY; 442 441 443 442 /* 444 443 * Only allow modes that are supported by the current partition ··· 444 447 */ 445 448 if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { 446 449 dev_dbg(dev, "no available pmem capacity\n"); 447 - rc = -ENXIO; 448 - goto out; 450 + return -ENXIO; 449 451 } 450 452 if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { 451 453 dev_dbg(dev, "no available ram capacity\n"); 452 - rc = -ENXIO; 453 - goto out; 454 + return -ENXIO; 454 455 } 455 456 456 457 cxled->mode = mode; 457 - rc = 0; 458 - out: 459 - up_write(&cxl_dpa_rwsem); 460 - 461 - return rc; 458 + return 0; 462 459 } 463 460 464 461 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+11 -17
drivers/cxl/core/region.c
··· 2537 2537 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); 2538 2538 } 2539 2539 2540 - static ssize_t create_pmem_region_store(struct device *dev, 2541 - struct device_attribute *attr, 2542 - const char *buf, size_t len) 2540 + static ssize_t create_region_store(struct device *dev, const char *buf, 2541 + size_t len, enum cxl_decoder_mode mode) 2543 2542 { 2544 2543 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2545 2544 struct cxl_region *cxlr; ··· 2548 2549 if (rc != 1) 2549 2550 return -EINVAL; 2550 2551 2551 - cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); 2552 + cxlr = __create_region(cxlrd, mode, id); 2552 2553 if (IS_ERR(cxlr)) 2553 2554 return PTR_ERR(cxlr); 2554 2555 2555 2556 return len; 2557 + } 2558 + 2559 + static ssize_t create_pmem_region_store(struct device *dev, 2560 + struct device_attribute *attr, 2561 + const char *buf, size_t len) 2562 + { 2563 + return create_region_store(dev, buf, len, CXL_DECODER_PMEM); 2556 2564 } 2557 2565 DEVICE_ATTR_RW(create_pmem_region); 2558 2566 ··· 2567 2561 struct device_attribute *attr, 2568 2562 const char *buf, size_t len) 2569 2563 { 2570 - struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2571 - struct cxl_region *cxlr; 2572 - int rc, id; 2573 - 2574 - rc = sscanf(buf, "region%d\n", &id); 2575 - if (rc != 1) 2576 - return -EINVAL; 2577 - 2578 - cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); 2579 - if (IS_ERR(cxlr)) 2580 - return PTR_ERR(cxlr); 2581 - 2582 - return len; 2564 + return create_region_store(dev, buf, len, CXL_DECODER_RAM); 2583 2565 } 2584 2566 DEVICE_ATTR_RW(create_ram_region); 2585 2567
+57 -1
drivers/cxl/core/regs.c
··· 52 52 cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET); 53 53 54 54 if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { 55 - dev_err(dev, 55 + dev_dbg(dev, 56 56 "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); 57 57 return; 58 58 } ··· 505 505 506 506 return offset; 507 507 } 508 + 509 + static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport) 510 + { 511 + resource_size_t rcrb = dport->rcrb.base; 512 + void __iomem *addr; 513 + u32 cap_hdr; 514 + u16 offset; 515 + 516 + if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB")) 517 + return CXL_RESOURCE_NONE; 518 + 519 + addr = ioremap(rcrb, SZ_4K); 520 + if (!addr) { 521 + dev_err(dev, "Failed to map region %pr\n", addr); 522 + release_mem_region(rcrb, SZ_4K); 523 + return CXL_RESOURCE_NONE; 524 + } 525 + 526 + offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST)); 527 + cap_hdr = readl(addr + offset); 528 + while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) { 529 + offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr); 530 + if (offset == 0 || offset > SZ_4K) { 531 + offset = 0; 532 + break; 533 + } 534 + cap_hdr = readl(addr + offset); 535 + } 536 + 537 + iounmap(addr); 538 + release_mem_region(rcrb, SZ_4K); 539 + if (!offset) 540 + return CXL_RESOURCE_NONE; 541 + 542 + return offset; 543 + } 544 + 545 + int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport) 546 + { 547 + void __iomem *dport_pcie_cap = NULL; 548 + resource_size_t pos; 549 + struct cxl_rcrb_info *ri; 550 + 551 + ri = &dport->rcrb; 552 + pos = cxl_rcrb_to_linkcap(&pdev->dev, dport); 553 + if (pos == CXL_RESOURCE_NONE) 554 + return -ENXIO; 555 + 556 + dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev, 557 + ri->base + pos, 558 + PCI_CAP_EXP_SIZEOF); 559 + dport->regs.rcd_pcie_cap = dport_pcie_cap; 560 + 561 + return 0; 562 + } 563 + EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, CXL); 508 564 509 565 
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri, 510 566 enum cxl_rcrb which)
+9
drivers/cxl/cxl.h
··· 235 235 struct_group_tagged(cxl_rch_regs, rch_regs, 236 236 void __iomem *dport_aer; 237 237 ); 238 + 239 + /* 240 + * RCD upstream port specific PCIe cap register 241 + * @pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB 242 + */ 243 + struct_group_tagged(cxl_rcd_regs, rcd_regs, 244 + void __iomem *rcd_pcie_cap; 245 + ); 238 246 }; 239 247 240 248 struct cxl_reg_map { ··· 312 304 struct cxl_dport; 313 305 resource_size_t cxl_rcd_component_reg_phys(struct device *dev, 314 306 struct cxl_dport *dport); 307 + int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport); 315 308 316 309 #define CXL_RESOURCE_NONE ((resource_size_t) -1) 317 310 #define CXL_TARGET_STRLEN 20
+100 -9
drivers/cxl/pci.c
··· 475 475 } 476 476 477 477 static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, 478 - struct cxl_register_map *map) 478 + struct cxl_register_map *map, 479 + struct cxl_dport *dport) 479 480 { 480 - struct cxl_dport *dport; 481 481 resource_size_t component_reg_phys; 482 482 483 483 *map = (struct cxl_register_map) { ··· 513 513 * is an RCH and try to extract the Component Registers from 514 514 * an RCRB. 515 515 */ 516 - if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) 517 - rc = cxl_rcrb_get_comp_regs(pdev, map); 516 + if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) { 517 + struct cxl_dport *dport; 518 + struct cxl_port *port __free(put_cxl_port) = 519 + cxl_pci_find_port(pdev, &dport); 520 + if (!port) 521 + return -EPROBE_DEFER; 518 522 519 - if (rc) 523 + rc = cxl_rcrb_get_comp_regs(pdev, map, dport); 524 + if (rc) 525 + return rc; 526 + 527 + rc = cxl_dport_map_rcd_linkcap(pdev, dport); 528 + if (rc) 529 + return rc; 530 + 531 + } else if (rc) { 520 532 return rc; 533 + } 521 534 522 535 return cxl_setup_regs(map); 523 536 } ··· 777 764 return 0; 778 765 } 779 766 780 - rc = cxl_mem_alloc_event_buf(mds); 781 - if (rc) 782 - return rc; 783 - 784 767 rc = cxl_event_get_int_policy(mds, &policy); 785 768 if (rc) 786 769 return rc; ··· 789 780 "FW still in control of Event Logs despite _OSC settings\n"); 790 781 return -EBUSY; 791 782 } 783 + 784 + rc = cxl_mem_alloc_event_buf(mds); 785 + if (rc) 786 + return rc; 792 787 793 788 rc = cxl_event_irqsetup(mds); 794 789 if (rc) ··· 819 806 820 807 return 0; 821 808 } 809 + 810 + static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width) 811 + { 812 + struct cxl_dev_state *cxlds = dev_get_drvdata(dev); 813 + struct cxl_memdev *cxlmd = cxlds->cxlmd; 814 + struct device *root_dev; 815 + struct cxl_dport *dport; 816 + struct cxl_port *root __free(put_cxl_port) = 817 + cxl_mem_find_port(cxlmd, &dport); 818 + 819 + if (!root) 820 + return 
-ENXIO; 821 + 822 + root_dev = root->uport_dev; 823 + if (!root_dev) 824 + return -ENXIO; 825 + 826 + guard(device)(root_dev); 827 + if (!root_dev->driver) 828 + return -ENXIO; 829 + 830 + switch (width) { 831 + case 2: 832 + return sysfs_emit(buf, "%#x\n", 833 + readw(dport->regs.rcd_pcie_cap + offset)); 834 + case 4: 835 + return sysfs_emit(buf, "%#x\n", 836 + readl(dport->regs.rcd_pcie_cap + offset)); 837 + default: 838 + return -EINVAL; 839 + } 840 + } 841 + 842 + static ssize_t rcd_link_cap_show(struct device *dev, 843 + struct device_attribute *attr, char *buf) 844 + { 845 + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32)); 846 + } 847 + static DEVICE_ATTR_RO(rcd_link_cap); 848 + 849 + static ssize_t rcd_link_ctrl_show(struct device *dev, 850 + struct device_attribute *attr, char *buf) 851 + { 852 + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16)); 853 + } 854 + static DEVICE_ATTR_RO(rcd_link_ctrl); 855 + 856 + static ssize_t rcd_link_status_show(struct device *dev, 857 + struct device_attribute *attr, char *buf) 858 + { 859 + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16)); 860 + } 861 + static DEVICE_ATTR_RO(rcd_link_status); 862 + 863 + static struct attribute *cxl_rcd_attrs[] = { 864 + &dev_attr_rcd_link_cap.attr, 865 + &dev_attr_rcd_link_ctrl.attr, 866 + &dev_attr_rcd_link_status.attr, 867 + NULL 868 + }; 869 + 870 + static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n) 871 + { 872 + struct device *dev = kobj_to_dev(kobj); 873 + struct pci_dev *pdev = to_pci_dev(dev); 874 + 875 + if (is_cxl_restricted(pdev)) 876 + return a->mode; 877 + 878 + return 0; 879 + } 880 + 881 + static struct attribute_group cxl_rcd_group = { 882 + .attrs = cxl_rcd_attrs, 883 + .is_visible = cxl_rcd_visible, 884 + }; 885 + __ATTRIBUTE_GROUPS(cxl_rcd); 822 886 823 887 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 824 888 { ··· 1106 1016 .id_table = cxl_mem_pci_tbl, 1107 1017 
.probe = cxl_pci_probe, 1108 1018 .err_handler = &cxl_error_handlers, 1019 + .dev_groups = cxl_rcd_groups, 1109 1020 .driver = { 1110 1021 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1111 1022 },
+20 -6
drivers/dax/dax-private.h
··· 40 40 struct device *youngest; 41 41 }; 42 42 43 + /** 44 + * struct dax_mapping - device to display mapping range attributes 45 + * @dev: device representing this range 46 + * @range_id: index within dev_dax ranges array 47 + * @id: ida of this mapping 48 + */ 43 49 struct dax_mapping { 44 50 struct device dev; 45 51 int range_id; 46 52 int id; 53 + }; 54 + 55 + /** 56 + * struct dev_dax_range - tuple representing a range of memory used by dev_dax 57 + * @pgoff: page offset 58 + * @range: resource-span 59 + * @mapping: reference to the dax_mapping for this range 60 + */ 61 + struct dev_dax_range { 62 + unsigned long pgoff; 63 + struct range range; 64 + struct dax_mapping *mapping; 47 65 }; 48 66 49 67 /** ··· 76 58 * @dev - device core 77 59 * @pgmap - pgmap for memmap setup / lifetime (driver owned) 78 60 * @nr_range: size of @ranges 79 - * @ranges: resource-span + pgoff tuples for the instance 61 + * @ranges: range tuples of memory used 80 62 */ 81 63 struct dev_dax { 82 64 struct dax_region *region; ··· 90 72 struct dev_pagemap *pgmap; 91 73 bool memmap_on_memory; 92 74 int nr_range; 93 - struct dev_dax_range { 94 - unsigned long pgoff; 95 - struct range range; 96 - struct dax_mapping *mapping; 97 - } *ranges; 75 + struct dev_dax_range *ranges; 98 76 }; 99 77 100 78 /*
+5 -5
fs/btrfs/ordered-data.c
··· 111 111 return NULL; 112 112 } 113 113 114 - static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, 115 - u64 len) 114 + static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, 115 + u64 len) 116 116 { 117 117 if (file_offset + len <= entry->file_offset || 118 118 entry->file_offset + entry->num_bytes <= file_offset) ··· 985 985 986 986 while (1) { 987 987 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); 988 - if (range_overlaps(entry, file_offset, len)) 988 + if (btrfs_range_overlaps(entry, file_offset, len)) 989 989 break; 990 990 991 991 if (entry->file_offset >= file_offset + len) { ··· 1114 1114 } 1115 1115 if (prev) { 1116 1116 entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); 1117 - if (range_overlaps(entry, file_offset, len)) 1117 + if (btrfs_range_overlaps(entry, file_offset, len)) 1118 1118 goto out; 1119 1119 } 1120 1120 if (next) { 1121 1121 entry = rb_entry(next, struct btrfs_ordered_extent, rb_node); 1122 - if (range_overlaps(entry, file_offset, len)) 1122 + if (btrfs_range_overlaps(entry, file_offset, len)) 1123 1123 goto out; 1124 1124 } 1125 1125 /* No ordered extent in the range */
+2
include/acpi/actbl1.h
··· 403 403 /* Flags for subtable above */ 404 404 405 405 #define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2) 406 + #define ACPI_CDAT_DSMAS_SHAREABLE (1 << 3) 407 + #define ACPI_CDAT_DSMAS_READ_ONLY (1 << 6) 406 408 407 409 /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */ 408 410
+16 -1
include/linux/range.h
··· 13 13 return range->end - range->start + 1; 14 14 } 15 15 16 - static inline bool range_contains(struct range *r1, struct range *r2) 16 + /* True if r1 completely contains r2 */ 17 + static inline bool range_contains(const struct range *r1, 18 + const struct range *r2) 17 19 { 18 20 return r1->start <= r2->start && r1->end >= r2->end; 21 + } 22 + 23 + /* True if any part of r1 overlaps r2 */ 24 + static inline bool range_overlaps(const struct range *r1, 25 + const struct range *r2) 26 + { 27 + return r1->start <= r2->end && r1->end >= r2->start; 19 28 } 20 29 21 30 int add_range(struct range *range, int az, int nr_range, ··· 39 30 int clean_sort_range(struct range *range, int az); 40 31 41 32 void sort_range(struct range *range, int nr_range); 33 + 34 + #define DEFINE_RANGE(_start, _end) \ 35 + (struct range) { \ 36 + .start = (_start), \ 37 + .end = (_end), \ 38 + } 42 39 43 40 #endif
+61
lib/test_printf.c
··· 386 386 static void __init 387 387 struct_resource(void) 388 388 { 389 + struct resource test_resource = { 390 + .start = 0xc0ffee00, 391 + .end = 0xc0ffee00, 392 + .flags = IORESOURCE_MEM, 393 + }; 394 + 395 + test("[mem 0xc0ffee00 flags 0x200]", 396 + "%pr", &test_resource); 397 + 398 + test_resource = (struct resource) { 399 + .start = 0xc0ffee, 400 + .end = 0xba5eba11, 401 + .flags = IORESOURCE_MEM, 402 + }; 403 + test("[mem 0x00c0ffee-0xba5eba11 flags 0x200]", 404 + "%pr", &test_resource); 405 + 406 + test_resource = (struct resource) { 407 + .start = 0xba5eba11, 408 + .end = 0xc0ffee, 409 + .flags = IORESOURCE_MEM, 410 + }; 411 + test("[mem 0xba5eba11-0x00c0ffee flags 0x200]", 412 + "%pr", &test_resource); 413 + 414 + test_resource = (struct resource) { 415 + .start = 0xba5eba11, 416 + .end = 0xba5eca11, 417 + .flags = IORESOURCE_MEM, 418 + }; 419 + 420 + test("[mem 0xba5eba11-0xba5eca11 flags 0x200]", 421 + "%pr", &test_resource); 422 + 423 + test_resource = (struct resource) { 424 + .start = 0xba11, 425 + .end = 0xca10, 426 + .flags = IORESOURCE_IO | 427 + IORESOURCE_DISABLED | 428 + IORESOURCE_UNSET, 429 + }; 430 + 431 + test("[io size 0x1000 disabled]", 432 + "%pR", &test_resource); 433 + } 434 + 435 + static void __init 436 + struct_range(void) 437 + { 438 + struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11, 439 + 0xc0ffee00ba5eba11); 440 + test("[range 0xc0ffee00ba5eba11]", "%pra", &test_range); 441 + 442 + test_range = DEFINE_RANGE(0xc0ffee, 0xba5eba11); 443 + test("[range 0x0000000000c0ffee-0x00000000ba5eba11]", 444 + "%pra", &test_range); 445 + 446 + test_range = DEFINE_RANGE(0xba5eba11, 0xc0ffee); 447 + test("[range 0x00000000ba5eba11-0x0000000000c0ffee]", 448 + "%pra", &test_range); 389 449 } 390 450 391 451 static void __init ··· 823 763 symbol_ptr(); 824 764 kernel_ptr(); 825 765 struct_resource(); 766 + struct_range(); 826 767 addr(); 827 768 escaped_str(); 828 769 hex_string();
+51 -6
lib/vsprintf.c
··· 1040 1040 }; 1041 1041 1042 1042 static noinline_for_stack 1043 + char *hex_range(char *buf, char *end, u64 start_val, u64 end_val, 1044 + struct printf_spec spec) 1045 + { 1046 + buf = number(buf, end, start_val, spec); 1047 + if (start_val == end_val) 1048 + return buf; 1049 + 1050 + if (buf < end) 1051 + *buf = '-'; 1052 + ++buf; 1053 + return number(buf, end, end_val, spec); 1054 + } 1055 + 1056 + static noinline_for_stack 1043 1057 char *resource_string(char *buf, char *end, struct resource *res, 1044 1058 struct printf_spec spec, const char *fmt) 1045 1059 { ··· 1129 1115 p = string_nocheck(p, pend, "size ", str_spec); 1130 1116 p = number(p, pend, resource_size(res), *specp); 1131 1117 } else { 1132 - p = number(p, pend, res->start, *specp); 1133 - if (res->start != res->end) { 1134 - *p++ = '-'; 1135 - p = number(p, pend, res->end, *specp); 1136 - } 1118 + p = hex_range(p, pend, res->start, res->end, *specp); 1137 1119 } 1138 1120 if (decode) { 1139 1121 if (res->flags & IORESOURCE_MEM_64) ··· 1144 1134 p = string_nocheck(p, pend, " flags ", str_spec); 1145 1135 p = number(p, pend, res->flags, default_flag_spec); 1146 1136 } 1137 + *p++ = ']'; 1138 + *p = '\0'; 1139 + 1140 + return string_nocheck(buf, end, sym, spec); 1141 + } 1142 + 1143 + static noinline_for_stack 1144 + char *range_string(char *buf, char *end, const struct range *range, 1145 + struct printf_spec spec, const char *fmt) 1146 + { 1147 + char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")]; 1148 + char *p = sym, *pend = sym + sizeof(sym); 1149 + 1150 + struct printf_spec range_spec = { 1151 + .field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */ 1152 + .flags = SPECIAL | SMALL | ZEROPAD, 1153 + .base = 16, 1154 + .precision = -1, 1155 + }; 1156 + 1157 + if (check_pointer(&buf, end, range, spec)) 1158 + return buf; 1159 + 1160 + p = string_nocheck(p, pend, "[range ", default_str_spec); 1161 + p = hex_range(p, pend, range->start, range->end, range_spec); 1147 1162 *p++ 
= ']'; 1148 1163 *p = '\0'; 1149 1164 ··· 2264 2229 return widen_string(buf, buf - buf_start, end, spec); 2265 2230 } 2266 2231 2232 + static noinline_for_stack 2233 + char *resource_or_range(const char *fmt, char *buf, char *end, void *ptr, 2234 + struct printf_spec spec) 2235 + { 2236 + if (*fmt == 'r' && fmt[1] == 'a') 2237 + return range_string(buf, end, ptr, spec, fmt); 2238 + return resource_string(buf, end, ptr, spec, fmt); 2239 + } 2240 + 2267 2241 int __init no_hash_pointers_enable(char *str) 2268 2242 { 2269 2243 if (no_hash_pointers) ··· 2321 2277 * - 'Bb' as above with module build ID (for use in backtraces) 2322 2278 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] 2323 2279 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] 2280 + * - 'ra' For struct ranges, e.g., [range 0x0000000000000000 - 0x00000000000000ff] 2324 2281 * - 'b[l]' For a bitmap, the number of bits is determined by the field 2325 2282 * width which must be explicitly specified either as part of the 2326 2283 * format string '%32b[l]' or through '%*b[l]', [l] selects ··· 2446 2401 return symbol_string(buf, end, ptr, spec, fmt); 2447 2402 case 'R': 2448 2403 case 'r': 2449 - return resource_string(buf, end, ptr, spec, fmt); 2404 + return resource_or_range(fmt, buf, end, ptr, spec); 2450 2405 case 'h': 2451 2406 return hex_string(buf, end, ptr, spec, fmt); 2452 2407 case 'b':