cxl/core: Fold cxl_trace_hpa() into cxl_dpa_to_hpa()

Although cxl_trace_hpa() is used to populate TRACE EVENTs with HPA
addresses, the work it performs is a DPA to HPA translation, not a
trace. Tidy up this naming by moving the minimal work done in
cxl_trace_hpa() into cxl_dpa_to_hpa() and use cxl_dpa_to_hpa()
for trace event callbacks.

Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Robert Richter <rrichter@amd.com>
Link: https://patch.msgid.link/452a9b0c525b774c72d9d5851515ffa928750132.1719980933.git.alison.schofield@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>

authored by Alison Schofield and committed by Dave Jiang 9aa5f623 22a40d14

+20 -27
+4 -4
drivers/cxl/core/core.h
··· 28 28 void cxl_region_exit(void); 29 29 int cxl_get_poison_by_endpoint(struct cxl_port *port); 30 30 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); 31 - u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 32 - u64 dpa); 31 + u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 32 + u64 dpa); 33 33 34 34 #else 35 - static inline u64 36 - cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa) 35 + static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, 36 + const struct cxl_memdev *cxlmd, u64 dpa) 37 37 { 38 38 return ULLONG_MAX; 39 39 }
+1 -1
drivers/cxl/core/mbox.c
··· 878 878 dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK; 879 879 cxlr = cxl_dpa_to_region(cxlmd, dpa); 880 880 if (cxlr) 881 - hpa = cxl_trace_hpa(cxlr, cxlmd, dpa); 881 + hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa); 882 882 883 883 if (event_type == CXL_CPER_EVENT_GEN_MEDIA) 884 884 trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+13 -20
drivers/cxl/core/region.c
··· 2749 2749 return false; 2750 2750 } 2751 2751 2752 - static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, 2753 - struct cxl_endpoint_decoder *cxled) 2752 + u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 2753 + u64 dpa) 2754 2754 { 2755 2755 u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; 2756 2756 struct cxl_region_params *p = &cxlr->params; 2757 - int pos = cxled->pos; 2757 + struct cxl_endpoint_decoder *cxled = NULL; 2758 2758 u16 eig = 0; 2759 2759 u8 eiw = 0; 2760 + int pos; 2760 2761 2762 + for (int i = 0; i < p->nr_targets; i++) { 2763 + cxled = p->targets[i]; 2764 + if (cxlmd == cxled_to_memdev(cxled)) 2765 + break; 2766 + } 2767 + if (!cxled || cxlmd != cxled_to_memdev(cxled)) 2768 + return ULLONG_MAX; 2769 + 2770 + pos = cxled->pos; 2761 2771 ways_to_eiw(p->interleave_ways, &eiw); 2762 2772 granularity_to_eig(p->interleave_granularity, &eig); 2763 2773 ··· 2805 2795 return ULLONG_MAX; 2806 2796 2807 2797 return hpa; 2808 - } 2809 - 2810 - u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 2811 - u64 dpa) 2812 - { 2813 - struct cxl_region_params *p = &cxlr->params; 2814 - struct cxl_endpoint_decoder *cxled = NULL; 2815 - 2816 - for (int i = 0; i < p->nr_targets; i++) { 2817 - cxled = p->targets[i]; 2818 - if (cxlmd == cxled_to_memdev(cxled)) 2819 - break; 2820 - } 2821 - if (!cxled || cxlmd != cxled_to_memdev(cxled)) 2822 - return ULLONG_MAX; 2823 - 2824 - return cxl_dpa_to_hpa(dpa, cxlr, cxled); 2825 2798 } 2826 2799 2827 2800 static struct lock_class_key cxl_pmem_region_key;
+2 -2
drivers/cxl/core/trace.h
··· 704 704 if (cxlr) { 705 705 __assign_str(region); 706 706 memcpy(__entry->uuid, &cxlr->params.uuid, 16); 707 - __entry->hpa = cxl_trace_hpa(cxlr, cxlmd, 708 - __entry->dpa); 707 + __entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd, 708 + __entry->dpa); 709 709 } else { 710 710 __assign_str(region); 711 711 memset(__entry->uuid, 0, 16);