Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxl: Simplify cxl_rd_ops allocation and handling

A root decoder's callback handlers are collected in struct cxl_rd_ops.
The structure is dynamically allocated, though it contains only a few
pointers. This also requires checking two pointers to determine the
existence of a callback.

Simplify the allocation, release and handler check by embedding the
ops statically in struct cxl_root_decoder.

Implementation is equivalent to how struct cxl_root_ops handles the
callbacks.

[ dj: Fix spelling error in commit log. ]

Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Robert Richter <rrichter@amd.com>
Link: https://patch.msgid.link/20251114075844.1315805-2-rrichter@amd.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>

authored by

Robert Richter and committed by
Dave Jiang
6123133e e9a6fb0b

+8 -23
+2 -6
drivers/cxl/acpi.c
··· 475 475 cxlrd->qos_class = cfmws->qtg_id; 476 476 477 477 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { 478 - cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL); 479 - if (!cxlrd->ops) 480 - return -ENOMEM; 481 - 482 - cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps; 483 - cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps; 478 + cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps; 479 + cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps; 484 480 } 485 481 486 482 rc = cxl_decoder_add(cxld);
-1
drivers/cxl/core/port.c
··· 459 459 if (atomic_read(&cxlrd->region_id) >= 0) 460 460 memregion_free(atomic_read(&cxlrd->region_id)); 461 461 __cxl_decoder_release(&cxlrd->cxlsd.cxld); 462 - kfree(cxlrd->ops); 463 462 kfree(cxlrd); 464 463 } 465 464
+5 -15
drivers/cxl/core/region.c
··· 2924 2924 return false; 2925 2925 } 2926 2926 2927 - static bool has_hpa_to_spa(struct cxl_root_decoder *cxlrd) 2928 - { 2929 - return cxlrd->ops && cxlrd->ops->hpa_to_spa; 2930 - } 2931 - 2932 - static bool has_spa_to_hpa(struct cxl_root_decoder *cxlrd) 2933 - { 2934 - return cxlrd->ops && cxlrd->ops->spa_to_hpa; 2935 - } 2936 - 2937 2927 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, 2938 2928 u64 dpa) 2939 2929 { ··· 2978 2988 hpa = hpa_offset + p->res->start + p->cache_size; 2979 2989 2980 2990 /* Root decoder translation overrides typical modulo decode */ 2981 - if (has_hpa_to_spa(cxlrd)) 2982 - hpa = cxlrd->ops->hpa_to_spa(cxlrd, hpa); 2991 + if (cxlrd->ops.hpa_to_spa) 2992 + hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa); 2983 2993 2984 2994 if (!cxl_resource_contains_addr(p->res, hpa)) { 2985 2995 dev_dbg(&cxlr->dev, ··· 2988 2998 } 2989 2999 2990 3000 /* Simple chunk check, by pos & gran, only applies to modulo decodes */ 2991 - if (!has_hpa_to_spa(cxlrd) && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos))) 3001 + if (!cxlrd->ops.hpa_to_spa && !cxl_is_hpa_in_chunk(hpa, cxlr, pos)) 2992 3002 return ULLONG_MAX; 2993 3003 2994 3004 return hpa; ··· 3023 3033 * If the root decoder has SPA to CXL HPA callback, use it. Otherwise 3024 3034 * CXL HPA is assumed to equal SPA. 3025 3035 */ 3026 - if (has_spa_to_hpa(cxlrd)) { 3027 - hpa = cxlrd->ops->spa_to_hpa(cxlrd, p->res->start + offset); 3036 + if (cxlrd->ops.spa_to_hpa) { 3037 + hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset); 3028 3038 hpa_offset = hpa - p->res->start; 3029 3039 } else { 3030 3040 hpa_offset = offset;
+1 -1
drivers/cxl/cxl.h
··· 451 451 void *platform_data; 452 452 struct mutex range_lock; 453 453 int qos_class; 454 - struct cxl_rd_ops *ops; 454 + struct cxl_rd_ops ops; 455 455 struct cxl_switch_decoder cxlsd; 456 456 }; 457 457