cxl: Simplify cxl_rd_ops allocation and handling

A root decoder's callback handlers are collected in struct cxl_rd_ops.
The structure is dynamically allocated, though it contains only a few
pointers. This also requires checking two pointers to determine the
existence of a callback.

Simplify the allocation, release and handler check by embedding the
ops statically in struct cxl_root_decoder.

Implementation is equivalent to how struct cxl_root_ops handles the
callbacks.

[ dj: Fix spelling error in commit log. ]

Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Robert Richter <rrichter@amd.com>
Link: https://patch.msgid.link/20251114075844.1315805-2-rrichter@amd.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
This commit is contained in:
Robert Richter
2025-11-14 08:58:41 +01:00
committed by Dave Jiang
parent e9a6fb0bcd
commit 6123133ee9
4 changed files with 8 additions and 23 deletions

View File

@@ -475,12 +475,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->qos_class = cfmws->qtg_id;
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL);
if (!cxlrd->ops)
return -ENOMEM;
cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps;
cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps;
cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps;
}
rc = cxl_decoder_add(cxld);

View File

@@ -459,7 +459,6 @@ static void cxl_root_decoder_release(struct device *dev)
if (atomic_read(&cxlrd->region_id) >= 0)
memregion_free(atomic_read(&cxlrd->region_id));
__cxl_decoder_release(&cxlrd->cxlsd.cxld);
kfree(cxlrd->ops);
kfree(cxlrd);
}

View File

@@ -2924,16 +2924,6 @@ static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
return false;
}
static bool has_hpa_to_spa(struct cxl_root_decoder *cxlrd)
{
return cxlrd->ops && cxlrd->ops->hpa_to_spa;
}
static bool has_spa_to_hpa(struct cxl_root_decoder *cxlrd)
{
return cxlrd->ops && cxlrd->ops->spa_to_hpa;
}
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
@@ -2988,8 +2978,8 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
if (has_hpa_to_spa(cxlrd))
hpa = cxlrd->ops->hpa_to_spa(cxlrd, hpa);
if (cxlrd->ops.hpa_to_spa)
hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
@@ -2998,7 +2988,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
}
/* Simple chunk check, by pos & gran, only applies to modulo decodes */
if (!has_hpa_to_spa(cxlrd) && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
if (!cxlrd->ops.hpa_to_spa && !cxl_is_hpa_in_chunk(hpa, cxlr, pos))
return ULLONG_MAX;
return hpa;
@@ -3033,8 +3023,8 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
* If the root decoder has SPA to CXL HPA callback, use it. Otherwise
* CXL HPA is assumed to equal SPA.
*/
if (has_spa_to_hpa(cxlrd)) {
hpa = cxlrd->ops->spa_to_hpa(cxlrd, p->res->start + offset);
if (cxlrd->ops.spa_to_hpa) {
hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
hpa_offset = hpa - p->res->start;
} else {
hpa_offset = offset;

View File

@@ -451,7 +451,7 @@ struct cxl_root_decoder {
void *platform_data;
struct mutex range_lock;
int qos_class;
struct cxl_rd_ops *ops;
struct cxl_rd_ops ops;
struct cxl_switch_decoder cxlsd;
};