mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 16:01:44 -04:00
cxl/pci: Hold memdev lock in cxl_event_trace_record()
cxl_event_config() invokes cxl_mem_get_event_record() to get the remaining event logs from the CXL device during cxl_pci_probe(). If CXL memdev probing failed before that, it is possible to access an invalid endpoint. So add a cxlmd->driver binding status check inside cxl_dpa_to_region() to ensure the corresponding endpoint is valid. Besides, cxl_event_trace_record() needs to hold the memdev lock when invoking cxl_dpa_to_region() to ensure that memdev probing has completed. It is possible that cxl_event_trace_record() is invoked during CXL memdev probing, especially when a user or cxl_acpi triggers CXL memdev re-probing. Suggested-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Li Ming <ming.li@zohomail.com> Link: https://patch.msgid.link/20260314-fix_access_endpoint_without_drv_check-v2-3-4c09edf2e1db@zohomail.com Signed-off-by: Dave Jiang <dave.jiang@intel.com>
This commit is contained in:
@@ -893,7 +893,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
|
||||
|
||||
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
|
||||
void cxl_event_trace_record(struct cxl_memdev *cxlmd,
|
||||
enum cxl_event_log_type type,
|
||||
enum cxl_event_type event_type,
|
||||
const uuid_t *uuid, union cxl_event *evt)
|
||||
@@ -920,6 +920,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
|
||||
* translations. Take topology mutation locks and lookup
|
||||
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
|
||||
*/
|
||||
guard(device)(&cxlmd->dev);
|
||||
guard(rwsem_read)(&cxl_rwsem.region);
|
||||
guard(rwsem_read)(&cxl_rwsem.dpa);
|
||||
|
||||
@@ -968,7 +969,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
|
||||
|
||||
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
|
||||
static void __cxl_event_trace_record(struct cxl_memdev *cxlmd,
|
||||
enum cxl_event_log_type type,
|
||||
struct cxl_event_record_raw *record)
|
||||
{
|
||||
|
||||
@@ -2950,13 +2950,15 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
|
||||
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
|
||||
{
|
||||
struct cxl_dpa_to_region_context ctx;
|
||||
struct cxl_port *port;
|
||||
struct cxl_port *port = cxlmd->endpoint;
|
||||
|
||||
if (!cxlmd->dev.driver)
|
||||
return NULL;
|
||||
|
||||
ctx = (struct cxl_dpa_to_region_context) {
|
||||
.dpa = dpa,
|
||||
};
|
||||
port = cxlmd->endpoint;
|
||||
if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
|
||||
if (cxl_num_decoders_committed(port))
|
||||
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
|
||||
|
||||
return ctx.cxlr;
|
||||
|
||||
@@ -864,7 +864,7 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
|
||||
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
|
||||
unsigned long *cmds);
|
||||
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
|
||||
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
|
||||
void cxl_event_trace_record(struct cxl_memdev *cxlmd,
|
||||
enum cxl_event_log_type type,
|
||||
enum cxl_event_type event_type,
|
||||
const uuid_t *uuid, union cxl_event *evt);
|
||||
|
||||
Reference in New Issue
Block a user