Merge branch 'for-7.1/cxl-consolidate-endpoint' into cxl-for-next
Add code to ensure the endpoint has completed initialization before usage:

cxl/pci: Check memdev driver binding status in cxl_reset_done()
cxl/pci: Hold memdev lock in cxl_event_trace_record()
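For context before the hunks, a minimal sketch of the pattern this series applies (illustrative only; example_memdev_op() is a hypothetical caller, not code from this merge). guard(device) from <linux/cleanup.h> takes device_lock() and releases it automatically at scope exit; with the device lock held, the driver core cannot unbind the memdev, so a NULL ->driver check reliably detects an endpoint that has not finished initialization (or is tearing down):

/*
 * Illustrative sketch, not from the patch: example_memdev_op() is a
 * made-up caller showing the scope-based device-lock pattern used in
 * cxl_event_trace_record() and cxl_reset_done() below.
 */
static int example_memdev_op(struct cxl_memdev *cxlmd)
{
	guard(device)(&cxlmd->dev);	/* device_lock(); auto-unlock at '}' */

	/*
	 * With the device lock held, ->dev.driver cannot change under
	 * us, so a NULL check is a reliable "is the endpoint fully
	 * initialized and still bound?" test.
	 */
	if (!cxlmd->dev.driver)
		return -ENXIO;		/* the guard still unlocks here */

	/* ... cxlmd->endpoint is now safe to dereference ... */
	return 0;
}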
@@ -893,7 +893,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");

-void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+void cxl_event_trace_record(struct cxl_memdev *cxlmd,
 			    enum cxl_event_log_type type,
 			    enum cxl_event_type event_type,
 			    const uuid_t *uuid, union cxl_event *evt)

@@ -920,6 +920,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 	 * translations. Take topology mutation locks and lookup
 	 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
 	 */
+	guard(device)(&cxlmd->dev);
 	guard(rwsem_read)(&cxl_rwsem.region);
 	guard(rwsem_read)(&cxl_rwsem.dpa);

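(Aside: the guard() calls above come from the <linux/cleanup.h> machinery. As a rough conceptual sketch only, with example_up_read() and EXAMPLE_READ_GUARD() being made-up names, the real DEFINE_GUARD goes through DEFINE_CLASS, the mechanism amounts to:

/*
 * Conceptual sketch of what guard(rwsem_read)(&sem) boils down to;
 * the real macros in <linux/cleanup.h> are more general.
 */
static inline void example_up_read(struct rw_semaphore **sp)
{
	up_read(*sp);			/* "destructor": runs at scope exit */
}

#define EXAMPLE_READ_GUARD(sem)						\
	struct rw_semaphore *__guard					\
		__attribute__((cleanup(example_up_read))) = (sem);	\
	down_read(__guard)		/* "constructor": locks immediately */

The upshot for review: every early return in the patched functions drops its locks correctly without explicit unlock calls.)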
@@ -968,7 +969,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 }
 EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");

-static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+static void __cxl_event_trace_record(struct cxl_memdev *cxlmd,
 				     enum cxl_event_log_type type,
 				     struct cxl_event_record_raw *record)
 {

@@ -2967,13 +2967,15 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
 {
 	struct cxl_dpa_to_region_context ctx;
-	struct cxl_port *port;
+	struct cxl_port *port = cxlmd->endpoint;
+
+	if (!cxlmd->dev.driver)
+		return NULL;

 	ctx = (struct cxl_dpa_to_region_context) {
 		.dpa = dpa,
 	};
-	port = cxlmd->endpoint;
-	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+	if (cxl_num_decoders_committed(port))
 		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);

 	return ctx.cxlr;

@@ -864,7 +864,7 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 				  unsigned long *cmds);
 void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
-void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+void cxl_event_trace_record(struct cxl_memdev *cxlmd,
 			    enum cxl_event_log_type type,
 			    enum cxl_event_type event_type,
 			    const uuid_t *uuid, union cxl_event *evt);

@@ -1056,6 +1056,9 @@ static void cxl_reset_done(struct pci_dev *pdev)
 	 * that no longer exists.
 	 */
+	guard(device)(&cxlmd->dev);
+	if (!cxlmd->dev.driver)
+		return;

 	if (cxlmd->endpoint &&
 	    cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
 		device_for_each_child(&cxlmd->endpoint->dev, NULL,

@@ -911,6 +911,7 @@ static inline void device_unlock(struct device *dev)
 }

 DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+DEFINE_GUARD_COND(device, _intr, device_lock_interruptible(_T), _RET == 0)

 static inline void device_lock_assert(struct device *dev)
 {
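The new DEFINE_GUARD_COND line generates a device_intr guard class whose lock acquisition can fail: device_lock_interruptible() returns non-zero when interrupted by a signal, and the "_RET == 0" condition marks the guard as held only on success. A sketch of how a caller might consume it via scoped_cond_guard() from <linux/cleanup.h>; example_locked_op() is hypothetical and not part of this merge:

/*
 * Hypothetical caller of the new conditional guard (illustration
 * only). scoped_cond_guard() executes the fail statement when the
 * interruptible lock attempt does not succeed.
 */
static int example_locked_op(struct device *dev)
{
	scoped_cond_guard(device_intr, return -EINTR, dev) {
		/* device lock held here; released when the block exits */
		dev_info(dev, "doing work under the device lock\n");
	}
	return 0;
}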