i3c: mipi-i3c-hci: Consolidate spinlocks

The MIPI I3C HCI driver currently uses separate spinlocks for different
contexts (PIO vs. DMA rings).  This split is unnecessary and complicates
upcoming fixes.  The driver does not support concurrent PIO and DMA
operation, and it only supports a single DMA ring, so a single lock is
sufficient for all paths.

Introduce a unified spinlock in struct i3c_hci, switch both PIO and DMA
code to use it, and remove the per-context locks.

No functional change is intended in this patch.

Fixes: 9ad9a52cce28 ("i3c/master: introduce the mipi-i3c-hci driver")
Cc: stable@vger.kernel.org
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://patch.msgid.link/20260306072451.11131-5-adrian.hunter@intel.com
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
This commit is contained in:
Adrian Hunter
2026-03-06 09:24:51 +02:00
committed by Alexandre Belloni
parent f3bcbfe1b8
commit fa12bb903b
4 changed files with 16 additions and 17 deletions

View File

@@ -926,6 +926,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
if (!hci)
return -ENOMEM;
spin_lock_init(&hci->lock);
/*
* Multi-bus instances share the same MMIO address range, but not
* necessarily in separate contiguous sub-ranges. To avoid overlapping

View File

@@ -131,7 +131,6 @@ struct hci_rh_data {
unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
unsigned int done_ptr, ibi_chunk_ptr;
struct hci_xfer **src_xfers;
spinlock_t lock;
struct completion op_done;
};
@@ -344,7 +343,6 @@ static int hci_dma_init(struct i3c_hci *hci)
goto err_out;
rh = &rings->headers[i];
rh->regs = hci->base_regs + offset;
spin_lock_init(&rh->lock);
init_completion(&rh->op_done);
rh->xfer_entries = XFER_RING_ENTRIES;
@@ -534,12 +532,12 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
}
/* take care to update the hardware enqueue pointer atomically */
spin_lock_irq(&rh->lock);
spin_lock_irq(&hci->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_ENQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock_irq(&rh->lock);
spin_unlock_irq(&hci->lock);
return 0;
}
@@ -637,12 +635,12 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
}
/* take care to update the software dequeue pointer atomically */
spin_lock(&rh->lock);
spin_lock(&hci->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
spin_unlock(&hci->lock);
}
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
@@ -823,12 +821,12 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
done:
/* take care to update the ibi dequeue pointer atomically */
spin_lock(&rh->lock);
spin_lock(&hci->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_IBI_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
spin_unlock(&hci->lock);
/* update the chunk pointer */
rh->ibi_chunk_ptr += ibi_chunks;

View File

@@ -50,6 +50,7 @@ struct i3c_hci {
const struct hci_io_ops *io;
void *io_data;
const struct hci_cmd_ops *cmd;
spinlock_t lock;
atomic_t next_cmd_tid;
bool irq_inactive;
u32 caps;

View File

@@ -123,7 +123,6 @@ struct hci_pio_ibi_data {
};
struct hci_pio_data {
spinlock_t lock;
struct hci_xfer *curr_xfer, *xfer_queue;
struct hci_xfer *curr_rx, *rx_queue;
struct hci_xfer *curr_tx, *tx_queue;
@@ -212,7 +211,6 @@ static int hci_pio_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = pio;
spin_lock_init(&pio->lock);
__hci_pio_init(hci, &size_val);
@@ -631,7 +629,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
xfer[i].data_left = xfer[i].data_len;
}
spin_lock_irq(&pio->lock);
spin_lock_irq(&hci->lock);
prev_queue_tail = pio->xfer_queue;
pio->xfer_queue = &xfer[n - 1];
if (pio->curr_xfer) {
@@ -645,7 +643,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
pio_reg_read(INTR_STATUS),
pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
spin_unlock_irq(&hci->lock);
return 0;
}
@@ -716,14 +714,14 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
struct hci_pio_data *pio = hci->io_data;
int ret;
spin_lock_irq(&pio->lock);
spin_lock_irq(&hci->lock);
dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
spin_unlock_irq(&hci->lock);
return ret;
}
@@ -1016,13 +1014,13 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
struct hci_pio_data *pio = hci->io_data;
u32 status;
spin_lock(&pio->lock);
spin_lock(&hci->lock);
status = pio_reg_read(INTR_STATUS);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
spin_unlock(&hci->lock);
return false;
}
@@ -1058,7 +1056,7 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
spin_unlock(&hci->lock);
return true;
}