From 63ec9be13372759511ea868dbc59f439e936d2c6 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:54 -0600
Subject: [PATCH 1/7] net: ipa: move mutex calls into __gsi_channel_stop()

Move the mutex calls out of gsi_channel_stop_retry() and into
__gsi_channel_stop(), to make the latter more semantically similar
to __gsi_channel_start().

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/gsi.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 53640447bf12..f0432c965168 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -910,11 +910,8 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 static int gsi_channel_stop_retry(struct gsi_channel *channel)
 {
 	u32 retries = GSI_CHANNEL_STOP_RETRIES;
-	struct gsi *gsi = channel->gsi;
 	int ret;
 
-	mutex_lock(&gsi->mutex);
-
 	do {
 		ret = gsi_channel_stop_command(channel);
 		if (ret != -EAGAIN)
@@ -922,24 +919,33 @@ static int gsi_channel_stop_retry(struct gsi_channel *channel)
 		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
 	} while (retries--);
 
-	mutex_unlock(&gsi->mutex);
-
 	return ret;
 }
 
 static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
 {
+	struct gsi *gsi = channel->gsi;
 	int ret;
 
 	/* Wait for any underway transactions to complete before stopping. */
 	gsi_channel_trans_quiesce(channel);
 
-	ret = stop ? gsi_channel_stop_retry(channel) : 0;
-	/* Finally, ensure NAPI polling has finished. */
-	if (!ret)
-		napi_synchronize(&channel->napi);
+	if (!stop)
+		return 0;
 
-	return ret;
+	mutex_lock(&gsi->mutex);
+
+	ret = gsi_channel_stop_retry(channel);
+
+	mutex_unlock(&gsi->mutex);
+
+	if (ret)
+		return ret;
+
+	/* Ensure NAPI polling has finished. */
+	napi_synchronize(&channel->napi);
+
+	return 0;
 }
 
 /* Stop a started channel */
@@ -948,11 +954,11 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 	int ret;
 
-	/* Only disable the completion interrupt if stop is successful */
 	ret = __gsi_channel_stop(channel, true);
 	if (ret)
 		return ret;
 
+	/* Disable the completion interrupt and NAPI if successful */
 	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
 	napi_disable(&channel->napi);
 

From b1750723c99c5a4d9b452b5e51a9fd3227fceecb Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:55 -0600
Subject: [PATCH 2/7] net: ipa: synchronize NAPI only for suspend

When stopping a channel, gsi_channel_stop() will ensure NAPI
polling is complete when it calls napi_disable(). So there is no
need to call napi_synchronize() in that case. Move the call to
napi_synchronize() out of __gsi_channel_stop() and into
gsi_channel_suspend(), so it's only used where needed.

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/gsi.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index f0432c965168..60eb765c5364 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -939,13 +939,7 @@ static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
 
 	mutex_unlock(&gsi->mutex);
 
-	if (ret)
-		return ret;
-
-	/* Ensure NAPI polling has finished. */
-	napi_synchronize(&channel->napi);
-
-	return 0;
+	return ret;
 }
 
 /* Stop a started channel */
@@ -987,8 +981,16 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	int ret;
 
-	return __gsi_channel_stop(channel, stop);
+	ret = __gsi_channel_stop(channel, stop);
+	if (ret)
+		return ret;
+
+	/* Ensure NAPI polling has finished. */
+	napi_synchronize(&channel->napi);
+
+	return 0;
 }
 
 /* Resume a suspended channel (starting will be requested if STOPPED) */

From 3f77c926f649eed686f36a1e6888abb698146a2a Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:56 -0600
Subject: [PATCH 3/7] net: ipa: do not cache event ring state

An event ring's state only needs to be known when it is allocated,
reset, or deallocated. We check an event ring's state both before
and after performing an event ring control command that changes its
state. These are only issued at startup and shutdown, so there is
very little value in caching the state.

Stop recording a copy of the event ring's last known state, and
instead fetch the true state from hardware whenever it's needed.
In such cases, *do* record the state in a local variable, in case
an error message reports it (so the value reported is the value
seen).

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/gsi.c | 39 +++++++++++++++++++++------------------
 drivers/net/ipa/gsi.h |  1 -
 2 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 60eb765c5364..511c94f66036 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -408,30 +408,31 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 		return;
 
 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
-		opcode, evt_ring_id, evt_ring->state);
+		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
 }
 
 /* Allocate an event ring in NOT_ALLOCATED state */
 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+	enum gsi_evt_ring_state state;
 
 	/* Get initial event ring state */
-	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
 		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return -EINVAL;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_ALLOCATED)
 		return 0;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 
 	return -EIO;
 }
@@ -439,45 +440,48 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-	enum gsi_evt_ring_state state = evt_ring->state;
+	enum gsi_evt_ring_state state;
 
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
 	    state != GSI_EVT_RING_STATE_ERROR) {
 		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_ALLOCATED)
 		return;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 }
 
 /* Issue a hardware de-allocation request for an allocated event ring */
 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+	enum gsi_evt_ring_state state;
 
-	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
 		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
 		return;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 }
 
 /* Fetch the current state of a channel from hardware */
@@ -1107,7 +1111,6 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
 		event_mask ^= BIT(evt_ring_id);
 
 		evt_ring = &gsi->evt_ring[evt_ring_id];
-		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
 
 		complete(&evt_ring->completion);
 	}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 96c9aed397aa..d674db0ba4eb 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -142,7 +142,6 @@ enum gsi_evt_ring_state {
 struct gsi_evt_ring {
 	struct gsi_channel *channel;
 	struct completion completion;	/* signals event ring state changes */
-	enum gsi_evt_ring_state state;
 	struct gsi_ring ring;
 };
 

From d5bc5015eb9d64cbd14e467db1a56db1472d0d6c Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:57 -0600
Subject: [PATCH 4/7] net: ipa: remove two unused register definitions

We do not support inter-EE channel or event ring commands. Inter-EE
interrupts are disabled (and never re-enabled) for all channels and
event rings, so we have no need for the GSI registers that clear
those interrupt conditions. So remove their definitions.

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/gsi_reg.h | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 0e138bbd8205..299456e70f28 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -59,16 +59,6 @@
 #define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
 			(0x0000c01c + 0x1000 * (ee))
 
-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
-			GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
-			(0x0000c028 + 0x1000 * (ee))
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
-			GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
-			(0x0000c02c + 0x1000 * (ee))
-
 #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
 		GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
 #define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \

From 9af5ccf32383005070092e51b15cee51584323c0 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:58 -0600
Subject: [PATCH 5/7] net: ipa: use a Boolean rather than count when
 replenishing

The count argument to ipa_endpoint_replenish() is only ever 0 or 1,
and always will be (because we always handle each receive buffer in
a single transaction). Rename the argument to be add_one and change
it to be Boolean.

Update the function description to reflect the current code.

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/ipa_endpoint.c | 35 ++++++++++++++++++-----------------
 1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 7a46c790afbe..bff5d6ffd118 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1020,31 +1020,34 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 }
 
 /**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
  *
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 {
 	struct gsi *gsi;
 	u32 backlog;
 
 	if (!endpoint->replenish_enabled) {
-		if (count)
-			atomic_add(count, &endpoint->replenish_saved);
+		if (add_one)
+			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
-
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
 
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	return;
 
@@ -1052,8 +1055,8 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
 	/* The last one didn't succeed, so fix the backlog */
 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again. It's unlikely, but if we fail to supply even
@@ -1080,7 +1083,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, 0);
+		ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1099,7 +1102,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, 0);
+	ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1300,7 +1303,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, 1);
+	ipa_endpoint_replenish(endpoint, true);
 
 	if (trans->cancelled)
 		return;

From 4873537430e5b6bbfc505a6a7b07a7c5e92ddffc Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:10:59 -0600
Subject: [PATCH 6/7] net: ipa: get rid of status size constraint

There is a build-time check that the packet status structure is a
multiple of 4 bytes in size. It's not clear where that constraint
comes from, but the structure defines what hardware provides so its
definition won't change. Get rid of the check; it adds no value.

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/ipa_endpoint.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index bff5d6ffd118..7209ee3c3124 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -174,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 	enum ipa_endpoint_name name;
 	u32 limit;
 
-	/* Not sure where this constraint come from... */
-	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 			count, IPA_ENDPOINT_COUNT);

From cd1150098f2cc7bd05740c105488c293f6761f5a Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Fri, 5 Feb 2021 16:11:00 -0600
Subject: [PATCH 7/7] net: ipa: avoid field overflow

It's possible that the length passed to ipa_header_size_encoded()
is larger than what can be represented by the HDR_LEN field alone
(starting with IPA v4.5).
If we attempted that, u32_encode_bits() would trigger a build-time
error.

Avoid this problem by masking off high-order bits of the value
encoded as the lower portion of the header length. The same sort
of problem exists in ipa_metadata_offset_encoded(), so implement
the same fix there.

Signed-off-by: Alex Elder
Signed-off-by: Jakub Kicinski
---
 drivers/net/ipa/ipa_reg.h | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e6b0827a244e..732e691e9aa6 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
 static inline u32 ipa_header_size_encoded(enum ipa_version version,
 					  u32 header_size)
 {
+	u32 size = header_size & field_mask(HDR_LEN_FMASK);
 	u32 val;
 
-	val = u32_encode_bits(header_size, HDR_LEN_FMASK);
-	if (version < IPA_VERSION_4_5)
+	val = u32_encode_bits(size, HDR_LEN_FMASK);
+	if (version < IPA_VERSION_4_5) {
+		/* ipa_assert(header_size == size); */
 		return val;
+	}
 
 	/* IPA v4.5 adds a few more most-significant bits */
-	header_size >>= hweight32(HDR_LEN_FMASK);
-	val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+	size = header_size >> hweight32(HDR_LEN_FMASK);
+	val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
 
 	return val;
 }
@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
 static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
 					      u32 offset)
 {
+	u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
 	u32 val;
 
-	val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
-	if (version < IPA_VERSION_4_5)
+	val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+	if (version < IPA_VERSION_4_5) {
+		/* ipa_assert(offset == off); */
 		return val;
+	}
 
 	/* IPA v4.5 adds a few more most-significant bits */
-	offset >>= hweight32(HDR_OFST_METADATA_FMASK);
-	val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+	off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
+	val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
 
 	return val;
 }
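
A worked example of the encoding that patch 7 makes explicit may help when
reviewing it. The stand-alone sketch below is illustrative only and is not
part of the series: field_mask(), u32_encode_bits() and hweight32() here are
local approximations of the kernel helpers in <linux/bitfield.h> and
<linux/bitops.h>, and the two mask values merely assume a 6-bit HDR_LEN
field with a 2-bit HDR_LEN_MSB extension rather than quoting ipa_reg.h.

/*
 * Stand-alone illustration (not part of the series). It mimics the flow
 * of ipa_header_size_encoded() after the patch for a header size that
 * does not fit in the low HDR_LEN field. All helpers and mask values
 * here are assumptions made for the example, not kernel definitions.
 */
#include <stdio.h>

#define HDR_LEN_FMASK		0x0000003fu	/* bits 0-5 (assumed) */
#define HDR_LEN_MSB_FMASK	0x30000000u	/* bits 28-29 (assumed) */

/* Rough stand-in for the kernel's field_mask(): mask shifted down to bit 0 */
static unsigned int field_mask(unsigned int fmask)
{
	return fmask >> __builtin_ctz(fmask);
}

/* Rough stand-in for u32_encode_bits(): mask the value, shift it into place */
static unsigned int u32_encode_bits(unsigned int v, unsigned int fmask)
{
	return (v & field_mask(fmask)) << __builtin_ctz(fmask);
}

/* Rough stand-in for hweight32(): number of bits set in the mask */
static unsigned int hweight32(unsigned int v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	unsigned int header_size = 0x8a;	/* too large for a 6-bit HDR_LEN */
	unsigned int size = header_size & field_mask(HDR_LEN_FMASK);
	unsigned int val;

	/* Low-order portion: all that pre-v4.5 IPA can encode */
	val = u32_encode_bits(size, HDR_LEN_FMASK);

	/* IPA v4.5+: the remaining high-order bits go into HDR_LEN_MSB */
	size = header_size >> hweight32(HDR_LEN_FMASK);
	val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);

	/* Prints: header_size 0x8a -> encoded value 0x2000000a */
	printf("header_size 0x%x -> encoded value 0x%08x\n", header_size, val);

	return 0;
}

Under those assumptions a header size of 0x8a splits into 0x0a for HDR_LEN
and 0x2 for HDR_LEN_MSB, which is exactly the masking and shifting the patch
performs with field_mask() and hweight32() instead of letting the high-order
bits overflow the low field.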