iio: buffer-dmaengine: split requesting DMA channel from allocating buffer

Refactor the IIO dmaengine buffer code to split requesting the DMA
channel from allocating the buffer. We want to be able to add a new
function where the IIO device driver manages the DMA channel, so these
two actions need to be separate.

To do this, calling dma_request_chan() is moved from
iio_dmaengine_buffer_alloc() to iio_dmaengine_buffer_setup_ext(). A new
__iio_dmaengine_buffer_setup_ext() helper function is added to simplify
error unwinding and will also be used by a new function in a later
patch.

iio_dmaengine_buffer_free() now only frees the buffer and does not
release the DMA channel. A new iio_dmaengine_buffer_teardown() function
is added to unwind everything done in iio_dmaengine_buffer_setup_ext().
This keeps the API symmetrical, with the obvious pairs alloc/free and
setup/teardown.

The call to dma_get_slave_caps() in iio_dmaengine_buffer_alloc() is moved
earlier so that no gotos are needed for error unwinding.
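
To illustrate the resulting pairing from a consumer driver's point of view,
a minimal sketch (hypothetical driver code, not part of this patch; dev,
indio_dev and the "rx" channel name are placeholders):

	struct iio_buffer *buffer;

	/* One call now requests the DMA channel and allocates/attaches the buffer. */
	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
						IIO_BUFFER_DIRECTION_IN);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* ... */

	/* One call frees the buffer and then releases the DMA channel. */
	iio_dmaengine_buffer_teardown(buffer);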

Reviewed-by: Nuno Sa <nuno.sa@analog.com>
Signed-off-by: David Lechner <dlechner@baylibre.com>
Link: https://patch.msgid.link/20250207-dlech-mainline-spi-engine-offload-2-v8-8-e48a489be48c@baylibre.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
David Lechner
2025-02-07 14:09:05 -06:00
committed by Jonathan Cameron
parent a570114db8
commit 4fe7fd17fe
4 changed files with 68 additions and 50 deletions

View File

@@ -305,7 +305,7 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
 static void axi_adc_free_buffer(struct iio_backend *back,
 				struct iio_buffer *buffer)
 {
-	iio_dmaengine_buffer_free(buffer);
+	iio_dmaengine_buffer_teardown(buffer);
 }
 
 static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,

View File

@@ -206,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
 
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
  *
  * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
  *
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
 	unsigned int width, src_width, dest_width;
 	struct dma_slave_caps caps;
-	struct dma_chan *chan;
 	int ret;
 
+	ret = dma_get_slave_caps(chan, &caps);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
 	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_chan(dev, channel);
-	if (IS_ERR(chan)) {
-		ret = PTR_ERR(chan);
-		goto err_free;
-	}
-
-	ret = dma_get_slave_caps(chan, &caps);
-	if (ret < 0)
-		goto err_release;
-
 	/* Needs to be aligned to the maximum of the minimums */
 	if (caps.src_addr_widths)
 		src_width = __ffs(caps.src_addr_widths);
@@ -262,12 +252,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
 	return &dmaengine_buffer->queue.buffer;
-
-err_release:
-	dma_release_channel(chan);
-err_free:
-	kfree(dmaengine_buffer);
-	return ERR_PTR(ret);
 }
 
 /**
@@ -276,42 +260,42 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(buffer);
 
 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
-	dma_release_channel(dmaengine_buffer->chan);
-
 	iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
 
 /**
- * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
- * @dev: DMA channel consumer device
- * @indio_dev: IIO device to which to attach this buffer.
- * @channel: DMA channel name, typically "rx".
- * @dir: Direction of buffer (in or out)
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
  *
- * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
- * and attaches it to an IIO device with iio_device_attach_buffer().
- * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
- * IIO device.
- *
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
- * release it.
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
  */
-struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
-						  struct iio_dev *indio_dev,
-						  const char *channel,
-						  enum iio_buffer_direction dir)
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+	struct dma_chan *chan = dmaengine_buffer->chan;
+
+	iio_dmaengine_buffer_free(buffer);
+	dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+				  struct dma_chan *chan,
+				  enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 	int ret;
 
-	buffer = iio_dmaengine_buffer_alloc(dev, channel);
+	buffer = iio_dmaengine_buffer_alloc(chan);
 	if (IS_ERR(buffer))
 		return ERR_CAST(buffer);
 
@@ -327,11 +311,45 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
 
 	return buffer;
 }
+
+/**
+ * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: DMA channel consumer device
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
+ * release it.
+ */
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir)
+{
+	struct dma_chan *chan;
+	struct iio_buffer *buffer;
+
+	chan = dma_request_chan(dev, channel);
+	if (IS_ERR(chan))
+		return ERR_CAST(chan);
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+	if (IS_ERR(buffer))
+		dma_release_channel(chan);
+
+	return buffer;
+}
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
 {
-	iio_dmaengine_buffer_free(buffer);
+	iio_dmaengine_buffer_teardown(buffer);
 }
 
 /**
@@ -357,7 +375,7 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
 					buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
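
For the device-managed variant, a minimal sketch of how a driver might use it
(the helper name, dev, indio_dev and the "rx" channel are hypothetical; the
devm action registered above runs iio_dmaengine_buffer_teardown() automatically
on driver detach):

	static int my_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
	{
		/*
		 * Requests the "rx" DMA channel, allocates and attaches the
		 * buffer, and registers devm_iio_dmaengine_buffer_teardown()
		 * as the cleanup action.
		 */
		return devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
							   IIO_BUFFER_DIRECTION_IN);
	}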

View File

@@ -168,7 +168,7 @@ static struct iio_buffer *axi_dac_request_buffer(struct iio_backend *back,
 static void axi_dac_free_buffer(struct iio_backend *back,
 				struct iio_buffer *buffer)
 {
-	iio_dmaengine_buffer_free(buffer);
+	iio_dmaengine_buffer_teardown(buffer);
 }
 
 enum {

View File

@@ -12,7 +12,7 @@
 struct iio_dev;
 struct device;
 
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
 						  struct iio_dev *indio_dev,
 						  const char *channel,