iio: core: Match iio_device_claim_*() semantics and implementation

Implement iio_device_claim_buffer_mode() fully inline with the use of
__iio_dev_mode_lock(), which takes care of sparse annotations.

To completely match iio_device_claim_direct() semantics, we need to
also change iio_device_claim_buffer_mode() return semantics to usual
true/false conditional lock semantics.

Additionally, to avoid silently breaking out-of-tree drivers, rename
iio_device_claim_buffer_mode() to iio_device_try_claim_buffer_mode().

Reviewed-by: David Lechner <dlechner@baylibre.com>
Reviewed-by: Nuno Sá <nuno.sa@analog.com>
Signed-off-by: Kurt Borja <kuurtb@gmail.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
This commit is contained in:
Kurt Borja
2026-01-20 01:20:43 -05:00
committed by Jonathan Cameron
parent c37ec9d507
commit 2daee817df
7 changed files with 39 additions and 57 deletions

View File

@@ -964,7 +964,7 @@ static irqreturn_t ade9000_dready_thread(int irq, void *data)
struct iio_dev *indio_dev = data;
/* Handle data ready interrupt from C4/EVENT/DREADY pin */
if (!iio_device_claim_buffer_mode(indio_dev)) {
if (iio_device_try_claim_buffer_mode(indio_dev)) {
ade9000_iio_push_buffer(indio_dev);
iio_device_release_buffer_mode(indio_dev);
}

View File

@@ -188,11 +188,8 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
/*
* Ignore samples if the buffer is not set: it is needed if the ODR is
* set but the buffer is not enabled yet.
*
* Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
* is not enabled.
*/
if (iio_device_claim_buffer_mode(indio_dev) < 0)
if (!iio_device_try_claim_buffer_mode(indio_dev))
return 0;
out = (s16 *)st->samples;

View File

@@ -417,13 +417,7 @@ static int max30100_read_raw(struct iio_dev *indio_dev,
* Temperature reading can only be acquired while engine
* is running
*/
if (iio_device_claim_buffer_mode(indio_dev)) {
/*
* Replacing -EBUSY or other error code
* returned by iio_device_claim_buffer_mode()
* because user space may rely on the current
* one.
*/
if (!iio_device_try_claim_buffer_mode(indio_dev)) {
ret = -EAGAIN;
} else {
ret = max30100_get_temp(data, val);

View File

@@ -476,7 +476,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
* shutdown; leave shutdown briefly when buffer not running
*/
any_mode_retry:
if (iio_device_claim_buffer_mode(indio_dev)) {
if (!iio_device_try_claim_buffer_mode(indio_dev)) {
/*
* This one is a *bit* hacky. If we cannot claim buffer
* mode, then try direct mode so that we make sure

View File

@@ -2183,7 +2183,7 @@ EXPORT_SYMBOL_GPL(__devm_iio_device_register);
*
* There are very few cases where a driver actually needs to lock the current
* mode unconditionally. It's recommended to use iio_device_claim_direct() or
* iio_device_claim_buffer_mode() pairs or related helpers instead.
* iio_device_try_claim_buffer_mode() pairs or related helpers instead.
*/
void __iio_dev_mode_lock(struct iio_dev *indio_dev)
{
@@ -2201,46 +2201,6 @@ void __iio_dev_mode_unlock(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(__iio_dev_mode_unlock);
/**
* iio_device_claim_buffer_mode - Keep device in buffer mode
* @indio_dev: the iio_dev associated with the device
*
* If the device is in buffer mode it is guaranteed to stay
* that way until iio_device_release_buffer_mode() is called.
*
* Use with iio_device_release_buffer_mode().
*
* Returns: 0 on success, -EBUSY on failure.
*/
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_enabled(indio_dev))
return 0;
mutex_unlock(&iio_dev_opaque->mlock);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
/**
* iio_device_release_buffer_mode - releases claim on buffer mode
* @indio_dev: the iio_dev associated with the device
*
* Release the claim. Device is no longer guaranteed to stay
* in buffer mode.
*
* Use with iio_device_claim_buffer_mode().
*/
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
/**
* iio_device_get_current_mode() - helper function providing read-only access to
* the opaque @currentmode variable

View File

@@ -304,7 +304,7 @@ static int opt4060_set_driver_state(struct iio_dev *indio_dev,
struct opt4060_chip *chip = iio_priv(indio_dev);
int ret = 0;
any_mode_retry:
if (iio_device_claim_buffer_mode(indio_dev)) {
if (!iio_device_try_claim_buffer_mode(indio_dev)) {
/*
* This one is a *bit* hacky. If we cannot claim buffer mode,
* then try direct mode so that we make sure things cannot

View File

@@ -706,8 +706,39 @@ static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
*/
#define iio_device_release_direct(indio_dev) __iio_dev_mode_unlock(indio_dev)
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
/**
 * iio_device_try_claim_buffer_mode() - Keep device in buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * On success the device is guaranteed to remain in buffer mode until
 * iio_device_release_buffer_mode() is called.
 *
 * Use with iio_device_release_buffer_mode().
 *
 * Returns: true on success, false on failure.
 */
static inline bool iio_device_try_claim_buffer_mode(struct iio_dev *indio_dev)
{
	__iio_dev_mode_lock(indio_dev);
	if (iio_buffer_enabled(indio_dev))
		/* Hold the mode lock until the matching release call. */
		return true;
	__iio_dev_mode_unlock(indio_dev);
	return false;
}
/**
 * iio_device_release_buffer_mode() - releases claim on buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Use with iio_device_try_claim_buffer_mode().
 *
 * Expands directly to __iio_dev_mode_unlock() so the unlock path is
 * shared with the other mode-claim helpers.
 */
#define iio_device_release_buffer_mode(indio_dev) __iio_dev_mode_unlock(indio_dev)
extern const struct bus_type iio_bus_type;