PM: Block enabling of runtime PM during system suspend

If device_prepare() runs on a device that has never had runtime
PM enabled, it may reasonably assume that runtime PM will not be
enabled for that device during the system suspend-resume cycle
currently in progress, but this has never been guaranteed.

To verify this assumption, make device_prepare() arrange for
triggering a device warning accompanied by a call trace dump if
runtime PM is enabled for such a device after it has returned.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/6131109.lOV4Wx5bFT@rjwysocki.net
This commit is contained in:
Rafael J. Wysocki
2025-02-18 21:11:42 +01:00
parent 6146b94994
commit 3e5eee147b
4 changed files with 38 additions and 0 deletions

View File

@@ -1109,6 +1109,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
/* If enabling runtime PM for the device is blocked, unblock it. */
pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1815,6 +1817,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
/*
* If runtime PM is disabled for the device at this point and it has
* never been enabled so far, it should not be enabled until this system
* suspend-resume cycle is complete, so prepare to trigger a warning on
* subsequent attempts to enable it.
*/
pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;

View File

@@ -1460,6 +1460,26 @@ int pm_runtime_barrier(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/*
 * pm_runtime_block_if_disabled - Mark never-enabled runtime PM as blocked.
 * @dev: Device to handle.
 *
 * Under the device's power lock, if runtime PM is currently disabled
 * (disable_depth is nonzero) and the status has never left RPM_INVALID
 * (i.e. runtime PM has never been enabled for the device), switch the
 * status to RPM_BLOCKED so that a later attempt to enable runtime PM
 * can be flagged with a warning.
 */
void pm_runtime_block_if_disabled(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);

	/* Both fields are read under power.lock; evaluation order is free. */
	if (dev->power.last_status == RPM_INVALID && dev->power.disable_depth)
		dev->power.last_status = RPM_BLOCKED;

	spin_unlock_irq(&dev->power.lock);
}
/*
 * pm_runtime_unblock - Lift a block on enabling runtime PM.
 * @dev: Device to handle.
 *
 * If the device's runtime PM status was set to RPM_BLOCKED by
 * pm_runtime_block_if_disabled(), reset it to RPM_INVALID under the
 * device's power lock so that runtime PM may be enabled again without
 * triggering a warning.
 */
void pm_runtime_unblock(struct device *dev)
{
	bool blocked;

	spin_lock_irq(&dev->power.lock);

	blocked = dev->power.last_status == RPM_BLOCKED;
	if (blocked)
		dev->power.last_status = RPM_INVALID;

	spin_unlock_irq(&dev->power.lock);
}
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1518,6 +1538,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
if (dev->power.last_status == RPM_BLOCKED) {
dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
dump_stack();
}
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

View File

@@ -597,6 +597,7 @@ enum rpm_status {
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
RPM_BLOCKED,
};
/*

View File

@@ -77,6 +77,8 @@ extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_block_if_disabled(struct device *dev);
extern void pm_runtime_unblock(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
@@ -271,6 +273,8 @@ static inline int pm_runtime_get_if_active(struct device *dev)
/*
 * No-op stub variants of the runtime PM helpers, presumably compiled in when
 * runtime PM support is configured out -- confirm against the surrounding
 * #ifdef (not visible in this hunk).
 */
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
/* With no runtime PM there is nothing to block or unblock. */
static inline void pm_runtime_block_if_disabled(struct device *dev) {}
static inline void pm_runtime_unblock(struct device *dev) {}
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}