path: root/drivers/base
author     Linus Torvalds <torvalds@linux-foundation.org>   2012-07-22 13:36:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-07-22 13:36:52 -0700
commit     7100e505b76b4e2efd88b2459d1a932214e29f8a (patch)
tree       a8eae8687dc1511c89463b1eb93c8349a7471ab3 /drivers/base
parent     cb47c1831fa406c964468b259f2082c16cc3f757 (diff)
parent     75a4161a58dd157a2bd2dc8e9986e45b62ac46cf (diff)
Merge tag 'pm-for-3.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:

 - ACPI conversion to PM handling based on struct dev_pm_ops.
 - Conversion of a number of platform drivers to PM handling based on
   struct dev_pm_ops and removal of empty legacy PM callbacks from a
   couple of PCI drivers.
 - Suspend-to-both for in-kernel hibernation from Bojan Smojver.
 - cpuidle fixes and cleanups from ShuoX Liu, Daniel Lezcano and Preeti Murthy.
 - cpufreq bug fixes from Jonghwa Lee and Stephen Boyd.
 - Suspend and hibernate fixes from Srivatsa Bhat and Colin Cross.
 - Generic PM domains framework updates.
 - RTC CMOS wakeup signaling update from Paul Fox.
 - sparse warnings fixes from Sachin Kamat.
 - Build warnings fixes for the generic PM domains framework and PM sysfs code.
 - sysfs switch for printing device suspend times from Sameer Nanda.
 - Documentation fix from Oskar Schirmer.

* tag 'pm-for-3.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (70 commits)
  cpufreq: Fix sysfs deadlock with concurrent hotplug/frequency switch
  EXYNOS: bugfix on retrieving old_index from freqs.old
  PM / Sleep: call early resume handlers when suspend_noirq fails
  PM / QoS: Use NULL pointer instead of plain integer in qos.c
  PM / QoS: Use NULL pointer instead of plain integer in pm_qos.h
  PM / Sleep: Require CAP_BLOCK_SUSPEND to use wake_lock/wake_unlock
  PM / Sleep: Add missing static storage class specifiers in main.c
  cpuilde / ACPI: remove time from acpi_processor_cx structure
  cpuidle / ACPI: remove usage from acpi_processor_cx structure
  cpuidle / ACPI : remove latency_ticks from acpi_processor_cx structure
  rtc-cmos: report wakeups from interrupt handler
  PM / Sleep: Fix build warning in sysfs.c for CONFIG_PM_SLEEP unset
  PM / Domains: Fix build warning for CONFIG_PM_RUNTIME unset
  olpc-xo15-sci: Use struct dev_pm_ops for power management
  PM / Domains: Replace plain integer with NULL pointer in domain.c file
  PM / Domains: Add missing static storage class specifier in domain.c file
  PM / crypto / ux500: Use struct dev_pm_ops for power management
  PM / IPMI: Remove empty legacy PCI PM callbacks
  tpm_nsc: Use struct dev_pm_ops for power management
  tpm_tis: Use struct dev_pm_ops for power management
  ...
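Several of the driver conversions listed above follow the same pattern: drop the legacy suspend/resume entries from the bus-specific driver structure and point .driver.pm at a struct dev_pm_ops instead. The sketch below illustrates that pattern for a hypothetical platform driver "foo"; the names are invented for illustration and nothing here is taken verbatim from the merged commits.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* Quiesce the (hypothetical) device before the system sleeps. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Reprogram the device after the system wakes up. */
	return 0;
}

/* Defines a struct dev_pm_ops named foo_pm_ops wired up for system sleep. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	/* .probe and .remove omitted for brevity */
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,	/* replaces legacy .suspend/.resume hooks */
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");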
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/domain.c | 342
-rw-r--r--  drivers/base/power/main.c   |  26
-rw-r--r--  drivers/base/power/qos.c    |   2
-rw-r--r--  drivers/base/power/sysfs.c  |   4
4 files changed, 291 insertions(+), 83 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 83aa694a8efe..ba3487c9835b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
start_latency_ns, "start");
}
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
-}
-
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
genpd->status = GPD_STATE_ACTIVE;
}
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+ s64 usecs64;
+
+ if (!genpd->cpu_data)
+ return;
+
+ usecs64 = genpd->power_on_latency_ns;
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs64 += genpd->cpu_data->saved_exit_latency;
+ genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
/**
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
}
+ if (genpd->cpu_data) {
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = true;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
if (elapsed_ns > genpd->power_on_latency_ns) {
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
+ genpd_recalc_cpu_exit_latency(genpd);
if (genpd->name)
pr_warning("%s: Power-on latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
}
+ out:
genpd_set_active(genpd);
return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_RUNTIME
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
pdd = dev->power.subsys_data ?
dev->power.subsys_data->domain_data : NULL;
- if (pdd) {
+ if (pdd && pdd->dev) {
to_gpd_data(pdd)->td.constraint_changed = true;
genpd = dev_to_genpd(dev);
} else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
+ bool need_restore = gpd_data->need_restore;
- if (!gpd_data->need_restore)
- return;
-
+ gpd_data->need_restore = false;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
+ if (need_restore)
+ genpd_restore_dev(genpd, dev);
mutex_lock(&genpd->lock);
-
- gpd_data->need_restore = false;
}
/**
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
}
+ if (genpd->cpu_data) {
+ /*
+ * If cpu_data is set, cpuidle should turn the domain off when
+ * the CPU in it is idle. In that case we don't decrement the
+ * subdomain counts of the master domains, so that power is not
+ * removed from the current domain prematurely as a result of
+ * cutting off the masters' power.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = false;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
if (genpd->power_off) {
ktime_t time_start;
s64 elapsed_ns;
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
- goto out;
+ return genpd_start_dev(genpd, dev);
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
- out:
- genpd_start_dev(genpd, dev);
-
return 0;
}
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
#endif /* CONFIG_PM_SLEEP */
+static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data)
+ return NULL;
+
+ mutex_init(&gpd_data->lock);
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ return gpd_data;
+}
+
+static void __pm_genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+{
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
+}
+
/**
* __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct gpd_timing_data *td)
{
- struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
struct pm_domain_data *pdd;
int ret = 0;
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data)
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
return -ENOMEM;
- mutex_init(&gpd_data->lock);
- gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
+
genpd->device_count++;
genpd->max_off_time_changed = true;
- dev_pm_get_subsys_data(dev);
-
- mutex_lock(&gpd_data->lock);
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = &genpd->domain;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- gpd_data->base.dev = dev;
- list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ } else {
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+ }
+ gpd_data->refcount++;
if (td)
gpd_data->td = *td;
+ spin_unlock_irq(&dev->power.lock);
+
+ mutex_lock(&gpd_data->lock);
+ gpd_data->base.dev = dev;
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
- spin_unlock_irq(&dev->power.lock);
mutex_unlock(&gpd_data->lock);
- genpd_release_lock(genpd);
-
- return 0;
-
out:
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
+ bool remove = false;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = NULL;
pdd = dev->power.subsys_data->domain_data;
list_del_init(&pdd->list_node);
- dev->power.subsys_data->domain_data = NULL;
+ gpd_data = to_gpd_data(pdd);
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
+
spin_unlock_irq(&dev->power.lock);
- gpd_data = to_gpd_data(pdd);
mutex_lock(&gpd_data->lock);
pdd->dev = NULL;
mutex_unlock(&gpd_data->lock);
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
return 0;
out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
* @dev: Device to add the callbacks to.
* @ops: Set of callbacks to add.
* @td: Timing data to add to the device along with the callbacks (optional).
+ *
+ * Every call to this routine should be balanced with a call to
+ * __pm_genpd_remove_callbacks() and they must not be nested.
*/
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
struct gpd_timing_data *td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
int ret = 0;
- if (!(dev && dev->power.subsys_data && ops))
+ if (!(dev && ops))
return -EINVAL;
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
+ return -ENOMEM;
+
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
} else {
- ret = -EINVAL;
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
}
+ gpd_data->refcount++;
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+
+ spin_unlock_irq(&dev->power.lock);
+ out:
device_pm_unlock();
pm_runtime_enable(dev);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
* __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
* @dev: Device to remove the callbacks from.
* @clear_td: If set, clear the device's timing data too.
+ *
+ * This routine can only be called after pm_genpd_add_callbacks().
*/
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data = NULL;
+ bool remove = false;
int ret = 0;
if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ spin_lock_irq(&dev->power.lock);
- gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->ops = (struct gpd_dev_ops){ NULL };
if (clear_td)
gpd_data->td = (struct gpd_timing_data){ 0 };
+
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
} else {
ret = -EINVAL;
}
+ spin_unlock_irq(&dev->power.lock);
+
device_pm_unlock();
pm_runtime_enable(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+ struct cpuidle_driver *cpuidle_drv;
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || state < 0)
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->cpu_data) {
+ ret = -EEXIST;
+ goto out;
+ }
+ cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+ if (!cpu_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cpuidle_drv = cpuidle_driver_ref();
+ if (!cpuidle_drv) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cpuidle_drv->state_count <= state) {
+ ret = -EINVAL;
+ goto err;
+ }
+ idle_state = &cpuidle_drv->states[state];
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto err;
+ }
+ cpu_data->idle_state = idle_state;
+ cpu_data->saved_exit_latency = idle_state->exit_latency;
+ genpd->cpu_data = cpu_data;
+ genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+
+ err:
+ cpuidle_driver_unref();
+ goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ cpu_data = genpd->cpu_data;
+ if (!cpu_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ idle_state = cpu_data->idle_state;
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ idle_state->exit_latency = cpu_data->saved_exit_latency;
+ cpuidle_driver_unref();
+ genpd->cpu_data = NULL;
+ kfree(cpu_data);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+}
+
/* Default device callbacks for generic PM domains. */
/**
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
static int pm_genpd_default_save_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.save_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_suspend)
- return drv->pm->runtime_suspend(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
}
/**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
static int pm_genpd_default_restore_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.restore_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_resume)
- return drv->pm->runtime_resume(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
}
#ifdef CONFIG_PM_SLEEP
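The genpd_attach_cpuidle()/genpd_detach_cpuidle() helpers added above let a platform couple a PM domain containing CPUs to a cpuidle state: while the domain is powered on the state stays disabled, and the domain's power-on latency is folded into the state's exit latency (see genpd_recalc_cpu_exit_latency()). A minimal usage sketch follows; the domain name, the omitted .power_on/.power_off callbacks and the idle-state index are assumptions made for illustration, and the declarations are expected to come from <linux/pm_domain.h>.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain my_cpu_domain = {
	.name = "CPUDOMAIN",
	/* .power_on / .power_off would be filled in by the platform. */
};

static int __init my_cpu_domain_init(void)
{
	int ret;

	/* Register the domain with genpd, starting in the powered-on state. */
	pm_genpd_init(&my_cpu_domain, NULL, false);

	/*
	 * Tie the domain to cpuidle state 1.  Per the code above, this only
	 * succeeds while that state is currently disabled (-EAGAIN otherwise).
	 */
	ret = genpd_attach_cpuidle(&my_cpu_domain, 1);
	if (ret)
		pr_err("%s: cpuidle attach failed: %d\n", my_cpu_domain.name, ret);

	return ret;
}
core_initcall(my_cpu_domain_init);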
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9cb845e49334..0113adc310dc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
-
+#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"
@@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *);
*/
LIST_HEAD(dpm_list);
-LIST_HEAD(dpm_prepared_list);
-LIST_HEAD(dpm_suspended_list);
-LIST_HEAD(dpm_late_early_list);
-LIST_HEAD(dpm_noirq_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
@@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev)
{
ktime_t calltime = ktime_set(0, 0);
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
dev_name(dev), task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
@@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
{
ktime_t delta, rettime;
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
+ cpuidle_resume();
}
/**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_late_early_list)) {
@@ -989,8 +991,16 @@ static int dpm_suspend_late(pm_message_t state)
int dpm_suspend_end(pm_message_t state)
{
int error = dpm_suspend_late(state);
+ if (error)
+ return error;
+
+ error = dpm_suspend_noirq(state);
+ if (error) {
+ dpm_resume_early(state);
+ return error;
+ }
- return error ? : dpm_suspend_noirq(state);
+ return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fd849a2c4fa8..74a67e0019a2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev)
{
dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = 0;
+ dev->power.pq_req = NULL;
}
/**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 48be2ad4dd2c..b91dc6f1e914 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -474,6 +474,8 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
#endif
+#ifdef CONFIG_PM_SLEEP
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -500,6 +502,8 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
+
+#endif
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {