Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  PM / Hibernate: Implement compat_ioctl for /dev/snapshot
  PM / Freezer: fix return value of freezable_schedule_timeout_killable()
  PM / shmobile: Allow the A4R domain to be turned off at run time
  PM / input / touchscreen: Make st1232 use device PM QoS constraints
  PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
  PM / shmobile: Remove the stay_on flag from SH7372's PM domains
  PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
  PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  ARM: S3C64XX: Implement basic power domain support
  PM / shmobile: Use common always on power domain governor
  ...

Fix up trivial conflict in fs/xfs/xfs_buf.c due to removal of unused
XBT_FORCE_SLEEP bit
commit eb59c505f8
107 changed files with 3252 additions and 1533 deletions
include/linux/freezer.h

@@ -5,71 +5,58 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
+extern bool freezing_slow_path(struct task_struct *p);
+
 /*
  * Check if there is a request to freeze a process
  */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
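Note: try_to_freeze() now returns bool and asserts a sleepable context via might_sleep(). A minimal sketch of the usual calling pattern in a freezable kernel thread; the thread function and its work are hypothetical, not part of this diff:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
        set_freezable();                /* clear PF_NOFREEZE so the freezer sees us */

        while (!kthread_should_stop()) {
                try_to_freeze();        /* enters __refrigerator() if a freeze is pending */

                /* ... one unit of hypothetical work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}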
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete. However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
  */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
 /*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
  */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	long __retval;							\
+	freezer_do_not_count();						\
+	__retval = schedule_timeout_killable(timeout);			\
+	freezer_count();						\
+	__retval;							\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
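Note: freezable_schedule() brackets schedule() with freezer_do_not_count()/freezer_count(), so a task sleeping in TASK_UNINTERRUPTIBLE no longer holds up freeze_processes(); it freezes in freezer_count() on the way back. A sketch of the intended use, with a hypothetical wait condition:

#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical: wait uninterruptibly for a reply without blocking the freezer. */
static void example_wait_for_reply(wait_queue_head_t *wq, bool *done)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                if (*done)
                        break;
                freezable_schedule();   /* the freezer skips us while we sleep */
        }
        finish_wait(wq, &wait);
}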
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq,			\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
-
-static inline void refrigerator(void) {}
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
+
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
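Note: wait_event_freezable() now loops until the condition is true or a real signal is pending, calling try_to_freeze() between iterations instead of relying on the old TIF_FREEZE signaling. Typical caller, sketched with hypothetical names:

#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_work_ready;         /* hypothetical condition */

static int example_consume(void)
{
        int err;

        /* Sleeps interruptibly and stays freezable; returns nonzero only
         * for a genuine signal, not for a freeze/thaw cycle. */
        err = wait_event_freezable(example_wq, example_work_ready);
        if (err)
                return err;

        /* ... consume the work ... */
        return 0;
}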
include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
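Note: the read_lock_usermodehelper()/read_unlock_usermodehelper() pair (added by "PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()") lets a caller test usermodehelper_is_disabled() and act on the answer without racing against the freezer disabling helpers. A sketch, with a hypothetical helper binary:

#include <linux/kmod.h>

static int example_run_helper(void)
{
        char *argv[] = { "/sbin/example-helper", NULL }; /* hypothetical path */
        char *envp[] = { "HOME=/", NULL };
        int ret;

        read_lock_usermodehelper();     /* pin the enabled/disabled state */
        if (usermodehelper_is_disabled())
                ret = -EBUSY;           /* e.g. system suspend in progress */
        else
                ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
        read_unlock_usermodehelper();

        return ret;
}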
include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
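Note: kthread_freezable_should_stop() folds the freeze point into the stop check, closing the race between kthread_stop() and the refrigerator. A minimal sketch of the loop shape it is meant for (the thread body is hypothetical):

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int example_kthread(void *unused)
{
        bool was_frozen;

        set_freezable();

        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        continue;       /* hypothetical: revalidate state after a thaw */

                /* ... one unit of work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}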
include/linux/platform_device.h
@@ -256,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare	NULL
-#define platform_pm_complete	NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend		NULL
 #define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
 #define platform_pm_poweroff		NULL
 #define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
 #else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
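Note: with the forward-only callbacks gone, the PM core ("PM: Run the driver callback directly if the subsystem one is not there") invokes a platform driver's own dev_pm_ops directly. The driver side, sketched with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
        /* quiesce the hypothetical device */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* bring it back up */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example",
                .owner  = THIS_MODULE,
                .pm     = &example_pm_ops,      /* now called directly by the PM core */
        },
};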
include/linux/pm.h
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS	NULL
-#endif
-
 /**
  * PM_EVENT_ messages
  *
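Note: dropping GENERIC_SUBSYS_PM_OPS works because a subsystem with nothing to add can now leave .pm unset and the PM core falls through to the driver's callbacks. A before/after sketch for a hypothetical bus type:

#include <linux/device.h>

/* Before: .pm = GENERIC_SUBSYS_PM_OPS existed only to forward each
 * system-sleep and runtime PM callback to the driver. After: omit .pm. */
static struct bus_type example_bus_type = {
        .name   = "example",
        /* no .pm member: driver dev_pm_ops are used directly */
};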
@@ -521,6 +508,8 @@ struct dev_pm_info {
 	unsigned long		active_jiffies;
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
+	ktime_t			suspend_time;
+	s64			max_time_suspended_ns;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	struct pm_qos_constraints *constraints;

include/linux/pm_domain.h
@@ -10,6 +10,7 @@
 #define _LINUX_PM_DOMAIN_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {
 
 struct dev_power_governor {
 	bool (*power_down_ok)(struct dev_pm_domain *domain);
+	bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+	int (*start)(struct device *dev);
+	int (*stop)(struct device *dev);
+	int (*save_state)(struct device *dev);
+	int (*restore_state)(struct device *dev);
+	int (*suspend)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*resume)(struct device *dev);
+	int (*freeze)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*thaw)(struct device *dev);
+	bool (*active_wakeup)(struct device *dev);
 };
 
 struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
+	char *name;
 	unsigned int in_progress;	/* Number of devices being suspended now */
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
 	bool suspend_power_off;	/* Power status before system suspend */
 	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
+	s64 power_off_latency_ns;
 	int (*power_on)(struct generic_pm_domain *domain);
-	int (*start_device)(struct device *dev);
-	int (*stop_device)(struct device *dev);
-	bool (*active_wakeup)(struct device *dev);
+	s64 power_on_latency_ns;
+	struct gpd_dev_ops dev_ops;
+	s64 break_even_ns;	/* Power break even for the entire domain. */
+	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
+	ktime_t power_off_time;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
 	struct list_head slave_node;
 };
 
+struct gpd_timing_data {
+	s64 stop_latency_ns;
+	s64 start_latency_ns;
+	s64 save_state_latency_ns;
+	s64 restore_state_latency_ns;
+	s64 break_even_ns;
+};
+
 struct generic_pm_domain_data {
 	struct pm_domain_data base;
+	struct gpd_dev_ops ops;
+	struct gpd_timing_data td;
 	bool need_restore;
 };
@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
-			       struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+	return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+				 struct device *dev,
+				 struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+				  struct gpd_dev_ops *ops,
+				  struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
+
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
 #else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+					struct device *dev,
+					struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
 				      struct device *dev)
 {
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
-				 struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+					 struct gpd_dev_ops *ops,
+					 struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
+{
+}
 static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline bool default_stop_ok(struct device *dev)
+{
+	return false;
+}
+#define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+	return __pm_genpd_remove_callbacks(dev, true);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
 extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
 extern void pm_genpd_poweroff_unused(void);
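Note: pulling the new API together, a platform sets up a domain with per-domain latencies, a governor, and member devices. A sketch under the assumption of hypothetical power-switch handlers and made-up latency figures (the governor consults power_on/off_latency_ns plus each device's gpd_timing_data):

#include <linux/pm_domain.h>

static int example_pd_power_off(struct generic_pm_domain *domain)
{
        return 0;       /* hypothetical: gate the power island */
}

static int example_pd_power_on(struct generic_pm_domain *domain)
{
        return 0;       /* hypothetical: ungate it */
}

static struct generic_pm_domain example_pd = {
        .name                   = "EXAMPLE-PD",         /* new name field */
        .power_off              = example_pd_power_off,
        .power_off_latency_ns   = 250000,               /* assumed figure */
        .power_on               = example_pd_power_on,
        .power_on_latency_ns    = 250000,               /* assumed figure */
};

static void example_pd_setup(struct device *dev)
{
        pm_genpd_init(&example_pd, &simple_qos_governor, true /* is_off */);
        pm_genpd_add_device(&example_pd, dev);
}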
include/linux/pm_qos.h
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_request_active(struct pm_qos_request *req);
 s32 pm_qos_read_value(struct pm_qos_constraints *c);
 
+s32 __dev_pm_qos_read_value(struct device *dev);
 s32 dev_pm_qos_read_value(struct device *dev);
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value);
 #else
 static inline int pm_qos_update_target(struct pm_qos_constraints *c,
 				       struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
 static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
 			{ return 0; }
 
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+			{ return 0; }
 static inline s32 dev_pm_qos_read_value(struct device *dev)
 			{ return 0; }
 static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
 {
 	dev->power.power_state = PMSG_INVALID;
 }
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+			struct dev_pm_qos_request *req, s32 value)
+			{ return 0; }
 #endif
 
 #endif
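Note: dev_pm_qos_add_ancestor_request() attaches the request to an ancestor of the device rather than the device itself; the st1232 touchscreen commit in this pull uses it to bound its I2C controller's latency while a touch is in flight. A sketch with hypothetical callers (the value is a latency constraint in the same units as dev_pm_qos_add_request()):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_req;

static void example_touch_begin(struct device *dev)
{
        /* constrain the relevant ancestor, e.g. the bus controller */
        dev_pm_qos_add_ancestor_request(dev, &example_req, 100);
}

static void example_touch_end(struct device *dev)
{
        dev_pm_qos_remove_request(&example_req);
}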
include/linux/pm_runtime.h
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+						 s64 delta_ns);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+							s64 delta_ns) {}
+
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)
include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1787,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1803,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
include/linux/sh_intc.h
@@ -95,6 +95,7 @@ struct intc_desc {
 	unsigned int num_resources;
 	intc_enum force_enable;
 	intc_enum force_disable;
+	bool skip_syscore_suspend;
 	struct intc_hw_desc hw;
 };
include/linux/suspend.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
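Note: lock_system_sleep() now marks the caller with PF_FREEZER_SKIP via freezer_do_not_count(), so holding pm_mutex can no longer deadlock the freezer; unlock_system_sleep() re-arms freezing on the way out. Usage is unchanged; a sketch for a subsystem that must exclude suspend/hibernation around a critical region (function name hypothetical):

#include <linux/suspend.h>

static void example_exclude_sleep(void)
{
        lock_system_sleep();    /* keeps the freezer from waiting on us */

        /* ... touch state that must not change across a system sleep ... */

        unlock_system_sleep();  /* drops pm_mutex, then try_to_freeze() */
}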
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
 
-extern struct mutex pm_mutex;
-
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*