From 1d3a64fbd214f26d8776433e87de6ff7b8265b4a Mon Sep 17 00:00:00 2001
From: Stephen Dickey
Date: Thu, 3 Dec 2020 13:49:13 -0800
Subject: [PATCH] ANDROID: cpu/hotplug: rebuild sched domains immediately

In the resume_cpus() path, cpus cannot be taken advantage of until the
cpus write lock is acquired, and cpus are activated and domains
rebuilt. This can incur significant delay in the unpause operation.

Additionally, if scheduled through the kworker thread, the wait time
for rebuilding sched domains becomes large due to a busy system that
can prevent the kworker from executing.

Activate the cpus and call cpuset_hotplug_workfn directly within
resume_cpus prior to getting the cpus write lock, thereby eliminating
delays associated with scheduling this activity.

Bug: 161210528
Change-Id: Ie2521f28ed9078b22d421d792f08413016d4dd62
Signed-off-by: Stephen Dickey
Signed-off-by: Todd Kjos
---
 include/linux/cpuset.h |  4 ++++
 kernel/cgroup/cpuset.c |  6 +++---
 kernel/cpu.c           | 10 ++++++++++
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7f1478c26a33..a433f4adf62d 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -160,6 +160,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
+extern void cpuset_hotplug_workfn(struct work_struct *work);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -275,6 +277,8 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
+static inline void cpuset_hotplug_workfn(struct work_struct *work) {}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index cf643f3fd99a..9ac2a148bf48 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -342,9 +342,9 @@ static DEFINE_SPINLOCK(callback_lock);
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
 /*
- * CPU / memory hotplug is handled asynchronously.
+ * CPU / memory hotplug is handled asynchronously
+ * for hotplug, synchronously for resume_cpus
  */
-static void cpuset_hotplug_workfn(struct work_struct *work);
 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 
 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
@@ -3136,7 +3136,7 @@ update_tasks:
  * Note that CPU offlining during suspend is ignored. We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug_workfn(struct work_struct *work)
+void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2bcc13dad8ea..e4b7df06bb64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <linux/cpuset.h>
 
 #include
 #define CREATE_TRACE_POINTS
@@ -1253,6 +1254,15 @@ int resume_cpus(struct cpumask *cpus)
 	if (cpumask_empty(cpus))
 		goto err_cpu_maps_update;
 
+	for_each_cpu(cpu, cpus)
+		set_cpu_active(cpu, true);
+
+	/* Lazy Resume. Build domains immediately instead of scheduling
+	 * a workqueue. This is so that the cpu can pull load when
+	 * sent a load balancing kick.
+	 */
+	cpuset_hotplug_workfn(NULL);
+
 	cpus_write_lock();
 
 	cpuhp_tasks_frozen = 0;