sched: Move struct sched_group to kernel/sched/sched.h
Move struct sched_group_power and sched_group and related inline
functions to kernel/sched/sched.h, as they are used internally only.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit: 5e6521eaa1
parent: cc1f4b1f3f

2 changed files with 58 additions and 56 deletions
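The move is safe for code outside kernel/sched/ because include/linux/sched.h keeps only a forward declaration (the added "struct sched_group;" line in the diff below): struct sched_domain merely stores a pointer to struct sched_group, and C permits pointers to incomplete types. A minimal user-space sketch of that pattern, purely for illustration (example_domain, example_weight and the main() driver are made-up names, not part of the commit):

/* Illustration only: shows why a forward declaration suffices when only
 * pointers to the type cross the header boundary. */
#include <stdio.h>

/* "public header" view: only the tag name is declared */
struct sched_group;

struct example_domain {
	struct sched_group *groups;	/* ok: pointer to an incomplete type */
};

/* "internal header" view: the full definition, members accessible */
struct sched_group {
	struct sched_group *next;	/* must be a circular list */
	unsigned int group_weight;
};

static unsigned int example_weight(const struct example_domain *d)
{
	return d->groups->group_weight;	/* needs the complete type */
}

int main(void)
{
	struct sched_group g = { .group_weight = 4 };
	struct example_domain d = { .groups = &g };

	g.next = &g;	/* single-element circular list */
	printf("group weight: %u\n", example_weight(&d));
	return 0;
}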
				
			
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -780,62 +780,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -846,6 +790,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -572,6 +572,62 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+struct sched_group_power {
+	atomic_t ref;
+	/*
+	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+	 * single CPU.
+	 */
+	unsigned int power, power_orig;
+	unsigned long next_update;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
+
+	unsigned long cpumask[0]; /* iteration mask */
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
+	unsigned int group_weight;
+	struct sched_group_power *sgp;
+
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 */
+	unsigned long cpumask[0];
+};
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgp->cpumask);
+}
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 extern int group_balance_cpu(struct sched_group *sg);
 
 #endif /* CONFIG_SMP */
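The NOTE inside struct sched_group above describes the trailing variable-length member pattern: the structure is allocated with extra space appended for the cpumask, sized for however many CPUs the machine booted with, so the struct and its mask live in one allocation. A minimal user-space sketch of that allocation pattern, for illustration only (struct group, alloc_group() and NR_CPU_WORDS are hypothetical stand-ins, not kernel API; the kernel itself sizes the extra space when it builds the sched domains):

/* Illustration only: flexible trailing array, as in cpumask[0] above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPU_WORDS 2			/* enough mask words for the booted CPUs */

struct group {
	struct group *next;
	unsigned int group_weight;
	unsigned long cpumask[];	/* trailing flexible array member */
};

static struct group *alloc_group(void)
{
	/* attach extra space for the cpumask to the end of the structure */
	size_t sz = sizeof(struct group) + NR_CPU_WORDS * sizeof(unsigned long);
	struct group *g = malloc(sz);

	if (g)
		memset(g, 0, sz);
	return g;
}

int main(void)
{
	struct group *g = alloc_group();

	if (!g)
		return 1;
	g->cpumask[0] = 0x3UL;		/* CPUs 0 and 1 belong to this group */
	printf("first mask word: %#lx\n", g->cpumask[0]);
	free(g);
	return 0;
}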