5ca62d6503
Revert "cpumask: don't perform while loop in cpumask_next_and()"

Revert commit 534b483a86 ("cpumask: don't perform while loop in
cpumask_next_and()").

This was a minor optimization, but it puts a `struct cpumask' on the
stack, which consumes too much stack space.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Amir Vadai <amirv@mellanox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
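
For context, the reverted variant avoided the open-coded loop by AND-ing the
two masks into a temporary. A minimal sketch of that shape (illustrative only,
not the verbatim code of 534b483a86; the function name is hypothetical) shows
where the on-stack `struct cpumask' comes from:

#include <linux/cpumask.h>

int cpumask_next_and_onstack(int n, const struct cpumask *src1p,
			     const struct cpumask *src2p)
{
	/*
	 * The temporary lives on the stack; with a large NR_CPUS
	 * (e.g. 8192 bits, i.e. 1 KiB) this is a significant cost,
	 * which is why the loop-based version below was restored.
	 */
	struct cpumask tmp;

	if (cpumask_and(&tmp, src1p, src2p))
		return cpumask_next(n, &tmp);
	return nr_cpu_ids;
}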
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);

/**
 * cpumask_any_but - return a "random" in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpu's first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == -1) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
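
As a caller-side usage note, the cpumask_var_t helpers above are normally
paired as in the following sketch (the function name and printed message are
illustrative assumptions, not part of the file above):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int example_walk_online_cpus(void)
{
	cpumask_var_t mask;
	int cpu;

	/* Allocates with CONFIG_CPUMASK_OFFSTACK=y; otherwise a nop returning true. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);

	for_each_cpu(cpu, mask)
		pr_info("example: cpu %d is in the mask\n", cpu);

	free_cpumask_var(mask);
	return 0;
}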