#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/percpu.h>
/* Per-processor data structure. %gs points to it while the kernel runs. */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long dummy;
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	short isidle;
} ____cacheline_aligned_in_smp;
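
/*
 * Illustrative only (not part of the original header): the numeric
 * offset comments above are load-bearing.  Assembly entry code and
 * gcc's stack protector (which reads the canary at %gs:40) address
 * these fields by fixed offset, so a sanity check along these lines
 * could live in arch setup code:
 *
 *	BUILD_BUG_ON(offsetof(struct x8664_pda, kernelstack) != 16);
 *	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
 */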
DECLARE_PER_CPU(struct x8664_pda, __pda);
extern void pda_init(int);
#define cpu_pda(cpu) (&per_cpu(__pda, cpu))
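
/*
 * Usage sketch (illustrative, not part of the original header):
 * cpu_pda() returns a pointer to an arbitrary CPU's PDA, e.g.:
 *
 *	int node = cpu_pda(cpu)->nodenumber;
 *
 * whereas the read_pda()/write_pda() wrappers below always act on the
 * current CPU's PDA through %gs.
 */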
/*
 * These are thin wrappers around the generic percpu accessors
 * (percpu_read(), percpu_write(), percpu_add(), percpu_sub(),
 * percpu_or()).  On x86 each of them compiles down to a single
 * %gs-relative instruction, rather than the multi-instruction
 * address computation that __get_cpu_var() needed.
 */
#define read_pda(field) percpu_read(__pda.field)
#define write_pda(field, val) percpu_write(__pda.field, val)
#define add_pda(field, val) percpu_add(__pda.field, val)
#define sub_pda(field, val) percpu_sub(__pda.field, val)
#define or_pda(field, val) percpu_or(__pda.field, val)
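
/*
 * Illustrative usage of the wrappers above (a sketch, not part of the
 * original header); each line becomes one %gs-relative instruction:
 *
 *	struct task_struct *t = read_pda(pcurrent);
 *	write_pda(isidle, 1);
 *	add_pda(irqcount, 1);
 */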
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field) \
	x86_test_and_clear_bit_percpu(bit, __pda.field)
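
/*
 * Illustrative usage (a sketch): preemption must already be off so the
 * read-modify-write cannot migrate between CPUs, e.g.:
 *
 *	if (test_and_clear_bit_pda(0, isidle))
 *		...;
 */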
#endif /* __ASSEMBLY__ */
#define PDA_STACKOFFSET (5*8)
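
/*
 * 5*8 = 40 bytes.  An inference, not documented here: this leaves room
 * for a five-word frame (ss, rsp, rflags, cs, rip) at the top of the
 * kernel stack when pda->kernelstack is set up.
 */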
#endif /* _ASM_X86_PDA_H */