Merge branch 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [PATCH] fast vdso implementation for CLOCK_THREAD_CPUTIME_ID
  [PATCH] improve idle cputime accounting
  [PATCH] improve precision of idle time detection.
  [PATCH] improve precision of process accounting.
  [PATCH] idle cputime accounting
  [PATCH] fix scaled & unscaled cputime accounting
commit 61420f59a5
30 changed files with 691 additions and 464 deletions
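
For reference, the userspace call that the "fast vdso implementation for
CLOCK_THREAD_CPUTIME_ID" patch accelerates looks like this. A minimal
sketch: on s390x the vdso can now answer it without a system call, and
older glibc needs -lrt to link.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* CPU time consumed by the calling thread */
	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
		return 1;
	printf("thread cputime: %ld.%09ld s\n", (long) ts.tv_sec, ts.tv_nsec);
	return 0;
}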
@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
-	account_system_time(prev, 0, delta_stime);
-	account_system_time_scaled(prev, delta_stime);
+	if (idle_task(smp_processor_id()) != prev)
+		account_system_time(prev, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 
 	if (pi->ac_utime) {
 		delta_utime = cycle_to_cputime(pi->ac_utime);
-		account_user_time(prev, delta_utime);
-		account_user_time_scaled(prev, delta_utime);
+		account_user_time(prev, delta_utime, delta_utime);
 	}
 
 	pi->ac_stamp = ni->ac_stamp = now;

@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	account_system_time(tsk, 0, delta_stime);
-	account_system_time_scaled(tsk, delta_stime);
+	if (irq_count() || idle_task(smp_processor_id()) != tsk)
+		account_system_time(tsk, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 	ti->ac_stime = 0;
 
 	ti->ac_stamp = now;

@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (ti->ac_utime) {
 		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(p, delta_utime);
-		account_user_time_scaled(p, delta_utime);
+		account_user_time(p, delta_utime, delta_utime);
 		ti->ac_utime = 0;
 	}
 }
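
The hunks above show the reworked accounting API from the "fix scaled &
unscaled cputime accounting" and "idle cputime accounting" patches:
account_user_time() and account_system_time() now take the scaled
cputime as an additional argument, and idle time goes to the new
account_idle_time() helper. A hedged sketch of a generic per-tick caller
using the new signatures (modeled on, not copied from, the common code):

void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy = jiffies_to_cputime(1);
	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);

	if (user_tick)
		account_user_time(p, one_jiffy, one_jiffy_scaled);
	else if (p != idle_task(smp_processor_id()))
		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(one_jiffy);
}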
@@ -33,6 +33,7 @@
 #include <linux/mqueue.h>
 #include <linux/hardirq.h>
 #include <linux/utsname.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>

@@ -256,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk)
 		delta += sys_time;
 		get_paca()->system_time = 0;
 	}
-	account_system_time(tsk, 0, delta);
-	account_system_time_scaled(tsk, deltascaled);
+	if (in_irq() || idle_task(smp_processor_id()) != tsk)
+		account_system_time(tsk, 0, delta, deltascaled);
+	else
+		account_idle_time(delta);
 	per_cpu(cputime_last_delta, smp_processor_id()) = delta;
 	per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
 	local_irq_restore(flags);

@@ -275,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
 
 	utime = get_paca()->user_time;
 	get_paca()->user_time = 0;
-	account_user_time(tsk, utime);
-
 	utimescaled = cputime_to_scaled(utime);
-	account_user_time_scaled(tsk, utimescaled);
+	account_user_time(tsk, utime, utimescaled);
 }
 
 /*

@@ -338,8 +338,12 @@ void calculate_steal_time(void)
 	tb = mftb();
 	purr = mfspr(SPRN_PURR);
 	stolen = (tb - pme->tb) - (purr - pme->purr);
-	if (stolen > 0)
-		account_steal_time(current, stolen);
+	if (stolen > 0) {
+		if (idle_task(smp_processor_id()) != current)
+			account_steal_time(stolen);
+		else
+			account_idle_time(stolen);
+	}
 	pme->tb = tb;
 	pme->purr = purr;
 }
@@ -14,7 +14,6 @@
 
 struct s390_idle_data {
 	spinlock_t lock;
-	unsigned int in_idle;
 	unsigned long long idle_count;
 	unsigned long long idle_enter;
 	unsigned long long idle_time;

@@ -22,12 +21,12 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void s390_idle_leave(void);
+void vtime_start_cpu(void);
 
 static inline void s390_idle_check(void)
 {
-	if ((&__get_cpu_var(s390_idle))->in_idle)
-		s390_idle_leave();
+	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
+		vtime_start_cpu();
 }
 
 #endif /* _ASM_S390_CPU_H_ */
@@ -11,7 +11,7 @@
 
 #include <asm/div64.h>
 
-/* We want to use micro-second resolution. */
+/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
 typedef unsigned long long cputime_t;
 typedef unsigned long long cputime64_t;

@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base)
 #define cputime_ge(__a, __b)		((__a) >= (__b))
 #define cputime_lt(__a, __b)		((__a) <  (__b))
 #define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__div((__ct), 1000000 / HZ))
+#define cputime_to_jiffies(__ct)	(__div((__ct), 4096000000ULL / HZ))
 #define cputime_to_scaled(__ct)		(__ct)
-#define jiffies_to_cputime(__hz)	((cputime_t)(__hz) * (1000000 / HZ))
+#define jiffies_to_cputime(__hz)	((cputime_t)(__hz) * (4096000000ULL / HZ))
 
 #define cputime64_zero			(0ULL)
 #define cputime64_add(__a, __b)		((__a) + (__b))

@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base)
 static inline u64
 cputime64_to_jiffies64(cputime64_t cputime)
 {
-	do_div(cputime, 1000000 / HZ);
+	do_div(cputime, 4096000000ULL / HZ);
 	return cputime;
 }
 

@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-	return __div(cputime, 1000);
+	return __div(cputime, 4096000);
 }
 
 static inline cputime_t
 msecs_to_cputime(const unsigned int m)
 {
-	return (cputime_t) m * 1000;
+	return (cputime_t) m * 4096000;
 }
 
 /*

@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m)
 static inline unsigned int
 cputime_to_secs(const cputime_t cputime)
 {
-	return __div(cputime, 1000000);
+	return __div(cputime, 2048000000) >> 1;
 }
 
 static inline cputime_t
 secs_to_cputime(const unsigned int s)
 {
-	return (cputime_t) s * 1000000;
+	return (cputime_t) s * 4096000000ULL;
 }
 
 /*

@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s)
 static inline cputime_t
 timespec_to_cputime(const struct timespec *value)
 {
-	return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000;
+	return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
 }
 
 static inline void

@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
 	register_pair rp;
 
 	rp.pair = cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
-	value->tv_nsec = rp.subreg.even * 1000;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+	value->tv_nsec = rp.subreg.even * 1000 / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_nsec = (cputime % 1000000) * 1000;
-	value->tv_sec = cputime / 1000000;
+	value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
+	value->tv_sec = cputime / 4096000000ULL;
 #endif
 }
 

@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
 static inline cputime_t
 timeval_to_cputime(const struct timeval *value)
 {
-	return value->tv_usec + (u64) value->tv_sec * 1000000;
+	return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
 }
 
 static inline void

@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 	register_pair rp;
 
 	rp.pair = cputime >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
-	value->tv_usec = rp.subreg.even;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+	value->tv_usec = rp.subreg.even / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_usec = cputime % 1000000;
-	value->tv_sec = cputime / 1000000;
+	value->tv_usec = cputime % 4096000000ULL;
+	value->tv_sec = cputime / 4096000000ULL;
 #endif
 }
 

@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-	return __div(cputime, 1000000 / USER_HZ);
+	return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
 clock_t_to_cputime(unsigned long x)
 {
-	return (cputime_t) x * (1000000 / USER_HZ);
+	return (cputime_t) x * (4096000000ULL / USER_HZ);
 }
 
 /*

@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-	return __div(cputime, 1000000 / USER_HZ);
+	return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
 #endif /* _S390_CPUTIME_H */
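
The constants above all follow from the new unit: one cputime tick is
2**-12 microseconds, so 4096 ticks make one microsecond, 4096000 one
millisecond and 4096000000 one second. (cputime_to_secs divides by
2048000000 and then shifts right once, presumably because the 31-bit
divide instruction behind __div takes a signed 32-bit divisor and
4096000000 does not fit.) A small worked example of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* 3 s + 500 ms expressed in 2**-12 microsecond cputime units */
	unsigned long long cputime = 3ULL * 4096000000ULL + 500000ULL * 4096;

	printf("seconds:      %llu\n", cputime / 4096000000ULL); /* 3 */
	printf("microseconds: %llu\n", cputime >> 12);           /* 3500000 */
	return 0;
}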
@@ -67,11 +67,11 @@
 #define __LC_SYNC_ENTER_TIMER		0x248
 #define __LC_ASYNC_ENTER_TIMER		0x250
 #define __LC_EXIT_TIMER			0x258
-#define __LC_LAST_UPDATE_TIMER		0x260
-#define __LC_USER_TIMER			0x268
-#define __LC_SYSTEM_TIMER		0x270
-#define __LC_LAST_UPDATE_CLOCK		0x278
-#define __LC_STEAL_CLOCK		0x280
+#define __LC_USER_TIMER			0x260
+#define __LC_SYSTEM_TIMER		0x268
+#define __LC_STEAL_TIMER		0x270
+#define __LC_LAST_UPDATE_TIMER		0x278
+#define __LC_LAST_UPDATE_CLOCK		0x280
 #define __LC_RETURN_MCCK_PSW            0x288
 #define __LC_KERNEL_STACK               0xC40
 #define __LC_THREAD_INFO		0xC44

@@ -89,11 +89,11 @@
 #define __LC_SYNC_ENTER_TIMER		0x250
 #define __LC_ASYNC_ENTER_TIMER		0x258
 #define __LC_EXIT_TIMER			0x260
-#define __LC_LAST_UPDATE_TIMER		0x268
-#define __LC_USER_TIMER			0x270
-#define __LC_SYSTEM_TIMER		0x278
-#define __LC_LAST_UPDATE_CLOCK		0x280
-#define __LC_STEAL_CLOCK		0x288
+#define __LC_USER_TIMER			0x268
+#define __LC_SYSTEM_TIMER		0x270
+#define __LC_STEAL_TIMER		0x278
+#define __LC_LAST_UPDATE_TIMER		0x280
+#define __LC_LAST_UPDATE_CLOCK		0x288
 #define __LC_RETURN_MCCK_PSW            0x290
 #define __LC_KERNEL_STACK               0xD40
 #define __LC_THREAD_INFO		0xD48

@@ -106,8 +106,10 @@
 #define __LC_IPLDEV                     0xDB8
 #define __LC_CURRENT			0xDD8
 #define __LC_INT_CLOCK			0xDE8
+#define __LC_VDSO_PER_CPU		0xE38
 #endif /* __s390x__ */
 
+#define __LC_PASTE			0xE40
+
 #define __LC_PANIC_MAGIC		0xE00
 #ifndef __s390x__

@@ -252,11 +254,11 @@ struct _lowcore
 	__u64        sync_enter_timer;         /* 0x248 */
 	__u64        async_enter_timer;        /* 0x250 */
 	__u64        exit_timer;               /* 0x258 */
-	__u64        last_update_timer;        /* 0x260 */
-	__u64        user_timer;               /* 0x268 */
-	__u64        system_timer;             /* 0x270 */
-	__u64        last_update_clock;        /* 0x278 */
-	__u64        steal_clock;              /* 0x280 */
+	__u64	     user_timer;	       /* 0x260 */
+	__u64	     system_timer;	       /* 0x268 */
+	__u64	     steal_timer;	       /* 0x270 */
+	__u64	     last_update_timer;        /* 0x278 */
+	__u64	     last_update_clock;        /* 0x280 */
         psw_t        return_mcck_psw;          /* 0x288 */
 	__u8         pad8[0xc00-0x290];        /* 0x290 */
 

@@ -343,11 +345,11 @@ struct _lowcore
 	__u64        sync_enter_timer;         /* 0x250 */
 	__u64        async_enter_timer;        /* 0x258 */
 	__u64        exit_timer;               /* 0x260 */
-	__u64        last_update_timer;        /* 0x268 */
-	__u64        user_timer;               /* 0x270 */
-	__u64        system_timer;             /* 0x278 */
-	__u64        last_update_clock;        /* 0x280 */
-	__u64        steal_clock;              /* 0x288 */
+	__u64	     user_timer;	       /* 0x268 */
+	__u64	     system_timer;	       /* 0x270 */
+	__u64	     steal_timer;	       /* 0x278 */
+	__u64	     last_update_timer;        /* 0x280 */
+	__u64	     last_update_clock;        /* 0x288 */
         psw_t        return_mcck_psw;          /* 0x290 */
         __u8         pad8[0xc00-0x2a0];        /* 0x2a0 */
         /* System info area */

@@ -381,7 +383,12 @@ struct _lowcore
         /* whether the kernel died with panic() or not */
         __u32        panic_magic;              /* 0xe00 */
 
-	__u8         pad13[0x11b8-0xe04];      /* 0xe04 */
+	/* Per cpu primary space access list */
+	__u8	     pad_0xe04[0xe3c-0xe04];   /* 0xe04 */
+	__u32	     vdso_per_cpu_data;	       /* 0xe3c */
+	__u32	     paste[16];		       /* 0xe40 */
+
+	__u8	     pad13[0x11b8-0xe80];      /* 0xe80 */
 
 	/* 64 bit extparam used for pfault, diag 250 etc  */
 	__u64        ext_params2;               /* 0x11B8 */
@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					     \
 } while (0)
 
-extern void account_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
 

@@ -121,7 +121,7 @@ static inline void cmma_init(void) { }
 
 #define finish_arch_switch(prev) do {					     \
 	set_fs(current->thread.mm_segment);				     \
-	account_vtime(prev);						     \
+	account_vtime(prev, current);					     \
 } while (0)
 
 #define nop() asm volatile("nop")
@@ -47,6 +47,8 @@ struct thread_info {
 	unsigned int		cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	struct restart_block	restart_block;
+	__u64			user_timer;
+	__u64			system_timer;
 };
 
 /*
@@ -23,20 +23,18 @@ struct vtimer_list {
 	__u64 expires;
 	__u64 interval;
 
-	spinlock_t lock;
-	unsigned long magic;
-
 	void (*function)(unsigned long);
 	unsigned long data;
 };
 
-/* the offset value will wrap after ca. 71 years */
+/* the vtimer value will wrap after ca. 71 years */
 struct vtimer_queue {
 	struct list_head list;
 	spinlock_t lock;
-	__u64 to_expire;	  /* current event expire time */
-	__u64 offset;		  /* list offset to zero */
-	__u64 idle;		  /* temp var for idle */
+	__u64 timer;		/* last programmed timer */
+	__u64 elapsed;		/* elapsed time of timer expire values */
+	__u64 idle;		/* temp var for idle */
+	int do_spt;		/* =1: reprogram cpu timer in idle */
 };
 
 extern void init_virt_timer(struct vtimer_list *timer);

@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);
 
-extern void vtime_start_cpu_timer(void);
-extern void vtime_stop_cpu_timer(void);
+extern void vtime_stop_cpu(void);
+extern void vtime_start_leave(void);
 
 #endif /* __KERNEL__ */
 
@@ -12,9 +12,9 @@
 #ifndef __ASSEMBLY__
 
 /*
- * Note about this structure:
+ * Note about the vdso_data and vdso_per_cpu_data structures:
  *
- * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
  * structure is supposed to be known only to the function in the vdso
  * itself and may change without notice.
  */

@@ -28,10 +28,21 @@ struct vdso_data {
 	__u64 wtom_clock_nsec;		/*				0x28 */
 	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
 	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
+	__u32 ectg_available;
+};
+
+struct vdso_per_cpu_data {
+	__u64 ectg_timer_base;
+	__u64 ectg_user_time;
 };
 
 extern struct vdso_data *vdso_data;
 
+#ifdef CONFIG_64BIT
+int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
+void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
@@ -48,6 +48,11 @@ int main(void)
 	DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+	DEFINE(__VDSO_ECTG_BASE,
+	       offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+	DEFINE(__VDSO_ECTG_USER,
+	       offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
 	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
 	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
@@ -583,8 +583,8 @@ kernel_per:
 
 	.globl io_int_handler
 io_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16

@@ -723,8 +723,8 @@ io_notify_resume:
 
 	.globl	ext_int_handler
 ext_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16

@@ -750,6 +750,7 @@ __critical_end:
 
 	.globl mcck_int_handler
 mcck_int_handler:
+	stck	__LC_INT_CLOCK
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
@@ -177,8 +177,11 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 	.if !\sync
 	ni	\psworg+1,0xfd		# clear wait state bit
 	.endif
-	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
+	lg	%r14,__LC_VDSO_PER_CPU
+	lmg	%r0,%r13,SP_R0(%r15)	# load gprs 0-13 of user
 	stpt	__LC_EXIT_TIMER
+	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+	lmg	%r14,%r15,SP_R14(%r15)	# load grps 14-15 of user
 	lpswe	\psworg			# back to caller
 	.endm
 

@@ -559,8 +562,8 @@ kernel_per:
  */
 	.globl io_int_handler
 io_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32

@@ -721,8 +724,8 @@ io_notify_resume:
  */
 	.globl	ext_int_handler
 ext_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32

@@ -746,6 +749,7 @@ __critical_end:
 */
 	.globl mcck_int_handler
 mcck_int_handler:
+	stck	__LC_INT_CLOCK
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs

@@ -979,23 +983,23 @@ cleanup_sysc_return:
 
 cleanup_sysc_leave:
 	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)
-	je	2f
-	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	je	3f
 	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
-	je	2f
-	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
+	jhe	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
 	cghi	%r12,__LC_MCK_OLD_PSW
-	jne	0f
+	jne	1f
 	mvc	__LC_SAVE_AREA+64(32),SP_R12(%r15)
-	j	1f
-0:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
-1:	lmg	%r0,%r11,SP_R0(%r15)
+	j	2f
+1:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
+2:	lmg	%r0,%r11,SP_R0(%r15)
 	lg	%r15,SP_R15(%r15)
-2:	la	%r12,__LC_RETURN_PSW
+3:	la	%r12,__LC_RETURN_PSW
 	br	%r14
 cleanup_sysc_leave_insn:
 	.quad	sysc_done - 4
-	.quad	sysc_done - 8
+	.quad	sysc_done - 16
 
 cleanup_io_return:
 	mvc	__LC_RETURN_PSW(8),0(%r12)

@@ -1005,23 +1009,23 @@ cleanup_io_return:
 
 cleanup_io_leave:
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn)
-	je	2f
-	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	je	3f
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn+8)
-	je	2f
-	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
+	jhe	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
 	cghi	%r12,__LC_MCK_OLD_PSW
-	jne	0f
+	jne	1f
 	mvc	__LC_SAVE_AREA+64(32),SP_R12(%r15)
-	j	1f
-0:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
-1:	lmg	%r0,%r11,SP_R0(%r15)
+	j	2f
+1:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
+2:	lmg	%r0,%r11,SP_R0(%r15)
 	lg	%r15,SP_R15(%r15)
-2:	la	%r12,__LC_RETURN_PSW
+3:	la	%r12,__LC_RETURN_PSW
 	br	%r14
cleanup_io_leave_insn:
 	.quad	io_done - 4
-	.quad	io_done - 8
+	.quad	io_done - 16
 
 /*
  * Integer constants
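
The exit-path change above is what feeds the vdso fast path: after the
lowcore reordering, __LC_EXIT_TIMER and __LC_USER_TIMER sit next to each
other, so the single 16-byte mvc snapshots both into the per-cpu vdso
page (ectg_timer_base and ectg_user_time). Conceptually the userspace
side then computes the thread's CPU time roughly as below. This is a
hedged C sketch of the arithmetic, not the actual vdso assembly, and it
assumes the down-counting semantics of the s390 CPU timer:

/* current_cpu_timer stands for the value the ECTG instruction samples.
 * The CPU timer counts down, so the time spent in user space since the
 * last kernel exit is ectg_timer_base - current_cpu_timer. The result
 * is in 2**-12 microsecond units. */
static inline unsigned long long thread_cputime_units(
		unsigned long long current_cpu_timer,
		const struct vdso_per_cpu_data *vpcd)
{
	return vpcd->ectg_user_time +
	       (vpcd->ectg_timer_base - current_cpu_timer);
}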
@@ -87,6 +87,8 @@ startup_continue:
 	lg	%r12,.Lparmaddr-.LPG1(%r13)	# pointer to parameter area
 					# move IPL device to lowcore
 	mvc	__LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+	lghi	%r0,__LC_PASTE
+	stg	%r0,__LC_VDSO_PER_CPU
 #
 # Setup stack
 #
@@ -38,6 +38,7 @@
 #include <linux/utsname.h>
 #include <linux/tick.h>
 #include <linux/elfcore.h>
+#include <linux/kernel_stat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>

@@ -45,7 +46,6 @@
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/timer.h>
-#include <asm/cpu.h>
 #include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

@@ -75,36 +75,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return sf->gprs[8];
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
-	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-static int s390_idle_enter(void)
-{
-	struct s390_idle_data *idle;
-
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_count++;
-	idle->in_idle = 1;
-	idle->idle_enter = get_clock();
-	spin_unlock(&idle->lock);
-	vtime_stop_cpu_timer();
-	return NOTIFY_OK;
-}
-
-void s390_idle_leave(void)
-{
-	struct s390_idle_data *idle;
-
-	vtime_start_cpu_timer();
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_time += get_clock() - idle->idle_enter;
-	idle->in_idle = 0;
-	spin_unlock(&idle->lock);
-}
-
 extern void s390_handle_mcck(void);
 /*
  * The idle loop on a S390...

@@ -117,10 +87,6 @@ static void default_idle(void)
 		local_irq_enable();
 		return;
 	}
-	if (s390_idle_enter() == NOTIFY_BAD) {
-		local_irq_enable();
-		return;
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	if (cpu_is_offline(smp_processor_id())) {
 		preempt_enable_no_resched();

@@ -130,7 +96,6 @@ static void default_idle(void)
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;

@@ -138,9 +103,9 @@ static void default_idle(void)
 	trace_hardirqs_on();
 	/* Don't trace preempt off for idle. */
 	stop_critical_timings();
-	/* Wait for external, I/O or machine check interrupt. */
-	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-			PSW_MASK_IO | PSW_MASK_EXT);
+	/* Stop virtual timer and halt the cpu. */
+	vtime_stop_cpu();
+	/* Reenable preemption tracer. */
 	start_critical_timings();
 }
@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 	struct pt_regs *old_regs;
 
 	old_regs = set_irq_regs(regs);
-	irq_enter();
 	s390_idle_check();
+	irq_enter();
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
@@ -427,6 +427,8 @@ setup_lowcore(void)
 		/* enable extended save area */
 		__ctl_set_bit(14, 29);
 	}
+#else
+	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
 #endif
 	set_prefix((u32)(unsigned long) lc);
 }
@@ -47,6 +47,7 @@
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/cpu.h>
+#include <asm/vdso.h>
 #include "entry.h"
 
 /*

@@ -500,6 +501,9 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 			goto out;
 		lowcore->extended_save_area_addr = (u32) save_area;
 	}
+#else
+	if (vdso_alloc_per_cpu(cpu, lowcore))
+		goto out;
 #endif
 	lowcore_ptr[cpu] = lowcore;
 	return 0;

@@ -522,6 +526,8 @@ static void smp_free_lowcore(int cpu)
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		free_page((unsigned long) lowcore->extended_save_area_addr);
+#else
+	vdso_free_per_cpu(cpu, lowcore);
 #endif
 	free_page(lowcore->panic_stack - PAGE_SIZE);
 	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);

@@ -664,6 +670,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
 	panic_stack = __get_free_page(GFP_KERNEL);
 	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+	BUG_ON(!lowcore || !panic_stack || !async_stack);
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		save_area = get_zeroed_page(GFP_KERNEL);

@@ -677,6 +684,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		lowcore->extended_save_area_addr = (u32) save_area;
+#else
+	BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
 #endif
 	set_prefix((u32)(unsigned long) lowcore);
 	local_mcck_enable();

@@ -845,9 +854,11 @@ static ssize_t show_idle_count(struct sys_device *dev,
 	unsigned long long idle_count;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock_irq(&idle->lock);
+	spin_lock(&idle->lock);
 	idle_count = idle->idle_count;
-	spin_unlock_irq(&idle->lock);
+	if (idle->idle_enter)
+		idle_count++;
+	spin_unlock(&idle->lock);
 	return sprintf(buf, "%llu\n", idle_count);
 }
 static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

@@ -856,18 +867,17 @@ static ssize_t show_idle_time(struct sys_device *dev,
 				struct sysdev_attribute *attr, char *buf)
 {
 	struct s390_idle_data *idle;
-	unsigned long long new_time;
+	unsigned long long now, idle_time, idle_enter;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock_irq(&idle->lock);
-	if (idle->in_idle) {
-		new_time = get_clock();
-		idle->idle_time += new_time - idle->idle_enter;
-		idle->idle_enter = new_time;
-	}
-	new_time = idle->idle_time;
-	spin_unlock_irq(&idle->lock);
-	return sprintf(buf, "%llu\n", new_time >> 12);
+	spin_lock(&idle->lock);
+	now = get_clock();
+	idle_time = idle->idle_time;
+	idle_enter = idle->idle_enter;
+	if (idle_enter != 0ULL && idle_enter < now)
+		idle_time += now - idle_enter;
+	spin_unlock(&idle->lock);
+	return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
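
The two sysfs attributes above are the consumer-visible side of the
idle-time rework: idle_time_us now stays current even while the CPU is
sitting in idle, because the elapsed portion since idle_enter is added
on the fly. A minimal reader, assuming the usual
/sys/devices/system/cpu/cpuN location of these s390 attributes:

#include <stdio.h>

int main(void)
{
	unsigned long long idle_us;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/idle_time_us", "r");

	if (!f) {
		perror("idle_time_us");
		return 1;
	}
	if (fscanf(f, "%llu", &idle_us) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("cpu0 idle: %llu us\n", idle_us);
	return 0;
}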
@@ -31,9 +31,6 @@
 #include <asm/sections.h>
 #include <asm/vdso.h>
 
-/* Max supported size for symbol names */
-#define MAX_SYMNAME	64
-
 #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
 extern char vdso32_start, vdso32_end;
 static void *vdso32_kbase = &vdso32_start;

@@ -70,6 +67,119 @@ static union {
 } vdso_data_store __attribute__((__section__(".data.page_aligned")));
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
+/*
+ * Setup vdso data page.
+ */
+static void vdso_init_data(struct vdso_data *vd)
+{
+	unsigned int facility_list;
+
+	facility_list = stfl();
+	vd->ectg_available = switch_amode && (facility_list & 1);
+}
+
+#ifdef CONFIG_64BIT
+/*
+ * Setup per cpu vdso data page.
+ */
+static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
+{
+}
+
+/*
+ * Allocate/free per cpu vdso data.
+ */
+#ifdef CONFIG_64BIT
+#define SEGMENT_ORDER	2
+#else
+#define SEGMENT_ORDER	1
+#endif
+
+int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
+{
+	unsigned long segment_table, page_table, page_frame;
+	u32 *psal, *aste;
+	int i;
+
+	lowcore->vdso_per_cpu_data = __LC_PASTE;
+
+	if (!switch_amode || !vdso_enabled)
+		return 0;
+
+	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
+	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	page_frame = get_zeroed_page(GFP_KERNEL);
+	if (!segment_table || !page_table || !page_frame)
+		goto out;
+
+	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
+		    PAGE_SIZE << SEGMENT_ORDER);
+	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+		    256*sizeof(unsigned long));
+
+	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
+	*(unsigned long *) page_table = _PAGE_RO + page_frame;
+
+	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
+	aste = psal + 32;
+
+	for (i = 4; i < 32; i += 4)
+		psal[i] = 0x80000000;
+
+	lowcore->paste[4] = (u32)(addr_t) psal;
+	psal[0] = 0x20000000;
+	psal[2] = (u32)(addr_t) aste;
+	*(unsigned long *) (aste + 2) = segment_table +
+		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
+	aste[4] = (u32)(addr_t) psal;
+	lowcore->vdso_per_cpu_data = page_frame;
+
+	vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
+	return 0;
+
+out:
+	free_page(page_frame);
+	free_page(page_table);
+	free_pages(segment_table, SEGMENT_ORDER);
+	return -ENOMEM;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
+{
+	unsigned long segment_table, page_table, page_frame;
+	u32 *psal, *aste;
+
+	if (!switch_amode || !vdso_enabled)
+		return;
+
+	psal = (u32 *)(addr_t) lowcore->paste[4];
						psal = (u32 *)(addr_t) lowcore->paste[4];
 | 
				
			||||||
 | 
						aste = (u32 *)(addr_t) psal[2];
 | 
				
			||||||
 | 
						segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
 | 
				
			||||||
 | 
						page_table = *(unsigned long *) segment_table;
 | 
				
			||||||
 | 
						page_frame = *(unsigned long *) page_table;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						free_page(page_frame);
 | 
				
			||||||
 | 
						free_page(page_table);
 | 
				
			||||||
 | 
						free_pages(segment_table, SEGMENT_ORDER);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					#endif /* CONFIG_HOTPLUG_CPU */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static void __vdso_init_cr5(void *dummy)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						unsigned long cr5;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						cr5 = offsetof(struct _lowcore, paste);
 | 
				
			||||||
 | 
						__ctl_load(cr5, 5, 5);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static void vdso_init_cr5(void)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						if (switch_amode && vdso_enabled)
 | 
				
			||||||
 | 
							on_each_cpu(__vdso_init_cr5, NULL, 1);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					#endif /* CONFIG_64BIT */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
 * This is called from binfmt_elf, we create the special vma for the
 | 
					 * This is called from binfmt_elf, we create the special vma for the
 | 
				
			||||||
 * vDSO and insert it into the mm struct tree
 | 
					 * vDSO and insert it into the mm struct tree
 | 
				
			||||||
| 
						 | 
@@ -172,6 +282,9 @@ static int __init vdso_init(void)
 {
 	int i;
 
+	if (!vdso_enabled)
+		return 0;
+	vdso_init_data(vdso_data);
 #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
 	/* Calculate the size of the 32 bit vDSO */
 	vdso32_pages = ((&vdso32_end - &vdso32_start
@@ -208,6 +321,10 @@ static int __init vdso_init(void)
 	}
 	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
 	vdso64_pagelist[vdso64_pages] = NULL;
+#ifndef CONFIG_SMP
+	BUG_ON(vdso_alloc_per_cpu(0, &S390_lowcore));
+#endif
+	vdso_init_cr5();
 #endif /* CONFIG_64BIT */
 
 	get_page(virt_to_page(vdso_data));
@@ -22,7 +22,12 @@ __kernel_clock_getres:
 	cghi	%r2,CLOCK_REALTIME
 	je	0f
 	cghi	%r2,CLOCK_MONOTONIC
+	je	0f
+	cghi	%r2,-2		/* CLOCK_THREAD_CPUTIME_ID for this thread */
 	jne	2f
+	larl	%r5,_vdso_data
+	icm	%r0,15,__LC_ECTG_OK(%r5)
+	jz	2f
 0:	ltgr	%r3,%r3
 	jz	1f				/* res == NULL */
 	larl	%r1,3f
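
Note on the special clockid -2: posix-cpu-timers clockids pack the (inverted) task id into the upper bits, use bit 2 to mark a per-thread clock, and bits 0-1 for the clock type, where CPUCLOCK_SCHED is 2. A tid of 0 means "the calling thread", so the thread cputime clock of the caller comes out as -2. A hedged sketch of that encoding (names follow the kernel's posix-timers headers, simplified here):

	#include <stdio.h>

	#define CPUCLOCK_SCHED		2	/* bits 0-1: clock type */
	#define CPUCLOCK_PERTHREAD_MASK	4	/* bit 2: per-thread clock */

	/* tid 0 means "the calling thread". */
	#define MAKE_THREAD_CPUCLOCK(tid, clock) \
		((int)((~(unsigned)(tid) << 3) | CPUCLOCK_PERTHREAD_MASK | (clock)))

	int main(void)
	{
		printf("%d\n", MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)); /* -2 */
		return 0;
	}
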
@@ -22,8 +22,10 @@ __kernel_clock_gettime:
 	larl	%r5,_vdso_data
 	cghi	%r2,CLOCK_REALTIME
 	je	4f
+	cghi	%r2,-2		/* CLOCK_THREAD_CPUTIME_ID for this thread */
+	je	9f
 	cghi	%r2,CLOCK_MONOTONIC
-	jne	9f
+	jne	12f
 
 	/* CLOCK_MONOTONIC */
 	ltgr	%r3,%r3
@@ -42,7 +44,7 @@ __kernel_clock_gettime:
 	alg	%r0,__VDSO_WTOM_SEC(%r5)
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
-	larl	%r5,10f
+	larl	%r5,13f
 1:	clg	%r1,0(%r5)
 	jl	2f
 	slg	%r1,0(%r5)
@@ -68,7 +70,7 @@ __kernel_clock_gettime:
 	lg	%r0,__VDSO_XTIME_SEC(%r5)
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	5b
-	larl	%r5,10f
+	larl	%r5,13f
 6:	clg	%r1,0(%r5)
 	jl	7f
 	slg	%r1,0(%r5)
@@ -79,11 +81,38 @@ __kernel_clock_gettime:
 8:	lghi	%r2,0
 	br	%r14
 
+	/* CLOCK_THREAD_CPUTIME_ID for this thread */
+9:	icm	%r0,15,__VDSO_ECTG_OK(%r5)
+	jz	12f
+	ear	%r2,%a4
+	llilh	%r4,0x0100
+	sar	%a4,%r4
+	lghi	%r4,0
+	sacf	512				/* Magic ectg instruction */
+	.insn	ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
+	sacf	0
+	sar	%a4,%r2
+	algr	%r1,%r0				/* r1 = cputime as TOD value */
+	mghi	%r1,1000			/* convert to nanoseconds */
+	srlg	%r1,%r1,12			/* r1 = cputime in nanosec */
+	lgr	%r4,%r1
+	larl	%r5,13f
+	srlg	%r1,%r1,9			/* divide by 1000000000 */
+	mlg	%r0,8(%r5)
+	srlg	%r0,%r0,11			/* r0 = tv_sec */
+	stg	%r0,0(%r3)
+	msg	%r0,0(%r5)			/* calculate tv_nsec */
+	slgr	%r4,%r0				/* r4 = tv_nsec */
+	stg	%r4,8(%r3)
+	lghi	%r2,0
+	br	%r14
+
 	/* Fallback to system call */
-9:	lghi	%r1,__NR_clock_gettime
+12:	lghi	%r1,__NR_clock_gettime
 	svc	0
 	br	%r14
 
-10:	.quad	1000000000
+13:	.quad	1000000000
+14:	.quad	19342813113834067
 	.cfi_endproc
 	.size	__kernel_clock_gettime,.-__kernel_clock_gettime
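
The new constant at label 14 makes the fast path divide nanoseconds by 10^9 without a divide instruction: 19342813113834067 is ceil(2^84 / 10^9), so srlg by 9, a 128-bit mlg, and srlg of the high half by 11 compute tv_sec = ((ns >> 9) * C) >> 75, a total shift of 84 bits. A standalone model of that arithmetic (my own check, not from the patch; unsigned __int128 is a GCC/Clang extension standing in for mlg's 128-bit product):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* C = ceil(2^84 / 10^9); exact for any realistic cputime
		 * (the approximation only breaks down after centuries). */
		const uint64_t C = 19342813113834067ULL;
		uint64_t ns = 123456789012345ULL;	/* made-up cputime */

		uint64_t sec =
			(uint64_t)(((unsigned __int128)(ns >> 9) * C) >> 75);

		assert(sec == ns / 1000000000ULL);		/* 123456 */
		assert(ns - sec * 1000000000ULL == 789012345ULL);
		return 0;
	}
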
@@ -23,94 +23,24 @@
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
 #include <asm/irq_regs.h>
+#include <asm/cpu.h>
 
 static ext_int_info_t ext_int_info_timer;
 
 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_process_tick(struct task_struct *tsk, int user_tick)
-{
-	cputime_t cputime;
-	__u64 timer, clock;
-	int rcu_user_flag;
-
-	timer = S390_lowcore.last_update_timer;
-	clock = S390_lowcore.last_update_clock;
-	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
-		      "  STCK %1"      /* Store current tod clock value */
-		      : "=m" (S390_lowcore.last_update_timer),
-		        "=m" (S390_lowcore.last_update_clock) );
-	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
-
-	cputime = S390_lowcore.user_timer >> 12;
-	rcu_user_flag = cputime != 0;
-	S390_lowcore.user_timer -= cputime << 12;
-	S390_lowcore.steal_clock -= cputime << 12;
-	account_user_time(tsk, cputime);
-
-	cputime =  S390_lowcore.system_timer >> 12;
-	S390_lowcore.system_timer -= cputime << 12;
-	S390_lowcore.steal_clock -= cputime << 12;
-	account_system_time(tsk, HARDIRQ_OFFSET, cputime);
-
-	cputime = S390_lowcore.steal_clock;
-	if ((__s64) cputime > 0) {
-		cputime >>= 12;
-		S390_lowcore.steal_clock -= cputime << 12;
-		account_steal_time(tsk, cputime);
-	}
-}
-
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_vtime(struct task_struct *tsk)
-{
-	cputime_t cputime;
-	__u64 timer;
-
-	timer = S390_lowcore.last_update_timer;
-	asm volatile ("  STPT %0"    /* Store current cpu timer value */
-		      : "=m" (S390_lowcore.last_update_timer) );
-	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
-	cputime = S390_lowcore.user_timer >> 12;
-	S390_lowcore.user_timer -= cputime << 12;
-	S390_lowcore.steal_clock -= cputime << 12;
-	account_user_time(tsk, cputime);
-
-	cputime =  S390_lowcore.system_timer >> 12;
-	S390_lowcore.system_timer -= cputime << 12;
-	S390_lowcore.steal_clock -= cputime << 12;
-	account_system_time(tsk, 0, cputime);
-}
-
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_system_vtime(struct task_struct *tsk)
-{
-	cputime_t cputime;
-	__u64 timer;
-
-	timer = S390_lowcore.last_update_timer;
-	asm volatile ("  STPT %0"    /* Store current cpu timer value */
-		      : "=m" (S390_lowcore.last_update_timer) );
-	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
-	cputime =  S390_lowcore.system_timer >> 12;
-	S390_lowcore.system_timer -= cputime << 12;
-	S390_lowcore.steal_clock -= cputime << 12;
-	account_system_time(tsk, 0, cputime);
-}
-EXPORT_SYMBOL_GPL(account_system_vtime);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
+	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
+};
+
+static inline __u64 get_vtimer(void)
+{
+	__u64 timer;
+
+	asm volatile("STPT %0" : "=m" (timer));
+	return timer;
+}
 
 static inline void set_vtimer(__u64 expires)
 {
 	__u64 timer;
@@ -120,56 +50,192 @@ static inline void set_vtimer(__u64 expires)
 		      : "=m" (timer) : "m" (expires) );
 	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
 	S390_lowcore.last_update_timer = expires;
-
-	/* store expire time for this CPU timer */
-	__get_cpu_var(virt_cpu_timer).to_expire = expires;
 }
 
-void vtime_start_cpu_timer(void)
-{
-	struct vtimer_queue *vt_list;
-
-	vt_list = &__get_cpu_var(virt_cpu_timer);
-
-	/* CPU timer interrupt is pending, don't reprogramm it */
-	if (vt_list->idle & 1LL<<63)
-		return;
-
-	if (!list_empty(&vt_list->list))
-		set_vtimer(vt_list->idle);
-}
-
-void vtime_stop_cpu_timer(void)
-{
-	struct vtimer_queue *vt_list;
-
-	vt_list = &__get_cpu_var(virt_cpu_timer);
-
-	/* nothing to do */
-	if (list_empty(&vt_list->list)) {
-		vt_list->idle = VTIMER_MAX_SLICE;
-		goto fire;
-	}
-
-	/* store the actual expire value */
-	asm volatile ("STPT %0" : "=m" (vt_list->idle));
-
-	/*
-	 * If the CPU timer is negative we don't reprogramm
-	 * it because we will get instantly an interrupt.
-	 */
-	if (vt_list->idle & 1LL<<63)
-		return;
-
-	vt_list->offset += vt_list->to_expire - vt_list->idle;
-
-	/*
-	 * We cannot halt the CPU timer, we just write a value that
-	 * nearly never expires (only after 71 years) and re-write
-	 * the stored expire value if we continue the timer
-	 */
- fire:
-	set_vtimer(VTIMER_MAX_SLICE);
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	__u64 timer, clock, user, system, steal;
+
+	timer = S390_lowcore.last_update_timer;
+	clock = S390_lowcore.last_update_clock;
+	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
+		      "  STCK %1"      /* Store current tod clock value */
+		      : "=m" (S390_lowcore.last_update_timer),
+		        "=m" (S390_lowcore.last_update_clock) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+
+	user = S390_lowcore.user_timer - ti->user_timer;
+	S390_lowcore.steal_timer -= user;
+	ti->user_timer = S390_lowcore.user_timer;
+	account_user_time(tsk, user, user);
+
+	system = S390_lowcore.system_timer - ti->system_timer;
+	S390_lowcore.steal_timer -= system;
+	ti->system_timer = S390_lowcore.system_timer;
+	account_system_time(tsk, hardirq_offset, system, system);
+
+	steal = S390_lowcore.steal_timer;
+	if ((s64) steal > 0) {
+		S390_lowcore.steal_timer = 0;
+		account_steal_time(steal);
+	}
+}
+
+void account_vtime(struct task_struct *prev, struct task_struct *next)
+{
+	struct thread_info *ti;
+
+	do_account_vtime(prev, 0);
+	ti = task_thread_info(prev);
+	ti->user_timer = S390_lowcore.user_timer;
+	ti->system_timer = S390_lowcore.system_timer;
+	ti = task_thread_info(next);
+	S390_lowcore.user_timer = ti->user_timer;
+	S390_lowcore.system_timer = ti->system_timer;
+}
+
+void account_process_tick(struct task_struct *tsk, int user_tick)
+{
+	do_account_vtime(tsk, HARDIRQ_OFFSET);
+}
+
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	__u64 timer, system;
+
+	timer = S390_lowcore.last_update_timer;
+	S390_lowcore.last_update_timer = get_vtimer();
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+	system = S390_lowcore.system_timer - ti->system_timer;
+	S390_lowcore.steal_timer -= system;
+	ti->system_timer = S390_lowcore.system_timer;
+	account_system_time(tsk, 0, system, system);
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+void vtime_start_cpu(void)
+{
+	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+	__u64 idle_time, expires;
+
+	/* Account time spent with enabled wait psw loaded as idle time. */
+	idle_time = S390_lowcore.int_clock - idle->idle_enter;
+	account_idle_time(idle_time);
+	S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+
+	/* Account system time spent going idle. */
+	S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
+	S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
+
+	/* Restart vtime CPU timer */
+	if (vq->do_spt) {
+		/* Program old expire value but first save progress. */
+		expires = vq->idle - S390_lowcore.async_enter_timer;
+		expires += get_vtimer();
+		set_vtimer(expires);
+	} else {
+		/* Don't account the CPU timer delta while the cpu was idle. */
+		vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
+	}
+
+	spin_lock(&idle->lock);
+	idle->idle_time += idle_time;
+	idle->idle_enter = 0ULL;
+	idle->idle_count++;
+	spin_unlock(&idle->lock);
+}
+
+void vtime_stop_cpu(void)
+{
+	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+	psw_t psw;
+
+	/* Wait for external, I/O or machine check interrupt. */
+	psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
+
+	/* Check if the CPU timer needs to be reprogrammed. */
+	if (vq->do_spt) {
+		__u64 vmax = VTIMER_MAX_SLICE;
+		/*
+		 * The inline assembly is equivalent to
+		 *	vq->idle = get_cpu_timer();
+		 *	set_cpu_timer(VTIMER_MAX_SLICE);
+		 *	idle->idle_enter = get_clock();
+		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+		 *			   PSW_MASK_IO | PSW_MASK_EXT);
+		 * The difference is that the inline assembly makes sure that
+		 * the last three instruction are stpt, stck and lpsw in that
+		 * order. This is done to increase the precision.
+		 */
+		asm volatile(
+#ifndef CONFIG_64BIT
+			"	basr	1,0\n"
+			"0:	ahi	1,1f-0b\n"
+			"	st	1,4(%2)\n"
+#else /* CONFIG_64BIT */
+			"	larl	1,1f\n"
+			"	stg	1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+			"	stpt	0(%4)\n"
+			"	spt	0(%5)\n"
+			"	stck	0(%3)\n"
+#ifndef CONFIG_64BIT
+			"	lpsw	0(%2)\n"
+#else /* CONFIG_64BIT */
+			"	lpswe	0(%2)\n"
+#endif /* CONFIG_64BIT */
+			"1:"
+			: "=m" (idle->idle_enter), "=m" (vq->idle)
+			: "a" (&psw), "a" (&idle->idle_enter),
+			  "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
+			: "memory", "cc", "1");
+	} else {
+		/*
+		 * The inline assembly is equivalent to
+		 *	vq->idle = get_cpu_timer();
+		 *	idle->idle_enter = get_clock();
+		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+		 *			   PSW_MASK_IO | PSW_MASK_EXT);
+		 * The difference is that the inline assembly makes sure that
+		 * the last three instruction are stpt, stck and lpsw in that
+		 * order. This is done to increase the precision.
+		 */
+		asm volatile(
+#ifndef CONFIG_64BIT
+			"	basr	1,0\n"
+			"0:	ahi	1,1f-0b\n"
+			"	st	1,4(%2)\n"
+#else /* CONFIG_64BIT */
+			"	larl	1,1f\n"
+			"	stg	1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+			"	stpt	0(%4)\n"
+			"	stck	0(%3)\n"
+#ifndef CONFIG_64BIT
+			"	lpsw	0(%2)\n"
+#else /* CONFIG_64BIT */
+			"	lpswe	0(%2)\n"
+#endif /* CONFIG_64BIT */
+			"1:"
+			: "=m" (idle->idle_enter), "=m" (vq->idle)
+			: "a" (&psw), "a" (&idle->idle_enter),
+			  "a" (&vq->idle), "m" (psw)
+			: "memory", "cc", "1");
+	}
 }
 
 /*
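
Note: vtime_start_cpu() splits a wakeup into two pieces. Wall-clock time spent in the enabled-wait PSW (int_clock minus idle_enter, both TOD values) becomes idle time, while the CPU-timer delta consumed on the way into and out of the wait remains system time. A toy check of the idle window with made-up snapshots:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t idle_enter = 5000;	/* STCK taken just before lpsw */
		uint64_t int_clock = 9096;	/* TOD at the waking interrupt */

		/* Only the enabled-wait window counts as idle time. */
		assert(int_clock - idle_enter == 4096);	/* 1 us in TOD units */
		return 0;
	}
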
@@ -195,30 +261,23 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
  */
 static void do_callbacks(struct list_head *cb_list)
 {
-	struct vtimer_queue *vt_list;
+	struct vtimer_queue *vq;
 	struct vtimer_list *event, *tmp;
-	void (*fn)(unsigned long);
-	unsigned long data;
 
 	if (list_empty(cb_list))
 		return;
 
-	vt_list = &__get_cpu_var(virt_cpu_timer);
+	vq = &__get_cpu_var(virt_cpu_timer);
 
 	list_for_each_entry_safe(event, tmp, cb_list, entry) {
-		fn = event->function;
-		data = event->data;
-		fn(data);
-
-		if (!event->interval)
-			/* delete one shot timer */
-			list_del_init(&event->entry);
-		else {
-			/* move interval timer back to list */
-			spin_lock(&vt_list->lock);
-			list_del_init(&event->entry);
-			list_add_sorted(event, &vt_list->list);
-			spin_unlock(&vt_list->lock);
+		list_del_init(&event->entry);
+		(event->function)(event->data);
+		if (event->interval) {
+			/* Recharge interval timer */
+			event->expires = event->interval + vq->elapsed;
+			spin_lock(&vq->lock);
+			list_add_sorted(event, &vq->list);
+			spin_unlock(&vq->lock);
 		}
 	}
 }
@@ -228,64 +287,57 @@ static void do_callbacks(struct list_head *cb_list)
  */
 static void do_cpu_timer_interrupt(__u16 error_code)
 {
-	__u64 next, delta;
-	struct vtimer_queue *vt_list;
+	struct vtimer_queue *vq;
 	struct vtimer_list *event, *tmp;
-	struct list_head *ptr;
-	/* the callback queue */
-	struct list_head cb_list;
+	struct list_head cb_list;	/* the callback queue */
+	__u64 elapsed, next;
 
 	INIT_LIST_HEAD(&cb_list);
-	vt_list = &__get_cpu_var(virt_cpu_timer);
+	vq = &__get_cpu_var(virt_cpu_timer);
 
 	/* walk timer list, fire all expired events */
-	spin_lock(&vt_list->lock);
-
-	if (vt_list->to_expire < VTIMER_MAX_SLICE)
-		vt_list->offset += vt_list->to_expire;
-
-	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
-		if (event->expires > vt_list->offset)
-			/* found first unexpired event, leave */
-			break;
-
-		/* re-charge interval timer, we have to add the offset */
-		if (event->interval)
-			event->expires = event->interval + vt_list->offset;
-
-		/* move expired timer to the callback queue */
-		list_move_tail(&event->entry, &cb_list);
+	spin_lock(&vq->lock);
+
+	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
+	BUG_ON((s64) elapsed < 0);
+	vq->elapsed = 0;
+	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
+		if (event->expires < elapsed)
+			/* move expired timer to the callback queue */
+			list_move_tail(&event->entry, &cb_list);
+		else
+			event->expires -= elapsed;
 	}
-	spin_unlock(&vt_list->lock);
+	spin_unlock(&vq->lock);
 
+	vq->do_spt = list_empty(&cb_list);
 	do_callbacks(&cb_list);
 
 	/* next event is first in list */
-	spin_lock(&vt_list->lock);
-	if (!list_empty(&vt_list->list)) {
-		ptr = vt_list->list.next;
-		event = list_entry(ptr, struct vtimer_list, entry);
-		next = event->expires - vt_list->offset;
-
-		/* add the expired time from this interrupt handler
-		 * and the callback functions
-		 */
-		asm volatile ("STPT %0" : "=m" (delta));
-		delta = 0xffffffffffffffffLL - delta + 1;
-		vt_list->offset += delta;
-		next -= delta;
-	} else {
-		vt_list->offset = 0;
-		next = VTIMER_MAX_SLICE;
-	}
-	spin_unlock(&vt_list->lock);
-	set_vtimer(next);
+	next = VTIMER_MAX_SLICE;
+	spin_lock(&vq->lock);
+	if (!list_empty(&vq->list)) {
+		event = list_first_entry(&vq->list, struct vtimer_list, entry);
+		next = event->expires;
+	} else
+		vq->do_spt = 0;
+	spin_unlock(&vq->lock);
+	/*
+	 * To improve precision add the time spent by the
+	 * interrupt handler to the elapsed time.
+	 * Note: CPU timer counts down and we got an interrupt,
+	 *	 the current content is negative
+	 */
+	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
+	set_vtimer(next - elapsed);
+	vq->timer = next - elapsed;
+	vq->elapsed = elapsed;
 }
 
 void init_virt_timer(struct vtimer_list *timer)
 {
 	timer->function = NULL;
 	INIT_LIST_HEAD(&timer->entry);
-	spin_lock_init(&timer->lock);
 }
 EXPORT_SYMBOL(init_virt_timer);
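
The precision fix above relies on the CPU timer being a down-counter that keeps running past zero: entry.S saved one snapshot at interrupt entry (async_enter_timer), the callbacks run, and by the time get_vtimer() is read the counter has ticked further down, so the unsigned difference of the two snapshots is exactly the handler's own runtime. A toy model of that arithmetic with made-up values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Down-counter snapshots taken after expiry: both "negative". */
		uint64_t async_enter_timer = (uint64_t)-30;
		uint64_t now = (uint64_t)-70;	/* what get_vtimer() returns */

		/* Unsigned subtraction still yields the 40 elapsed units. */
		assert(async_enter_timer - now == 40);
		return 0;
	}
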
@@ -299,44 +351,40 @@ static inline int vtimer_pending(struct vtimer_list *timer)
  */
 static void internal_add_vtimer(struct vtimer_list *timer)
 {
+	struct vtimer_queue *vq;
 	unsigned long flags;
-	__u64 done;
-	struct vtimer_list *event;
-	struct vtimer_queue *vt_list;
+	__u64 left, expires;
 
-	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vt_list->lock, flags);
+	vq = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vq->lock, flags);
 
 	BUG_ON(timer->cpu != smp_processor_id());
 
-	/* if list is empty we only have to set the timer */
-	if (list_empty(&vt_list->list)) {
-		/* reset the offset, this may happen if the last timer was
-		 * just deleted by mod_virt_timer and the interrupt
-		 * didn't happen until here
-		 */
-		vt_list->offset = 0;
-		goto fire;
+	if (list_empty(&vq->list)) {
+		/* First timer on this cpu, just program it. */
+		list_add(&timer->entry, &vq->list);
+		set_vtimer(timer->expires);
+		vq->timer = timer->expires;
+		vq->elapsed = 0;
+	} else {
+		/* Check progress of old timers. */
+		expires = timer->expires;
+		left = get_vtimer();
+		if (likely((s64) expires < (s64) left)) {
+			/* The new timer expires before the current timer. */
+			set_vtimer(expires);
+			vq->elapsed += vq->timer - left;
+			vq->timer = expires;
+		} else {
+			vq->elapsed += vq->timer - left;
+			vq->timer = left;
+		}
+		/* Insert new timer into per cpu list. */
+		timer->expires += vq->elapsed;
+		list_add_sorted(timer, &vq->list);
 	}
 
-	/* save progress */
-	asm volatile ("STPT %0" : "=m" (done));
-
-	/* calculate completed work */
-	done = vt_list->to_expire - done + vt_list->offset;
-	vt_list->offset = 0;
-
-	list_for_each_entry(event, &vt_list->list, entry)
-		event->expires -= done;
-
- fire:
-	list_add_sorted(timer, &vt_list->list);
-
-	/* get first element, which is the next vtimer slice */
-	event = list_entry(vt_list->list.next, struct vtimer_list, entry);
-
-	set_vtimer(event->expires);
-	spin_unlock_irqrestore(&vt_list->lock, flags);
+	spin_unlock_irqrestore(&vq->lock, flags);
 	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
 	put_cpu();
 }
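
Queued expiry values are kept relative to vq->elapsed, so the already queued timers never need rescaling when a new one is inserted. A worked sketch of the insertion branch above, with made-up numbers:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t vq_timer = 1000, vq_elapsed = 0;
		uint64_t left = 400;	/* get_vtimer(): 600 units already ran */
		uint64_t expires = 250;	/* new timer, measured from "now" */

		if ((int64_t) expires < (int64_t) left) {
			/* New timer fires first: program it, bank progress. */
			vq_elapsed += vq_timer - left;	/* 600 */
			vq_timer = expires;
		}
		/* The list entry is stored relative to the banked progress. */
		assert(expires + vq_elapsed == 850);
		return 0;
	}
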
@@ -381,14 +429,15 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
  * If we change a pending timer the function must be called on the CPU
  * where the timer is running on, e.g. by smp_call_function_single()
  *
- * The original mod_timer adds the timer if it is not pending. For compatibility
- * we do the same. The timer will be added on the current CPU as a oneshot timer.
+ * The original mod_timer adds the timer if it is not pending. For
+ * compatibility we do the same. The timer will be added on the current
+ * CPU as a oneshot timer.
  *
  * returns whether it has modified a pending timer (1) or not (0)
  */
 int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
 {
-	struct vtimer_queue *vt_list;
+	struct vtimer_queue *vq;
 	unsigned long flags;
 	int cpu;
 
@@ -404,17 +453,17 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
 		return 1;
 
 	cpu = get_cpu();
-	vt_list = &per_cpu(virt_cpu_timer, cpu);
+	vq = &per_cpu(virt_cpu_timer, cpu);
 
 	/* check if we run on the right CPU */
 	BUG_ON(timer->cpu != cpu);
 
 	/* disable interrupts before test if timer is pending */
-	spin_lock_irqsave(&vt_list->lock, flags);
+	spin_lock_irqsave(&vq->lock, flags);
 
 	/* if timer isn't pending add it on the current CPU */
 	if (!vtimer_pending(timer)) {
-		spin_unlock_irqrestore(&vt_list->lock, flags);
+		spin_unlock_irqrestore(&vq->lock, flags);
 		/* we do not activate an interval timer with mod_virt_timer */
 		timer->interval = 0;
 		timer->expires = expires;
						 | 
					@ -431,7 +480,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
 | 
				
			||||||
		timer->interval = expires;
 | 
							timer->interval = expires;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* the timer can't expire anymore so we can release the lock */
 | 
						/* the timer can't expire anymore so we can release the lock */
 | 
				
			||||||
	spin_unlock_irqrestore(&vt_list->lock, flags);
 | 
						spin_unlock_irqrestore(&vq->lock, flags);
 | 
				
			||||||
	internal_add_vtimer(timer);
 | 
						internal_add_vtimer(timer);
 | 
				
			||||||
	return 1;
 | 
						return 1;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
@@ -445,25 +494,19 @@ EXPORT_SYMBOL(mod_virt_timer);
 int del_virt_timer(struct vtimer_list *timer)
 {
 	unsigned long flags;
-	struct vtimer_queue *vt_list;
+	struct vtimer_queue *vq;
 
 	/* check if timer is pending */
 	if (!vtimer_pending(timer))
 		return 0;
 
-	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vt_list->lock, flags);
+	vq = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vq->lock, flags);
 
 	/* we don't interrupt a running timer, just let it expire! */
 	list_del_init(&timer->entry);
 
-	/* last timer removed */
-	if (list_empty(&vt_list->list)) {
-		vt_list->to_expire = 0;
-		vt_list->offset = 0;
-	}
-
-	spin_unlock_irqrestore(&vt_list->lock, flags);
+	spin_unlock_irqrestore(&vq->lock, flags);
 	return 1;
 }
 EXPORT_SYMBOL(del_virt_timer);
@@ -473,24 +516,19 @@ EXPORT_SYMBOL(del_virt_timer);
  */
 void init_cpu_vtimer(void)
 {
-	struct vtimer_queue *vt_list;
+	struct vtimer_queue *vq;
 
 	/* kick the virtual timer */
-	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
-	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
-	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
 	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+	asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer));
+
+	/* initialize per cpu vtimer structure */
+	vq = &__get_cpu_var(virt_cpu_timer);
+	INIT_LIST_HEAD(&vq->list);
+	spin_lock_init(&vq->lock);
 
 	/* enable cpu timer interrupts */
 	__ctl_set_bit(0,10);
-
-	vt_list = &__get_cpu_var(virt_cpu_timer);
-	INIT_LIST_HEAD(&vt_list->list);
-	spin_lock_init(&vt_list->lock);
-	vt_list->to_expire = 0;
-	vt_list->offset = 0;
-	vt_list->idle = 0;
-
 }
 
 void __init vtime_init(void)
@@ -132,8 +132,7 @@ static void do_stolen_accounting(void)
 	*snap = state;
 
 	/* Add the appropriate number of ticks of stolen time,
-	   including any left-overs from last time.  Passing NULL to
-	   account_steal_time accounts the time as stolen. */
+	   including any left-overs from last time. */
 	stolen = runnable + offline + __get_cpu_var(residual_stolen);
 
 	if (stolen < 0)
@@ -141,11 +140,10 @@ static void do_stolen_accounting(void)
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
-	account_steal_time(NULL, ticks);
+	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time.  Passing idle to
-	   account_steal_time accounts the time as idle/wait. */
+	   including any left-overs from last time. */
 	blocked += __get_cpu_var(residual_blocked);
 
 	if (blocked < 0)
@@ -153,7 +151,7 @@ static void do_stolen_accounting(void)
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
-	account_steal_time(idle_task(smp_processor_id()), ticks);
+	account_idle_ticks(ticks);
 }
 
 /*
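
iter_div_u64_rem() splits the accumulated nanoseconds into whole ticks and writes the remainder back for the next round, so nothing is lost to rounding. A userspace model of it (the kernel version in linux/math64.h divides by repeated subtraction, which is cheap here because stolen or blocked time rarely exceeds a few ticks):

	#include <assert.h>
	#include <stdint.h>

	static uint64_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
					 uint64_t *remainder)
	{
		uint64_t ret = 0;

		while (dividend >= divisor) {	/* quotient is tiny here */
			dividend -= divisor;
			ret++;
		}
		*remainder = dividend;
		return ret;
	}

	int main(void)
	{
		uint64_t stolen = 2500000ULL;	/* made-up ns of stolen time */
		/* 1000000 stands in for NS_PER_TICK with HZ == 1000. */
		uint64_t ticks = iter_div_u64_rem(stolen, 1000000, &stolen);

		assert(ticks == 2 && stolen == 500000);	/* remainder carried */
		return 0;
	}
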
@@ -632,8 +632,8 @@ do_IRQ (struct pt_regs *regs)
 	struct pt_regs *old_regs;
 
 	old_regs = set_irq_regs(regs);
-	irq_enter();
 	s390_idle_check();
+	irq_enter();
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
@@ -18,6 +18,7 @@
 #include <asm/etr.h>
 #include <asm/lowcore.h>
 #include <asm/cio.h>
+#include <asm/cpu.h>
 #include "s390mach.h"
 
 static struct semaphore m_sem;
@@ -369,6 +370,8 @@ s390_do_machine_check(struct pt_regs *regs)
 
 	lockdep_off();
 
+	s390_idle_check();
+
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
 	mcck = &__get_cpu_var(cpu_mcck);
 	umode = user_mode(regs);
@@ -79,10 +79,13 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 }
 
 extern unsigned long long task_delta_exec(struct task_struct *);
-extern void account_user_time(struct task_struct *, cputime_t);
-extern void account_user_time_scaled(struct task_struct *, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t);
-extern void account_system_time_scaled(struct task_struct *, cputime_t);
-extern void account_steal_time(struct task_struct *, cputime_t);
+extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
+extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
+extern void account_steal_time(cputime_t);
+extern void account_idle_time(cputime_t);
+
+extern void account_process_tick(struct task_struct *, int user);
+extern void account_steal_ticks(unsigned long ticks);
+extern void account_idle_ticks(unsigned long ticks);
 
 #endif /* _LINUX_KERNEL_STAT_H */
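
The reworked API takes scaled and unscaled cputime in a single call and drops the task argument from steal and idle accounting, since that time is charged per cpu rather than to any process. A hedged sketch of a caller (kernel context assumed; the variable names are illustrative, and an architecture without scaled cputime simply passes the same delta twice, as the s390 hunks above do):

	/* deltas in cputime_t units, computed by the arch accounting code */
	account_user_time(tsk, user, user_scaled);
	account_system_time(tsk, HARDIRQ_OFFSET, system, system_scaled);
	account_steal_time(steal);	/* no task: per-cpu steal time */
	account_idle_time(idle);	/* no task: per-cpu idle time */
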
@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
 kernel/sched.c | 115
					@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p)
 | 
				
			||||||
 * Account user cpu time to a process.
 | 
					 * Account user cpu time to a process.
 | 
				
			||||||
 * @p: the process that the cpu time gets accounted to
 | 
					 * @p: the process that the cpu time gets accounted to
 | 
				
			||||||
 * @cputime: the cpu time spent in user space since the last update
 | 
					 * @cputime: the cpu time spent in user space since the last update
 | 
				
			||||||
 | 
					 * @cputime_scaled: cputime scaled by cpu frequency
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
void account_user_time(struct task_struct *p, cputime_t cputime)
 | 
					void account_user_time(struct task_struct *p, cputime_t cputime,
 | 
				
			||||||
 | 
							       cputime_t cputime_scaled)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 | 
						struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 | 
				
			||||||
	cputime64_t tmp;
 | 
						cputime64_t tmp;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Add user time to process. */
 | 
				
			||||||
	p->utime = cputime_add(p->utime, cputime);
 | 
						p->utime = cputime_add(p->utime, cputime);
 | 
				
			||||||
 | 
						p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
 | 
				
			||||||
	account_group_user_time(p, cputime);
 | 
						account_group_user_time(p, cputime);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Add user time to cpustat. */
 | 
						/* Add user time to cpustat. */
 | 
				
			||||||
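cputime_scaled is the same interval expressed in cpu-frequency-scaled units; only a few architectures (powerpc, for instance) compute a distinct value, and the generic cputime_to_scaled() is the identity. A caller without scaling support can therefore derive the second argument on the spot:

	account_user_time(p, delta, cputime_to_scaled(delta));	/* delta is a placeholder */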
@@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * Account guest cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
-static void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime,
+			       cputime_t cputime_scaled)
 {
 	cputime64_t tmp;
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
 	tmp = cputime_to_cputime64(cputime);
 
+	/* Add guest time to process. */
 	p->utime = cputime_add(p->utime, cputime);
+	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
 	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 
+	/* Add guest time to cpustat. */
 	cpustat->user = cputime64_add(cpustat->user, tmp);
 	cpustat->guest = cputime64_add(cpustat->guest, tmp);
 }
 
-/*
- * Account scaled user cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in user space since the last update
- */
-void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
-{
-	p->utimescaled = cputime_add(p->utimescaled, cputime);
-}
-
 /*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
  * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
 void account_system_time(struct task_struct *p, int hardirq_offset,
-			 cputime_t cputime)
+			 cputime_t cputime, cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
-		account_guest_time(p, cputime);
+		account_guest_time(p, cputime, cputime_scaled);
 		return;
 	}
 
+	/* Add system time to process. */
 	p->stime = cputime_add(p->stime, cputime);
+	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
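Worth noting in account_system_time(): a tick that lands on a vcpu thread (PF_VCPU set) outside of hard/soft irq context is diverted to account_guest_time(), which books it into both the user and the guest buckets. The guest column of /proc/stat is therefore a subset of user, not an additional pool; for example (field values purely illustrative):

	cpu  4705 356 584 3699176 23527 33 35 0 100
	     user nice sys  idle    iowait irq sirq steal guest

Here the 100 ticks of guest time are already contained in the 4705 ticks of user time, so tools must not add the two columns together.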
@@ -4226,48 +4227,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-	else if (p != rq->idle)
-		cpustat->system = cputime64_add(cpustat->system, tmp);
-	else if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
-		cpustat->idle = cputime64_add(cpustat->idle, tmp);
+		cpustat->system = cputime64_add(cpustat->system, tmp);
 
 	/* Account for system time used */
 	acct_update_integrals(p);
 }
 
 /*
- * Account scaled system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
+ * Account for involuntary wait time.
+ * @steal: the cpu time spent in involuntary wait
  */
-void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+void account_steal_time(cputime_t cputime)
 {
-	p->stimescaled = cputime_add(p->stimescaled, cputime);
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+
+	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
 }
 
 /*
- * Account for involuntary wait time.
- * @p: the process from which the cpu time has been stolen
- * @steal: the cpu time spent in involuntary wait
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
  */
-void account_steal_time(struct task_struct *p, cputime_t steal)
+void account_idle_time(cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp = cputime_to_cputime64(steal);
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
 	struct rq *rq = this_rq();
 
-	if (p == rq->idle) {
-		p->stime = cputime_add(p->stime, steal);
-		if (atomic_read(&rq->nr_iowait) > 0)
-			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
-		else
-			cpustat->idle = cputime64_add(cpustat->idle, tmp);
-	} else
-		cpustat->steal = cputime64_add(cpustat->steal, tmp);
+	if (atomic_read(&rq->nr_iowait) > 0)
+		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+	else
+		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
 }
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+
+/*
+ * Account a single tick of cpu time.
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: indicates if the tick is a user or a system tick
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	cputime_t one_jiffy = jiffies_to_cputime(1);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	struct rq *rq = this_rq();
+
+	if (user_tick)
+		account_user_time(p, one_jiffy, one_jiffy_scaled);
+	else if (p != rq->idle)
+		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+				    one_jiffy_scaled);
+	else
+		account_idle_time(one_jiffy);
+}
+
+/*
+ * Account multiple ticks of steal time.
+ * @p: the process from which the cpu time has been stolen
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+	account_steal_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Account multiple ticks of idle time.
+ * @ticks: number of stolen ticks
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+	account_idle_time(jiffies_to_cputime(ticks));
+}
+
+#endif
+
 /*
  * Use precise platform statistics if available:
  */
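With this block the per-tick policy is centralized: a user tick goes to account_user_time(), a system tick on a normal task to account_system_time(), and a tick on the idle task to account_idle_time(), which alone decides between the idle and iowait buckets. The old copy of account_process_tick() in kernel/timer.c becomes redundant and is deleted in the final hunk. The *_ticks helpers are thin bulk wrappers, so replaying N missed jiffies is a single call:

	/* e.g. after returning from nohz idle with `ticks` unaccounted jiffies */
	account_idle_ticks(ticks);	/* == account_idle_time(jiffies_to_cputime(ticks)) */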
kernel/time/tick-sched.c

@@ -419,7 +419,9 @@ void tick_nohz_restart_sched_tick(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	unsigned long ticks;
+#endif
 	ktime_t now;
 
 	local_irq_disable();

@@ -441,6 +443,7 @@ void tick_nohz_restart_sched_tick(void)
 	tick_do_update_jiffies64(now);
 	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick

@@ -450,12 +453,9 @@ void tick_nohz_restart_sched_tick(void)
 	/*
 	 * We might be one off. Do not randomly account a huge number of ticks!
 	 */
-	if (ticks && ticks < LONG_MAX) {
-		add_preempt_count(HARDIRQ_OFFSET);
-		account_system_time(current, HARDIRQ_OFFSET,
-				    jiffies_to_cputime(ticks));
-		sub_preempt_count(HARDIRQ_OFFSET);
-	}
+	if (ticks && ticks < LONG_MAX)
+		account_idle_ticks(ticks);
+#endif
 
 	touch_softlockup_watchdog();
 	/*
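The nohz restart path no longer has to masquerade as hardirq context: previously it bracketed account_system_time() with add_preempt_count(HARDIRQ_OFFSET)/sub_preempt_count(HARDIRQ_OFFSET) so that the hardirq_count() check inside would classify the slept-away jiffies correctly, whereas account_idle_ticks() charges the idle/iowait buckets directly. Under CONFIG_VIRT_CPU_ACCOUNTING the architecture accounts idle time itself, so the whole block, including the ticks variable above, is compiled out.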
kernel/timer.c

@@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 }
 #endif
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-void account_process_tick(struct task_struct *p, int user_tick)
-{
-	cputime_t one_jiffy = jiffies_to_cputime(1);
-
-	if (user_tick) {
-		account_user_time(p, one_jiffy);
-		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
-	} else {
-		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
-		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
-	}
-}
-#endif
-
 /*
  * Called from the timer interrupt handler to charge one tick to the current
  * process.  user_tick is 1 if the tick is user time, 0 for system.