Git commit a25cac5198 "proc: Consider NO_HZ when printing idle and
iowait times" changes the code for /proc/stat to use get_cpu_idle_time_us
and get_cpu_iowait_time_us if the system is running with nohz enabled.
For architectures that define arch_idle_time (currently only s390),
this is a change for the worse. The result of arch_idle_time is supposed
to be the exact sleep time of the target CPU and should be used instead
of the value kept by the scheduler.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120330122308.18720283@de.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
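
The arch_idle_time() hook itself is provided by the architecture, typically in the <asm/cputime.h> header that this file includes; defining the macro is what selects the arch_idle_time variants of get_idle_time()/get_iowait_time() in the file below. As a hypothetical sketch only (this is not the actual s390 implementation), an architecture with an exact per-CPU sleep counter might wire the hook up roughly like this, where my_arch_exact_sleep_time() is an assumed helper:

/* <asm/cputime.h> -- hypothetical illustration, not the s390 code */
cputime64_t my_arch_exact_sleep_time(int cpu);	/* exact accumulated sleep time of @cpu */
#define arch_idle_time(cpu)	my_arch_exact_sleep_time(cpu)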

fs/proc/stat.c (218 lines, 5.8 KiB, C):

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif

#ifdef arch_idle_time

static cputime64_t get_idle_time(int cpu)
{
	cputime64_t idle;

	idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
		idle += arch_idle_time(cpu);
	return idle;
}

static cputime64_t get_iowait_time(int cpu)
{
	cputime64_t iowait;

	iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
		iowait += arch_idle_time(cpu);
	return iowait;
}

#else

static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}

static u64 get_iowait_time(int cpu)
{
	u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);

	if (iowait_time == -1ULL)
		/* !NO_HZ so we can rely on cpustat.iowait */
		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	else
		iowait = usecs_to_cputime64(iowait_time);

	return iowait;
}

#endif

static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	u64 user, nice, system, idle, iowait, irq, softirq, steal;
	u64 guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = 0;
	guest = guest_nice = 0;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle += get_idle_time(i);
		iowait += get_iowait_time(i);
		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	seq_puts(p, "cpu ");
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
	seq_putc(p, '\n');

	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle = get_idle_time(i);
		iowait = get_iowait_time(i);
		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		seq_printf(p, "cpu%d", i);
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
		seq_putc(p, '\n');
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_put_decimal_ull(p, ' ', kstat_irqs(j));

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}

static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 1024 + 128 * num_possible_cpus();
	char *buf;
	struct seq_file *m;
	int res;

	/* minimum size to display an interrupt count : 2 bytes */
	size += 2 * nr_irqs;

	/* don't ask for more than the kmalloc() max size */
	if (size > KMALLOC_MAX_SIZE)
		size = KMALLOC_MAX_SIZE;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = ksize(buf);
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);
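
For completeness, here is a minimal userspace sketch (not part of the kernel source) that reads back the aggregate "cpu " line emitted by show_stat() and prints the idle and iowait columns, the two fields this patch affects. The field order assumed here follows the seq_put_decimal_ull() calls above: user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice, all in USER_HZ clock ticks.

#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, system, idle, iowait;
	FILE *fp = fopen("/proc/stat", "r");

	if (!fp) {
		perror("fopen /proc/stat");
		return 1;
	}
	/* First line is the system-wide aggregate: "cpu  user nice system idle iowait ..." */
	if (fscanf(fp, "cpu %llu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle, &iowait) != 5) {
		fprintf(stderr, "unexpected /proc/stat format\n");
		fclose(fp);
		return 1;
	}
	fclose(fp);
	printf("idle=%llu iowait=%llu (USER_HZ ticks)\n", idle, iowait);
	return 0;
}

Dividing deltas of these counters by sysconf(_SC_CLK_TCK) converts them to seconds. With this patch applied, on architectures that define arch_idle_time (currently s390) the idle and iowait columns include the architecture's exact per-CPU sleep time rather than the nohz-derived values.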