#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}
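
/*
 * Account, on the runqueue itself, the wait a task had accumulated here
 * by the time it is dequeued (e.g. when it migrates to another cpu).
 * Like the helpers above, this expects the runqueue lock to be held.
 */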
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
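
/*
 * Wrappers for updating schedstats fields; the !CONFIG_SCHEDSTATS branch
 * below defines them as no-ops, so callers need no #ifdefs of their own.
 */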
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
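/*
 * Forget any pending wait interval; sched_info_queued() will stamp
 * last_queued again the next time the task is enqueued.
 */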
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; taking the delta on each cpu annuls the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is called from enqueue_task() (and, below, from
 * sched_info_depart() when a preempted task stays runnable), but it only
 * updates the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the currently running process,
 * typically involuntarily because its time slice expired (this may also
 * be called when switching to the idle task).  Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because their
 * time slice expired.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
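
/*
 * Wrapper so each call site pays only the sched_info_on() check when
 * the accounting is compiled in but not in use.
 */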
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
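/*
 * Neither schedstats nor task delay accounting is configured: the
 * sched_info hooks compile away to nothing.
 */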
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */
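
/*
 * The account_group_*() helpers below share one pattern: return cheaply
 * via cputimer_running() when the group's cputimer is not active,
 * otherwise take cputimer->lock and fold the sampled time into the
 * shared thread_group_cputimer totals.
 */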
					
						
/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}
					
						
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
					
						
/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
					
						
/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}