| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | /* Include in trace.c */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-03-09 16:00:22 -04:00
										 |  |  | #include <linux/stringify.h>
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | #include <linux/kthread.h>
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:45 +02:00
										 |  |  | #include <linux/delay.h>
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:51 +02:00
										 |  |  | static inline int trace_valid_entry(struct trace_entry *entry) | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | { | 
					
						
							|  |  |  | 	switch (entry->type) { | 
					
						
							|  |  |  | 	case TRACE_FN: | 
					
						
							|  |  |  | 	case TRACE_CTX: | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:51 +02:00
										 |  |  | 	case TRACE_WAKE: | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:54 +02:00
										 |  |  | 	case TRACE_STACK: | 
					
						
							| 
									
										
										
										
											2008-08-01 12:26:41 -04:00
										 |  |  | 	case TRACE_PRINT: | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:54 +02:00
										 |  |  | 	case TRACE_SPECIAL: | 
					
						
							| 
									
										
										
										
											2008-11-12 15:24:24 -05:00
										 |  |  | 	case TRACE_BRANCH: | 
					
						
							| 
									
										
										
										
											2009-02-07 21:33:57 +01:00
										 |  |  | 	case TRACE_GRAPH_ENT: | 
					
						
							|  |  |  | 	case TRACE_GRAPH_RET: | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 		return 1; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return 0; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-09-29 23:02:41 -04:00
										 |  |  | static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2008-09-29 23:02:41 -04:00
										 |  |  | 	struct ring_buffer_event *event; | 
					
						
							|  |  |  | 	struct trace_entry *entry; | 
					
						
							| 
									
										
										
										
											2009-02-18 22:50:01 -05:00
										 |  |  | 	unsigned int loops = 0; | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-09-29 23:02:41 -04:00
										 |  |  | 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 
					
						
							|  |  |  | 		entry = ring_buffer_event_data(event); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-02-18 22:50:01 -05:00
										 |  |  | 		/*
 | 
					
						
							|  |  |  | 		 * The ring buffer is a size of trace_buf_size, if | 
					
						
							|  |  |  | 		 * we loop more than the size, there's something wrong | 
					
						
							|  |  |  | 		 * with the ring buffer. | 
					
						
							|  |  |  | 		 */ | 
					
						
							|  |  |  | 		if (loops++ > trace_buf_size) { | 
					
						
							|  |  |  | 			printk(KERN_CONT ".. bad ring buffer "); | 
					
						
							|  |  |  | 			goto failed; | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2008-09-29 23:02:41 -04:00
										 |  |  | 		if (!trace_valid_entry(entry)) { | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:45 +02:00
										 |  |  | 			printk(KERN_CONT ".. invalid entry %d ", | 
					
						
							| 
									
										
										
										
											2008-09-29 23:02:41 -04:00
										 |  |  | 				entry->type); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 			goto failed; | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return 0; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |  failed: | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:45 +02:00
										 |  |  | 	/* disable tracing */ | 
					
						
							|  |  |  | 	tracing_disabled = 1; | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 	printk(KERN_CONT ".. corrupted trace buffer .. "); | 
					
						
							|  |  |  | 	return -1; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Returns 0 on success, non-zero if a corrupted entry was found.
 * If @count is non-NULL it receives the number of entries that were
 * in the buffer before the check.  The buffer is consumed (emptied)
 * as a side effect of the per-cpu validation.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	/* Snapshot the entry count before the buffer is consumed below */
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-11-16 05:57:26 +01:00
/* Report a tracer whose init() callback returned the error @init_ret. */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
					
						
							| 
									
										
										
										
											2008-10-06 19:06:12 -04:00
										 |  |  | #ifdef CONFIG_FUNCTION_TRACER
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:45 +02:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_DYNAMIC_FTRACE
 | 
					
						
							|  |  |  | 
 | 
					
						
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Called from trace_selftest_startup_function() after the plain
 * function-tracing test passed.  Sets a filter on the dummy test
 * function @func, verifies nothing else is traced while the filter
 * is active, then verifies exactly one entry appears after calling
 * @func again.  Returns 0 on success, negative on failure.  The
 * filter is always cleared again before returning.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	/* restore the global tracing state saved on entry */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
					
						
							|  |  |  | #else
 | 
					
						
							|  |  |  | # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 | 
					
						
							|  |  |  | #endif /* CONFIG_DYNAMIC_FTRACE */
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 *
 * On success, chains into the dynamic-tracing selftest (a no-op
 * stub when CONFIG_DYNAMIC_FTRACE is off).  Returns 0 on success;
 * on failure, ftrace is killed entirely before returning non-zero.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	/* restore the global tracing state saved on entry */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
					
						
							| 
									
										
										
										
											2008-10-06 19:06:12 -04:00
										 |  |  | #endif /* CONFIG_FUNCTION_TRACER */
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-02-07 21:33:57 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 | 
					
						
							| 
									
										
										
										
											2009-03-22 05:04:35 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | /* Maximum number of functions to trace before diagnosing a hang */ | 
					
						
							|  |  |  | #define GRAPH_MAX_FUNC_TEST	100000000
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | static void __ftrace_dump(bool disable_tracing); | 
					
						
							|  |  |  | static unsigned int graph_hang_thresh; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* Wrap the real function entry probe to avoid possible hanging */ | 
					
						
							|  |  |  | static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	/* This is harmlessly racy, we want to approximately detect a hang */ | 
					
						
							|  |  |  | 	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { | 
					
						
							|  |  |  | 		ftrace_graph_stop(); | 
					
						
							|  |  |  | 		printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); | 
					
						
							|  |  |  | 		if (ftrace_dump_on_oops) | 
					
						
							|  |  |  | 			__ftrace_dump(false); | 
					
						
							|  |  |  | 		return 0; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return trace_graph_entry(trace); | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-02-07 21:33:57 +01:00
/*
 * Pretty much the same as for the function tracer from which the selftest
 * has been borrowed.
 *
 * The graph tracer is registered with a watchdog entry callback so a
 * hang in the tracer can be detected and recovered from instead of
 * locking up the boot.  Returns 0 on success, -1 (or the registration
 * error) on failure; failure also stops the graph tracer.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		/* Don't run any further selftests after a detected hang */
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
					
						
							|  |  |  | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | #ifdef CONFIG_IRQSOFF_TRACER
 | 
					
						
/*
 * Selftest for the irqsoff tracer: disable interrupts for ~100us,
 * then verify both the main trace buffer and the max-latency buffer
 * contain sane entries.  The recorded max latency is saved and
 * restored so the test does not pollute real measurements.
 * Returns 0 on success, non-zero on failure.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	/* restore the latency recorded before the test ran */
	tracing_max_latency = save_max;

	return ret;
}
					
						
							|  |  |  | #endif /* CONFIG_IRQSOFF_TRACER */
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_PREEMPT_TRACER
 | 
					
						
/*
 * Self-test for the preempt-off latency tracer.
 *
 * Starts the tracer, creates a known preempt-disabled window of
 * ~100us, then verifies that both the live trace buffer and the
 * max-latency snapshot buffer contain entries.
 *
 * Returns 0 on success (or when the test cannot run because
 * preemption is already disabled), negative on failure.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	/* remember the global max latency so the test leaves it untouched */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * count is only written by the second trace_test_buffer() call;
	 * the !ret short-circuit guarantees it is not read otherwise.
	 */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
					
						
							|  |  |  | #endif /* CONFIG_PREEMPT_TRACER */
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 | 
					
						
/*
 * Self-test for the combined preempt/irqs-off latency tracer.
 *
 * Runs the latency window twice: once releasing preemption before
 * interrupts, then once more with the release order reversed, so
 * both nesting orders of the combined tracer are exercised. After
 * each window both the live and max-latency buffers must hold
 * entries.
 *
 * Error paths use goto cleanup: "out" restarts tracing before
 * resetting, "out_no_start" skips the restart because tracing was
 * never stopped on that path.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the global max latency across the test */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
					
						
							|  |  |  | #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-09-19 03:06:43 -07:00
										 |  |  | #ifdef CONFIG_NOP_TRACER
 | 
					
						
							|  |  |  | int | 
					
						
							|  |  |  | trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	/* What could possibly go wrong? */ | 
					
						
							|  |  |  | 	return 0; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | #endif
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | #ifdef CONFIG_SCHED_TRACER
 | 
					
						
/*
 * Kthread body used by the wakeup-latency self-test.
 *
 * Promotes itself to an RT (SCHED_FIFO) priority, signals the test
 * via the completion passed in @data that the priority is in effect,
 * then sleeps until the test wakes it (that wakeup is the latency
 * being measured). Afterwards it idles in short sleeps until
 * kthread_stop() is called.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	/* data is the completion the test waits on — see the caller */
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * Self-test for the wakeup-latency tracer.
 *
 * Spawns an RT kthread (trace_wakeup_test_thread), waits for it to
 * reach RT priority and go to sleep, then wakes it so the tracer can
 * record the wakeup latency. Both the live buffer and the
 * max-latency snapshot must contain entries.
 *
 * Returns 0 on success, -1 or a negative errno-style code on failure.
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the global max latency across the test */
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;		/* signalled once p is running at RT prio */
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	/*
	 * count is only assigned when the first buffer check passed;
	 * !ret short-circuits the read when it did not.
	 */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
					
						
							|  |  |  | #endif /* CONFIG_SCHED_TRACER */
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 | 
					
						
							|  |  |  | int | 
					
						
							|  |  |  | trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	unsigned long count; | 
					
						
							|  |  |  | 	int ret; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	/* start the tracing */ | 
					
						
							| 
									
										
										
										
											2009-02-05 18:02:00 -02:00
										 |  |  | 	ret = tracer_init(trace, tr); | 
					
						
							| 
									
										
										
										
											2008-11-16 05:57:26 +01:00
										 |  |  | 	if (ret) { | 
					
						
							|  |  |  | 		warn_failed_init_tracer(trace, ret); | 
					
						
							|  |  |  | 		return ret; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 	/* Sleep for a 1/10 of a second */ | 
					
						
							|  |  |  | 	msleep(100); | 
					
						
							|  |  |  | 	/* stop the tracing. */ | 
					
						
							| 
									
										
										
										
											2008-11-07 22:36:02 -05:00
										 |  |  | 	tracing_stop(); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 	/* check the trace buffer */ | 
					
						
							|  |  |  | 	ret = trace_test_buffer(tr, &count); | 
					
						
							|  |  |  | 	trace->reset(tr); | 
					
						
							| 
									
										
										
										
											2008-11-07 22:36:02 -05:00
										 |  |  | 	tracing_start(); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:44 +02:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if (!ret && !count) { | 
					
						
							|  |  |  | 		printk(KERN_CONT ".. no entries found .."); | 
					
						
							|  |  |  | 		ret = -1; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return ret; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:47 +02:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_SYSPROF_TRACER
 | 
					
						
							|  |  |  | int | 
					
						
							|  |  |  | trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	unsigned long count; | 
					
						
							|  |  |  | 	int ret; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	/* start the tracing */ | 
					
						
							| 
									
										
										
										
											2009-02-05 18:02:00 -02:00
										 |  |  | 	ret = tracer_init(trace, tr); | 
					
						
							| 
									
										
										
										
											2008-11-16 05:57:26 +01:00
										 |  |  | 	if (ret) { | 
					
						
							|  |  |  | 		warn_failed_init_tracer(trace, ret); | 
					
						
							| 
									
										
										
										
											2009-02-17 01:09:47 -05:00
										 |  |  | 		return ret; | 
					
						
							| 
									
										
										
										
											2008-11-16 05:57:26 +01:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:47 +02:00
										 |  |  | 	/* Sleep for a 1/10 of a second */ | 
					
						
							|  |  |  | 	msleep(100); | 
					
						
							|  |  |  | 	/* stop the tracing. */ | 
					
						
							| 
									
										
										
										
											2008-11-07 22:36:02 -05:00
										 |  |  | 	tracing_stop(); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:47 +02:00
										 |  |  | 	/* check the trace buffer */ | 
					
						
							|  |  |  | 	ret = trace_test_buffer(tr, &count); | 
					
						
							|  |  |  | 	trace->reset(tr); | 
					
						
							| 
									
										
										
										
											2008-11-07 22:36:02 -05:00
										 |  |  | 	tracing_start(); | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:47 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-02-17 01:09:47 -05:00
										 |  |  | 	if (!ret && !count) { | 
					
						
							|  |  |  | 		printk(KERN_CONT ".. no entries found .."); | 
					
						
							|  |  |  | 		ret = -1; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-05-12 21:20:47 +02:00
										 |  |  | 	return ret; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | #endif /* CONFIG_SYSPROF_TRACER */
 | 
					
						
							| 
									
										
										
										
											2008-11-12 15:24:24 -05:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_BRANCH_TRACER
 | 
					
						
							|  |  |  | int | 
					
						
							|  |  |  | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	unsigned long count; | 
					
						
							|  |  |  | 	int ret; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	/* start the tracing */ | 
					
						
							| 
									
										
										
										
											2009-02-05 18:02:00 -02:00
										 |  |  | 	ret = tracer_init(trace, tr); | 
					
						
							| 
									
										
										
										
											2008-11-16 05:57:26 +01:00
										 |  |  | 	if (ret) { | 
					
						
							|  |  |  | 		warn_failed_init_tracer(trace, ret); | 
					
						
							|  |  |  | 		return ret; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-11-12 15:24:24 -05:00
										 |  |  | 	/* Sleep for a 1/10 of a second */ | 
					
						
							|  |  |  | 	msleep(100); | 
					
						
							|  |  |  | 	/* stop the tracing. */ | 
					
						
							|  |  |  | 	tracing_stop(); | 
					
						
							|  |  |  | 	/* check the trace buffer */ | 
					
						
							|  |  |  | 	ret = trace_test_buffer(tr, &count); | 
					
						
							|  |  |  | 	trace->reset(tr); | 
					
						
							|  |  |  | 	tracing_start(); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-02-17 01:09:47 -05:00
										 |  |  | 	if (!ret && !count) { | 
					
						
							|  |  |  | 		printk(KERN_CONT ".. no entries found .."); | 
					
						
							|  |  |  | 		ret = -1; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-11-12 15:24:24 -05:00
										 |  |  | 	return ret; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | #endif /* CONFIG_BRANCH_TRACER */
 |