commit c5ebcedb56
Introduce the PERF_SAMPLE_STACK_USER sample type bit to trigger a dump of
the user level stack on sample. The size of the dump is specified by the
sample_stack_user value.

Being able to dump parts of the user stack, starting from the stack
pointer, will be useful for post-mortem DWARF CFI based stack unwinding.

Add the HAVE_PERF_USER_STACK_DUMP config option to indicate that an
architecture provides a user stack dump on perf event samples. This needs
access to the user stack pointer, which is not unified across
architectures; enable it for x86.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Original-patch-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Arun Sharma <asharma@fb.com>
Cc: Benjamin Redelings <benjamin.redelings@nescent.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/r/1344345647-11536-6-git-send-email-jolsa@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
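
As a usage sketch (not part of the patch): a profiler that wants DWARF-based unwinding could open an event roughly as follows. The 8 KiB dump size and the register mask are arbitrary illustration choices, PERF_REG_X86_64_MAX comes from the x86 uapi perf_regs.h introduced alongside this series, and the open_stack_sampling_event() helper is hypothetical.

#include <linux/perf_event.h>
#include <asm/perf_regs.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a CPU-cycles sampling event on @pid that attaches the user
 * register set and an 8 KiB user stack dump to every sample, for
 * post-mortem DWARF CFI based unwinding. */
static int open_stack_sampling_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size		= sizeof(attr);
	attr.type		= PERF_TYPE_HARDWARE;
	attr.config		= PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period	= 100000;
	attr.sample_type	= PERF_SAMPLE_IP |
				  PERF_SAMPLE_REGS_USER |
				  PERF_SAMPLE_STACK_USER;
	/* User registers to capture; which bits an arch accepts
	 * may vary (x86-64 shown). */
	attr.sample_regs_user	= (1ULL << PERF_REG_X86_64_MAX) - 1;
	/* Size of the user stack dump: a multiple of 8 bytes,
	 * below USHRT_MAX. */
	attr.sample_stack_user	= 8192;
	attr.exclude_kernel	= 1;

	/* pid, any cpu, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}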
		
			
				
	
	
		
kernel/events/internal.h (177 lines, 4 KiB, C)

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
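
/*
 * data_pages[] is a flexible array sized by rb_alloc() for nr_pages
 * entries.  nr_pages stays a power of two, which lets the output
 * path below wrap with a mask (handle->page &= rb->nr_pages - 1)
 * instead of a division.
 */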

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned int						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned int len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min_t(unsigned long, handle->size, len);		\
									\
		written = memcpy_func(handle->addr, buf, size);		\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
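
/*
 * DEFINE_OUTPUT_COPY() instantiates a copy helper around the given
 * copy routine.  The generated function walks the ring buffer page
 * by page, advancing the handle and wrapping at nr_pages, and
 * returns the number of bytes it could NOT write (0 on complete
 * success).  The loop ends early when the copy routine comes up
 * short, e.g. on a faulting user-space access.
 */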

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return n;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
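
/*
 * __output_copy_user() is the helper the user stack dump goes
 * through.  Caveat: the macro above expects memcpy_func to return
 * the number of bytes written, while __copy_from_user_inatomic()
 * returns the number of bytes NOT copied; the two conventions only
 * agree when the copy fully succeeds.  (This mismatch was
 * reconciled by a later upstream cleanup.)
 */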

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

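/*
 * Four recursion counters, one per context: task (0), softirq (1),
 * hardirq (2) and NMI (3).  If an event fires in a context that is
 * already writing output, get_recursion_context() returns -1 and
 * the nested event is dropped rather than allowed to corrupt the
 * buffer; put_recursion_context() releases the slot.
 */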
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
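
For completeness, here is how the sampling path consumes these helpers: the series adds a dump routine to kernel/events/core.c (perf_output_sample_ustack()) that reads the user stack pointer and copies the requested window into the ring buffer. The sketch below is a simplified, hedged reconstruction of that logic, not the verbatim implementation; the name output_sample_ustack and its exact structure are illustrative.

/* Sketch: emit the PERF_SAMPLE_STACK_USER payload for one sample.
 * The record layout is "u64 size, data[size], u64 dyn_size": a
 * fixed-size region (so the overall sample layout stays static)
 * plus the number of bytes actually captured. */
static void output_sample_ustack(struct perf_output_handle *handle,
				 u64 dump_size, struct pt_regs *regs)
{
	if (!regs) {
		/* No user context (e.g. kernel thread): empty dump. */
		u64 size = 0;

		perf_output_put(handle, size);
	} else {
		unsigned long sp = perf_user_stack_pointer(regs);
		unsigned long rem;
		u64 dyn_size;

		/* Fixed part: the size requested at event creation. */
		perf_output_put(handle, dump_size);

		/* Copy from the user stack pointer upward; a fault
		 * cuts the copy short and __output_copy_user()
		 * returns the bytes left over. */
		rem = __output_copy_user(handle, (void *)sp, dump_size);
		dyn_size = dump_size - rem;

		/* Pad what was not copied so the layout stays fixed. */
		__output_skip(handle, NULL, rem);

		/* Dynamic part: bytes of real stack data. */
		perf_output_put(handle, dyn_size);
	}
}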