commit af66abfe2e
Handling multiple PMUs using a single hotplug notifier requires a list
of PMUs to be maintained, with synchronisation in the probe, remove, and
notify paths. This is error-prone and makes the code much harder to
maintain.

Instead of using a single notifier, we can dynamically allocate a
notifier block per-PMU. The end result is the same, but the list of PMUs
is implicit in the hotplug notifier list rather than within a perf-local
data structure, which makes the code far easier to handle.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
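
As a sketch of the pattern described above (the function names and notify
logic below are assumptions based on the commit message, not quoted from
the patch itself): each arm_pmu embeds its own notifier_block (the
hotplug_nb member of struct arm_pmu in the header below), and the probe
path registers that block directly with the hotplug notifier chain.

	#include <linux/cpu.h>
	#include <asm/pmu.h>

	static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
				  void *hcpu)
	{
		/* Recover the owning PMU from its embedded notifier block. */
		struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

		if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
			return NOTIFY_DONE;

		/* Reset this PMU's state as the CPU comes online. */
		if (pmu->reset)
			pmu->reset(pmu);

		return NOTIFY_OK;
	}

	static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
	{
		/*
		 * The notifier block lives inside the dynamically allocated
		 * arm_pmu, so the hotplug notifier chain itself is the only
		 * "list" of PMUs; no perf-local list or locking is needed.
		 */
		cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
		return register_cpu_notifier(&cpu_pmu->hotplug_nb);
	}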

arch/arm/include/asm/pmu.h (162 lines, 4.7 KiB, C):
/*
 *  linux/arch/arm/include/asm/pmu.h
 *
 *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
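
/*
 * Usage sketch (hypothetical board code, not part of this header): a
 * board file can supply platform data to wrap the low level PMU handler
 * with platform specific IRQ handling. All names below are made up.
 */
#if 0
static irqreturn_t board_pmu_handle_irq(int irq, void *dev,
					irq_handler_t pmu_handler)
{
	/* Platform specific work before the low level handler runs. */
	return pmu_handler(irq, dev);
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.handle_irq	= board_pmu_handle_irq,
};
#endif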

#ifdef CONFIG_HW_PERF_EVENTS

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
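
/*
 * Usage sketch (hypothetical driver code, not part of this header): a
 * PMU backend first marks every map entry as unsupported, then overrides
 * only the events it implements. The event encodings below are made up.
 */
#if 0
static const unsigned example_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
};

static const unsigned example_cache_map[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
};
#endif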

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct notifier_block	hotplug_nb;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
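
/*
 * Usage sketch (hypothetical, not part of this header): a backend's
 * map_event callback typically forwards to armpmu_map_event() with its
 * own tables and raw event mask, e.g. the example maps above.
 */
#if 0
static int example_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &example_perf_map,
				&example_cache_map, 0xFF);
}
#endif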

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
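
/*
 * Usage sketch (hypothetical probe table, not part of this header): the
 * probing code matches the running CPU's id register against each
 * entry's cpuid/mask and calls init() on the first hit. The init
 * functions named here are assumptions.
 */
#if 0
static const struct pmu_probe_info example_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel */ }
};
#endif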

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */