sparc64: Add initial perf event conflict resolution and checks.
Cribbed from powerpc code, as usual. :-) Currently it is only used to validate that all counters have the same user/kernel/hv attributes. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
					parent
					
						
							
								7eebda60d5
							
						
					
				
			
			
				commit
				
					
						01552f765c
					
				
			
		
					 1 changed file with 77 additions and 5 deletions
				
			
		| 
						 | 
					@ -713,12 +713,66 @@ static void hw_perf_event_destroy(struct perf_event *event)
 | 
				
			||||||
	perf_event_release_pmc();
 | 
						perf_event_release_pmc();
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						int eu = 0, ek = 0, eh = 0;
 | 
				
			||||||
 | 
						struct perf_event *event;
 | 
				
			||||||
 | 
						int i, n, first;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						n = n_prev + n_new;
 | 
				
			||||||
 | 
						if (n <= 1)
 | 
				
			||||||
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						first = 1;
 | 
				
			||||||
 | 
						for (i = 0; i < n; i++) {
 | 
				
			||||||
 | 
							event = evts[i];
 | 
				
			||||||
 | 
							if (first) {
 | 
				
			||||||
 | 
								eu = event->attr.exclude_user;
 | 
				
			||||||
 | 
								ek = event->attr.exclude_kernel;
 | 
				
			||||||
 | 
								eh = event->attr.exclude_hv;
 | 
				
			||||||
 | 
								first = 0;
 | 
				
			||||||
 | 
							} else if (event->attr.exclude_user != eu ||
 | 
				
			||||||
 | 
								   event->attr.exclude_kernel != ek ||
 | 
				
			||||||
 | 
								   event->attr.exclude_hv != eh) {
 | 
				
			||||||
 | 
								return -EAGAIN;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return 0;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int collect_events(struct perf_event *group, int max_count,
 | 
				
			||||||
 | 
								  struct perf_event *evts[], u64 *events)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct perf_event *event;
 | 
				
			||||||
 | 
						int n = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!is_software_event(group)) {
 | 
				
			||||||
 | 
							if (n >= max_count)
 | 
				
			||||||
 | 
								return -1;
 | 
				
			||||||
 | 
							evts[n] = group;
 | 
				
			||||||
 | 
							events[n++] = group->hw.config;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						list_for_each_entry(event, &group->sibling_list, group_entry) {
 | 
				
			||||||
 | 
							if (!is_software_event(event) &&
 | 
				
			||||||
 | 
							    event->state != PERF_EVENT_STATE_OFF) {
 | 
				
			||||||
 | 
								if (n >= max_count)
 | 
				
			||||||
 | 
									return -1;
 | 
				
			||||||
 | 
								evts[n] = event;
 | 
				
			||||||
 | 
								events[n++] = event->hw.config;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						return n;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int __hw_perf_event_init(struct perf_event *event)
 | 
					static int __hw_perf_event_init(struct perf_event *event)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct perf_event_attr *attr = &event->attr;
 | 
						struct perf_event_attr *attr = &event->attr;
 | 
				
			||||||
 | 
						struct perf_event *evts[MAX_HWEVENTS];
 | 
				
			||||||
	struct hw_perf_event *hwc = &event->hw;
 | 
						struct hw_perf_event *hwc = &event->hw;
 | 
				
			||||||
	const struct perf_event_map *pmap;
 | 
						const struct perf_event_map *pmap;
 | 
				
			||||||
	u64 enc;
 | 
						u64 enc, events[MAX_HWEVENTS];
 | 
				
			||||||
 | 
						int n;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (atomic_read(&nmi_active) < 0)
 | 
						if (atomic_read(&nmi_active) < 0)
 | 
				
			||||||
		return -ENODEV;
 | 
							return -ENODEV;
 | 
				
			||||||
| 
						 | 
					@ -734,9 +788,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 | 
				
			||||||
	} else
 | 
						} else
 | 
				
			||||||
		return -EOPNOTSUPP;
 | 
							return -EOPNOTSUPP;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	perf_event_grab_pmc();
 | 
					 | 
				
			||||||
	event->destroy = hw_perf_event_destroy;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	/* We save the enable bits in the config_base.  So to
 | 
						/* We save the enable bits in the config_base.  So to
 | 
				
			||||||
	 * turn off sampling just write 'config', and to enable
 | 
						 * turn off sampling just write 'config', and to enable
 | 
				
			||||||
	 * things write 'config | config_base'.
 | 
						 * things write 'config | config_base'.
 | 
				
			||||||
| 
						 | 
					@ -749,13 +800,34 @@ static int __hw_perf_event_init(struct perf_event *event)
 | 
				
			||||||
	if (!attr->exclude_hv)
 | 
						if (!attr->exclude_hv)
 | 
				
			||||||
		hwc->config_base |= sparc_pmu->hv_bit;
 | 
							hwc->config_base |= sparc_pmu->hv_bit;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						enc = pmap->encoding;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						n = 0;
 | 
				
			||||||
 | 
						if (event->group_leader != event) {
 | 
				
			||||||
 | 
							n = collect_events(event->group_leader,
 | 
				
			||||||
 | 
									   perf_max_events - 1,
 | 
				
			||||||
 | 
									   evts, events);
 | 
				
			||||||
 | 
							if (n < 0)
 | 
				
			||||||
 | 
								return -EINVAL;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						events[n] = enc;
 | 
				
			||||||
 | 
						evts[n] = event;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (check_excludes(evts, n, 1))
 | 
				
			||||||
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Try to do all error checking before this point, as unwinding
 | 
				
			||||||
 | 
						 * state after grabbing the PMC is difficult.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
 | 
						perf_event_grab_pmc();
 | 
				
			||||||
 | 
						event->destroy = hw_perf_event_destroy;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!hwc->sample_period) {
 | 
						if (!hwc->sample_period) {
 | 
				
			||||||
		hwc->sample_period = MAX_PERIOD;
 | 
							hwc->sample_period = MAX_PERIOD;
 | 
				
			||||||
		hwc->last_period = hwc->sample_period;
 | 
							hwc->last_period = hwc->sample_period;
 | 
				
			||||||
		atomic64_set(&hwc->period_left, hwc->sample_period);
 | 
							atomic64_set(&hwc->period_left, hwc->sample_period);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	enc = pmap->encoding;
 | 
					 | 
				
			||||||
	if (pmap->pic_mask & PIC_UPPER) {
 | 
						if (pmap->pic_mask & PIC_UPPER) {
 | 
				
			||||||
		hwc->idx = PIC_UPPER_INDEX;
 | 
							hwc->idx = PIC_UPPER_INDEX;
 | 
				
			||||||
		enc <<= sparc_pmu->upper_shift;
 | 
							enc <<= sparc_pmu->upper_shift;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue